What could be a problem when running a Python script? - python

In the python I am starting and I have a problem with the use of example scripts, however, there is a problem with the operation even using arguments when calling the script.
I will not say that this script solves a lot of my problems without servicing data writing.
And I will not say that it would be useful to run it :)
I've been giving arguments and nothing in any way
import httplib2
import os
import sys
import csv
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
import argparse
from oauth2client.tools import argparser, run_flow
library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secret.json"
YOUTUBE_SCOPES = ["https://www.googleapis.com/auth/youtube.readonly",
"https://www.googleapis.com/auth/yt-analytics.readonly"]
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
YOUTUBE_ANALYTICS_API_SERVICE_NAME = "youtubeAnalytics"
YOUTUBE_ANALYTICS_API_VERSION = "v1"
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Developers Console
https://console.developers.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
def get_authenticated_services(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=" ".join(YOUTUBE_SCOPES),
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
http = credentials.authorize(httplib2.Http())
youtube_analytics = build(YOUTUBE_ANALYTICS_API_SERVICE_NAME,
YOUTUBE_ANALYTICS_API_VERSION, http=http)
return youtube_analytics
def run_analytics_report(youtube_analytics, options, count):
# Call the Analytics API to retrieve a report. For a list of available
# reports, see:
# https://developers.google.com/youtube/analytics/v1/channel_reports
analytics_query_response = youtube_analytics.reports().query(
ids="channel==%s" % options.channel_id,
metrics=options.metrics,
dimensions=options.dimensions,
filters=options.filters,
start_date=options.start_date,
end_date=options.end_date,
#max_results=options.max_results,
sort=options.sort
).execute()
print "Analytics Data for Channel %s" % options.channel_id
if count == 0:
with open('results.csv', 'w') as csv_out:
csvWriter=csv.writer(csv_out, delimiter=',', lineterminator = '\n')
headers = [ch["name"] for ch in analytics_query_response.get("columnHeaders", [])]
headers.append("country")
csvWriter.writerow(headers)
else:
with open('results.csv', 'a') as csv_out:
csvWriter=csv.writer(csv_out, delimiter=',', lineterminator = '\n')
for row in analytics_query_response.get("rows", []):
values = []
for value in row:
values.append(str(value))
values.append((options.filters[9]+""+options.filters[10]))
csvWriter.writerow(values)
print "Results exported to csv"
if __name__ == "__main__":
count = 0
now = datetime.now()
one_day_ago = (now - timedelta(days=1)).strftime("%Y-%m-%d")
one_week_ago = (now - timedelta(days=7)).strftime("%Y-%m-%d")
f = open('countries.csv', 'rb')
reader = csv.reader(f)
for row in reader:
argparser = argparse.ArgumentParser()
argparser.add_argument("--channel-id", help="Channel ID",
default="UCJ5v_MCY6GNUBTO8-D3XoAg")
argparser.add_argument("--metrics", help="Report metrics",
default="views,estimatedMinutesWatched")
argparser.add_argument("--dimensions", help="Report dimensions",
default="deviceType")
argparser.add_argument("--filters", help="Report filters",
default="country==" + ''.join(row))
argparser.add_argument("--start-date", default=one_week_ago,
help="Start date, in YYYY-MM-DD format")
argparser.add_argument("--end-date", default=one_day_ago,
help="End date, in YYYY-MM-DD format")
#argparser.add_argument("--max-results", help="Max results", default=10)
argparser.add_argument("--sort", help="Sort order", default="-views")
args = argparser.parse_args()
youtube_analytics = get_authenticated_services(args)
try:
run_analytics_report(youtube_analytics, args, count)
count = count + 1
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
I do not know, my question is howling.
When running the script even giving arguments shows the following message?
File "yt-mario.py", line 91
print "Analytics Data for Channel %s" % options.channel_id
^
SyntaxError: Missing parentheses in call to 'print'. Did you mean print("Analytics Data for Channel %s" % options.channel_id)?

SyntaxError: Missing parentheses in call to 'print'. Did you mean print("Analytics Data for Channel %s" % options.channel_id)?
This code is probably Python2.7, where you don't need to put parenthesis around the print statement.
Try running this using Python 2.7.

Related

how make use of multiprocessing in python to make api calls simultaneously

// function to verify user login
def get_authenticated_service(file,client_secret_file):
credentials = None
if os.path.exists(file):
print('Loading Credentials From File...')
with open(file, 'rb') as token:
credentials = pickle.load(token)
return build(API_SERVICE_NAME, API_VERSION, credentials = credentials)
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
print('Refreshing Access Token...')
credentials.refresh(Request())
else:
print('Fetching New Tokens...')
flow = InstalledAppFlow.from_client_secrets_file(
client_secret_file,
scopes=[
'https://www.googleapis.com/auth/youtube.readonly',
'https://www.googleapis.com/auth/youtube.upload',
'https://www.googleapis.com/auth/youtube.force-ssl'
]
)
flow.run_local_server(port=8000, prompt='consent',
authorization_prompt_message='')
credentials = flow.credentials
# Save the credentials for the next run
with open(file, 'wb') as f:
print('Saving Credentials for Future Use...')
pickle.dump(credentials, f)
return build(API_SERVICE_NAME, API_VERSION, credentials = credentials)
//function for verify user and make https body request
def initialize_upload( options, file, tokenFile, client_secret_file):
youtube = get_authenticated_service(tokenFile,client_secret_file)
tags = None
if options.keywords:
tags = options.keywords.split(',')
body=dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status=dict(
privacyStatus=options.privacyStatus
)
)
# Call the API's videos.insert method to create and upload the video.
print("running")
insert_request = youtube.videos().insert(
part=','.join(body.keys()),
body=body,
media_body=MediaFileUpload(file, chunksize=-1, resumable=True)
)
print("multiprocessing start")
resumable_upload(insert_request)
print("multiprocessing ends")
//function for uploading file
def resumable_upload(request):
response = None
error = None
retry = 0
print("running by " + multiprocessing.current_process().name)
while response is None:
try:
print('Uploading file...')
status, response = request.next_chunk()
print('Uploading file done...')
if response is not None:
if 'id' in response:
print('Video id "%s" was successfully uploaded.' % response['id'])
else:
exit('The upload failed with an unexpected response: %s' % response)
except HttpError as e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = 'A retriable HTTP error %d occurred:\n%s' % (e.resp.status,e.content)
else:
raise
except RETRIABLE_EXCEPTIONS as e:
error = 'A retriable error occurred: %s' % e
if error is not None:
print(error)
retry += 1
if retry > MAX_RETRIES:
exit('No longer attempting to retry.')
max_sleep = 2 ** retry
# sleep_seconds = random.random() * max_sleep
sleep_seconds = 0.5
print('Sleeping %f seconds and then retrying...' % sleep_seconds)
time.sleep(sleep_seconds)
// then title, description etc passing as arguments and making multiprocessing call
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--title', help='Video title', default='Android react native code sample(ANdroid emulator)')
parser.add_argument('--description', help='Android react native code sample description',
default='Android react native code sample description(ANdroid emulator)')
parser.add_argument('--category', default='22',
help='Numeric video category. ' +
'See https://developers.google.com/youtube/v3/docs/videoCategories/list')
parser.add_argument('--keywords', help='react native, react, android, emulator',
default='react native, react')
parser.add_argument('--privacyStatus', choices=VALID_PRIVACY_STATUSES,
default='private', help='Video privacy status.')
args = parser.parse_args()
try:
l = []
start = time.perf_counter()
t1 = multiprocessing.Process(target=initialize_upload, args=(args, "android1.mp4", 'token1.pickle','client_secret.json'))
t2 = multiprocessing.Process(target=initialize_upload, args=(args, "android1.mp4", 'token2.pickle','client_secret.json'))
t1.start()
t2.start()
t1.join()
t2.join()
finish = time.perf_counter()
print(f'Finished in {round(finish-start, 2)} seconds..')
except HttpError as e:
print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
if i try to make this call it takes 12.09 secs and without multiprocessing (i.e calling simultaneously one after another it takes 12.39 secs). after debugging little bit i get to know that till print("uploading file...) (in resumable_upload()) its working properly but after that its like working one at a time. Can anyone suggest how i can make him work with multiprocessing ? below is the output of this file

Azure Document Translator - First Page and Last Page is Blank after Translation

May I seek your help, I am new to Azure Document Translations,
I am using Python as the main language and is using the Azure Document Translation API for Python.
Upon testing about 20 pdf files, it is able to translate the documents however, first page and last page seems to be blank on most times.
If I highlight the blank page it using my mouse, it seems to highlight something on the page but If it try to copy paste it, nothing is pasted.
Here is below code for reference,
I wanted to know if anyone else encountered this issue, it might be particular to files that I am translating but I would be helpful to get more information from others as well.
import os
import re
import sys
import traceback
import logging
import logging.handlers
import argparse
import datetime
import configparser
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document import DocumentTranslationClient
APP_LOGGER = None
def setup_logger(log_filename):
app_logger = logging.getLogger('AppLogger')
app_logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.handlers.TimedRotatingFileHandler(log_filename, when='H', interval=12, backupCount=20)
handler.setFormatter(formatter)
app_logger.addHandler(handler)
has_yesterday_file = False
log_file = os.path.basename(log_filename)
yesterday_date = (datetime.datetime.today() + datetime.timedelta(days=-1)).strftime(".%Y-%m-%d")
yesterday_file_pattern = log_file + yesterday_date
today_date = datetime.datetime.today().strftime(".%Y-%m-%d")
today_file_pattern = log_file + today_date
log_path = os.path.dirname(log_filename)
files = os.listdir(log_path)
for f in files:
if re.search(yesterday_file_pattern, f):
has_yesterday_file = True
break
if re.search(today_file_pattern, f):
has_yesterday_file = True
break
if not has_yesterday_file:
handler.doRollover()
return app_logger
def main():
global APP_LOGGER
error_code = 0
try:
parser = argparse.ArgumentParser(description='Translate documents in AWS container.')
parser.add_argument('config', help='config file path to read', nargs='?', default='config.ini')
parser.add_argument('file', help='name of file to translate', nargs='?')
parser.add_argument('-f', '--folder', action='store_true', default=False)
args = parser.parse_args()
config = configparser.RawConfigParser()
config.read(args.config)
key = config['MAIN']['key']
endpoint = config['MAIN']['endpoint']
sourceUrl = config['MAIN']['source_url']
targetUrl = config['MAIN']['target_url']
language = config['MAIN']['language']
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
if args.folder:
poller = client.begin_translation(sourceUrl, targetUrl, language)
elif args.file:
poller = client.begin_translation(sourceUrl, targetUrl, language, prefix=args.file)
else:
error_code = 1
return error_code
result = poller.result()
for document in result:
if document.status == "Succeeded":
else:
error_code = 1
except:
error_code = 2
return error_code
def test():
key = ""
endpoint = ""
sourceUrl = ""
targetUrl = ""
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
poller = client.begin_translation(sourceUrl, targetUrl, "en", prefix="18_DE_2022-04-12")
result = poller.result()
for document in result:
if document.status == "Succeeded":
else:
print("Error Code: {}, Message: {}\n".format(document.error.code, document.error.message))
if __name__ == '__main__':
error_code = main()
print(error_code)
if error_code > 0:
sys.exit(error_code)
Thank You

Raspberry Pi python youtube api json error

I'm using Raspberry Pi 3 & Python 2.7.9.
I'm trying to using youtube api. here's the code I use:
#!/usr/bin/python
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import json
import urllib
# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps
# tab of
# https://cloud.google.com/console
# Please ensure that you have enabled the YouTube Data API for your project.
DEVELOPER_KEY = "REPLACE_ME" #I did replace api key
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
FREEBASE_SEARCH_URL = "https://www.googleapis.com/freebase/v1/search?%s"
def get_topic_id(options):
# Retrieve a list of Freebase topics associated with the provided query term.
freebase_params = dict(query=options.query, key=DEVELOPER_KEY)
freebase_url = FREEBASE_SEARCH_URL % urllib.urlencode(freebase_params)
freebase_response = json.loads(urllib.urlopen(freebase_url).read())
if len(freebase_response["result"]) == 0:
exit("No matching terms were found in Freebase.")
# Display the list of matching Freebase topics.
mids = []
index = 1
print "The following topics were found:"
for result in freebase_response["result"]:
mids.append(result["mid"])
print " %2d. %s (%s)" % (index, result.get("name", "Unknown"),
result.get("notable", {}).get("name", "Unknown"))
index += 1
# Display a prompt for the user to select a topic and return the topic ID
# of the selected topic.
mid = None
while mid is None:
index = raw_input("Enter a topic number to find related YouTube %ss: " %
options.type)
try:
mid = mids[int(index) - 1]
except ValueError:
pass
return mid
def youtube_search(mid, options):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
# Call the search.list method to retrieve results associated with the
# specified Freebase topic.
search_response = youtube.search().list(
topicId=mid,
type=options.type,
part="id,snippet",
maxResults=options.max_results
).execute()
# Print the title and ID of each matching resource.
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
print "%s (%s)" % (search_result["snippet"]["title"],
search_result["id"]["videoId"])
elif search_result["id"]["kind"] == "youtube#channel":
print "%s (%s)" % (search_result["snippet"]["title"],
search_result["id"]["channelId"])
elif search_result["id"]["kind"] == "youtube#playlist":
print "%s (%s)" % (search_result["snippet"]["title"],
search_result["id"]["playlistId"])
if __name__ == "__main__":
argparser.add_argument("--query", help="Freebase search term", default="Google")
argparser.add_argument("--max-results", help="Max YouTube results",
default=25)
argparser.add_argument("--type",
help="YouTube result type: video, playlist, or channel", default="channel")
args = argparser.parse_args()
mid = get_topic_id(args)
try:
youtube_search(mid, args)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
And I got error:
ValueError: No JSON object could be decoded
I really don't know how to handle this... anyone how to fix this?
I think it's related with OAuth 2.0 json file, but not sure... and I don't know how to use it

google calendar acces from Python

I newbe on this communauty
till nov 17, I used google calendar V2 to turn on or off my swiming pool pump
I'm trying to upgrade my python script to Google API V3 but I'm not sure to understand everything:
I'm using (no, trying to use) following code get on internet :
# Inspired from 'Raspberry Pi as a Google Calender Alarm Clock'
# http://www.esologic.com/?p=634
from datetime import datetime
import logging, os, platform, re, time
from apiclient.discovery import build
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
from config import *
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Alarm():
system = platform.system().lower()
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE,
scope='https://www.googleapis.com/auth/calendar',
redirect_uri='http://localhost:8080/')
storage = Storage('calendar.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run(flow, storage)
# Google Calendar service connection
http = httplib2.Http()
http = credentials.authorize(http)
service = build(serviceName='calendar', version='v3', http=http, developerKey=API_KEY)
def check_credentials(self):
if self.credentials is None or self.credentials.invalid == True:
credentials = run(self.flow, self.storage)
def calendar_event_query(self):
self.check_credentials()
today = datetime.today()
events = self.service.events().list(singleEvents=True, calendarId=CALENDAR_ID).execute()
for i, event in enumerate(events['items']):
name = event['summary'].lower()
start = event['start']['dateTime'][:-9]
description = event.get('description', '')
repeat = True if description.lower() == 'repeat' else False
now = today.strftime('%Y-%m-%dT%H:%M')
if start >= now:
logger.debug('Event #%s, Name: %s, Start: %s', i, name, start)
if start == now:
if name.startswith('say'):
name = re.sub(r'[^a-zA-Z0-9\s\']', '', name)
command = '{0} "{1}"'.format('say' if system == 'darwin' else 'espeak - ven+m2', name[4:])
logger.info('Event starting. Announcing \'%s\'...', name[4:])
else:
mp3_files = os.listdir(MP3_FOLDER)
mp3_name = name.replace(' ', '_') + '.mp3'
mp3_name = mp3_name if mp3_name in mp3_files else 'default.mp3'
command = 'mpg123 \'{}/{}\''.format(MP3_FOLDER, mp3_name)
logger.info('Event %s starting. Playing mp3 file %s...', name, mp3_name)
os.system(command)
if repeat == False:
time.sleep(60)
def poll(self):
logger.info('Polling calendar for events...')
self.calendar_event_query()
while True:
a = Alarm()
a.poll()
time.sleep(FREQUENCY_CHECK)
Of course, I created client ID and key ID on google consele
But, when I run my script, It's not working and I get a webpage on my raspberry with an error 400 : rederict_URI mismatch
An idea?

import python with __main__ method

I have a python script that have __main__ statement and took all values parametric.
I want to import and use it in my own script.
Actually I can import but don't know how to use it.
As you see below, __main__ is a bit complicated and rewriting it will take time because I even don't know what does most of code mean.
Want to know is there any way to import and use the code as a function?
import os
import sys
import time
import base64
from urllib2 import urlopen
from urllib2 import Request
from urllib2 import HTTPError
from urllib import urlencode
from urllib import quote
from exceptions import Exception
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.application import MIMEApplication
from email.encoders import encode_noop
from api_util import json2python, python2json
class MalformedResponse(Exception):
pass
class RequestError(Exception):
pass
class Client(object):
default_url = 'http://nova.astrometry.net/api/'
def __init__(self,
apiurl = default_url):
self.session = None
self.apiurl = apiurl
def get_url(self, service):
return self.apiurl + service
def send_request(self, service, args={}, file_args=None):
'''
service: string
args: dict
'''
if self.session is not None:
args.update({ 'session' : self.session })
print 'Python:', args
json = python2json(args)
print 'Sending json:', json
url = self.get_url(service)
print 'Sending to URL:', url
# If we're sending a file, format a multipart/form-data
if file_args is not None:
m1 = MIMEBase('text', 'plain')
m1.add_header('Content-disposition', 'form-data; name="request-json"')
m1.set_payload(json)
m2 = MIMEApplication(file_args[1],'octet-stream',encode_noop)
m2.add_header('Content-disposition',
'form-data; name="file"; filename="%s"' % file_args[0])
#msg.add_header('Content-Disposition', 'attachment',
# filename='bud.gif')
#msg.add_header('Content-Disposition', 'attachment',
# filename=('iso-8859-1', '', 'FuSballer.ppt'))
mp = MIMEMultipart('form-data', None, [m1, m2])
# Makie a custom generator to format it the way we need.
from cStringIO import StringIO
from email.generator import Generator
class MyGenerator(Generator):
def __init__(self, fp, root=True):
Generator.__init__(self, fp, mangle_from_=False,
maxheaderlen=0)
self.root = root
def _write_headers(self, msg):
# We don't want to write the top-level headers;
# they go into Request(headers) instead.
if self.root:
return
# We need to use \r\n line-terminator, but Generator
# doesn't provide the flexibility to override, so we
# have to copy-n-paste-n-modify.
for h, v in msg.items():
print >> self._fp, ('%s: %s\r\n' % (h,v)),
# A blank line always separates headers from body
print >> self._fp, '\r\n',
# The _write_multipart method calls "clone" for the
# subparts. We hijack that, setting root=False
def clone(self, fp):
return MyGenerator(fp, root=False)
fp = StringIO()
g = MyGenerator(fp)
g.flatten(mp)
data = fp.getvalue()
headers = {'Content-type': mp.get('Content-type')}
if False:
print 'Sending headers:'
print ' ', headers
print 'Sending data:'
print data[:1024].replace('\n', '\\n\n').replace('\r', '\\r')
if len(data) > 1024:
print '...'
print data[-256:].replace('\n', '\\n\n').replace('\r', '\\r')
print
else:
# Else send x-www-form-encoded
data = {'request-json': json}
print 'Sending form data:', data
data = urlencode(data)
print 'Sending data:', data
headers = {}
request = Request(url=url, headers=headers, data=data)
try:
f = urlopen(request)
txt = f.read()
print 'Got json:', txt
result = json2python(txt)
print 'Got result:', result
stat = result.get('status')
print 'Got status:', stat
if stat == 'error':
errstr = result.get('errormessage', '(none)')
raise RequestError('server error message: ' + errstr)
return result
except HTTPError, e:
print 'HTTPError', e
txt = e.read()
open('err.html', 'wb').write(txt)
print 'Wrote error text to err.html'
def login(self, apikey):
args = { 'apikey' : apikey }
result = self.send_request('login', args)
sess = result.get('session')
print 'Got session:', sess
if not sess:
raise RequestError('no session in result')
self.session = sess
def _get_upload_args(self, **kwargs):
args = {}
for key,default,typ in [('allow_commercial_use', 'd', str),
('allow_modifications', 'd', str),
('publicly_visible', 'y', str),
('scale_units', None, str),
('scale_type', None, str),
('scale_lower', None, float),
('scale_upper', None, float),
('scale_est', None, float),
('scale_err', None, float),
('center_ra', None, float),
('center_dec', None, float),
('radius', None, float),
('downsample_factor', None, int),
('tweak_order', None, int),
('crpix_center', None, bool),
# image_width, image_height
]:
if key in kwargs:
val = kwargs.pop(key)
val = typ(val)
args.update({key: val})
elif default is not None:
args.update({key: default})
print 'Upload args:', args
return args
def url_upload(self, url, **kwargs):
args = dict(url=url)
args.update(self._get_upload_args(**kwargs))
result = self.send_request('url_upload', args)
return result
def upload(self, fn, **kwargs):
args = self._get_upload_args(**kwargs)
try:
f = open(fn, 'rb')
result = self.send_request('upload', args, (fn, f.read()))
return result
except IOError:
print 'File %s does not exist' % fn
raise
def submission_images(self, subid):
result = self.send_request('submission_images', {'subid':subid})
return result.get('image_ids')
def overlay_plot(self, service, outfn, wcsfn, wcsext=0):
from astrometry.util import util as anutil
wcs = anutil.Tan(wcsfn, wcsext)
params = dict(crval1 = wcs.crval[0], crval2 = wcs.crval[1],
crpix1 = wcs.crpix[0], crpix2 = wcs.crpix[1],
cd11 = wcs.cd[0], cd12 = wcs.cd[1],
cd21 = wcs.cd[2], cd22 = wcs.cd[3],
imagew = wcs.imagew, imageh = wcs.imageh)
result = self.send_request(service, {'wcs':params})
print 'Result status:', result['status']
plotdata = result['plot']
plotdata = base64.b64decode(plotdata)
open(outfn, 'wb').write(plotdata)
print 'Wrote', outfn
def sdss_plot(self, outfn, wcsfn, wcsext=0):
return self.overlay_plot('sdss_image_for_wcs', outfn,
wcsfn, wcsext)
def galex_plot(self, outfn, wcsfn, wcsext=0):
return self.overlay_plot('galex_image_for_wcs', outfn,
wcsfn, wcsext)
def myjobs(self):
result = self.send_request('myjobs/')
return result['jobs']
def job_status(self, job_id, justdict=False):
result = self.send_request('jobs/%s' % job_id)
if justdict:
return result
stat = result.get('status')
if stat == 'success':
result = self.send_request('jobs/%s/calibration' % job_id)
print 'Calibration:', result
result = self.send_request('jobs/%s/tags' % job_id)
print 'Tags:', result
result = self.send_request('jobs/%s/machine_tags' % job_id)
print 'Machine Tags:', result
result = self.send_request('jobs/%s/objects_in_field' % job_id)
print 'Objects in field:', result
result = self.send_request('jobs/%s/annotations' % job_id)
print 'Annotations:', result
result = self.send_request('jobs/%s/info' % job_id)
print 'Calibration:', result
return stat
def sub_status(self, sub_id, justdict=False):
result = self.send_request('submissions/%s' % sub_id)
if justdict:
return result
return result.get('status')
def jobs_by_tag(self, tag, exact):
exact_option = 'exact=yes' if exact else ''
result = self.send_request(
'jobs_by_tag?query=%s&%s' % (quote(tag.strip()), exact_option),
{},
)
return result
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('--server', dest='server', default=Client.default_url,
help='Set server base URL (eg, %default)')
parser.add_option('--apikey', '-k', dest='apikey',
help='API key for Astrometry.net web service; if not given will check AN_API_KEY environment variable')
parser.add_option('--upload', '-u', dest='upload', help='Upload a file')
parser.add_option('--wait', '-w', dest='wait', action='store_true', help='After submitting, monitor job status')
parser.add_option('--wcs', dest='wcs', help='Download resulting wcs.fits file, saving to given filename; implies --wait if --urlupload or --upload')
parser.add_option('--kmz', dest='kmz', help='Download resulting kmz file, saving to given filename; implies --wait if --urlupload or --upload')
parser.add_option('--urlupload', '-U', dest='upload_url', help='Upload a file at specified url')
parser.add_option('--scale-units', dest='scale_units',
choices=('arcsecperpix', 'arcminwidth', 'degwidth', 'focalmm'), help='Units for scale estimate')
#parser.add_option('--scale-type', dest='scale_type',
# choices=('ul', 'ev'), help='Scale bounds: lower/upper or estimate/error')
parser.add_option('--scale-lower', dest='scale_lower', type=float, help='Scale lower-bound')
parser.add_option('--scale-upper', dest='scale_upper', type=float, help='Scale upper-bound')
parser.add_option('--scale-est', dest='scale_est', type=float, help='Scale estimate')
parser.add_option('--scale-err', dest='scale_err', type=float, help='Scale estimate error (in PERCENT), eg "10" if you estimate can be off by 10%')
parser.add_option('--ra', dest='center_ra', type=float, help='RA center')
parser.add_option('--dec', dest='center_dec', type=float, help='Dec center')
parser.add_option('--radius', dest='radius', type=float, help='Search radius around RA,Dec center')
parser.add_option('--downsample', dest='downsample_factor', type=int, help='Downsample image by this factor')
parser.add_option('--parity', dest='parity', choices=('0','1'), help='Parity (flip) of image')
parser.add_option('--tweak-order', dest='tweak_order', type=int, help='SIP distortion order (default: 2)')
parser.add_option('--crpix-center', dest='crpix_center', action='store_true', default=None, help='Set reference point to center of image?')
parser.add_option('--sdss', dest='sdss_wcs', nargs=2, help='Plot SDSS image for the given WCS file; write plot to given PNG filename')
parser.add_option('--galex', dest='galex_wcs', nargs=2, help='Plot GALEX image for the given WCS file; write plot to given PNG filename')
parser.add_option('--substatus', '-s', dest='sub_id', help='Get status of a submission')
parser.add_option('--jobstatus', '-j', dest='job_id', help='Get status of a job')
parser.add_option('--jobs', '-J', dest='myjobs', action='store_true', help='Get all my jobs')
parser.add_option('--jobsbyexacttag', '-T', dest='jobs_by_exact_tag', help='Get a list of jobs associated with a given tag--exact match')
parser.add_option('--jobsbytag', '-t', dest='jobs_by_tag', help='Get a list of jobs associated with a given tag')
parser.add_option( '--private', '-p',
dest='public',
action='store_const',
const='n',
default='y',
help='Hide this submission from other users')
parser.add_option('--allow_mod_sa','-m',
dest='allow_mod',
action='store_const',
const='sa',
default='d',
help='Select license to allow derivative works of submission, but only if shared under same conditions of original license')
parser.add_option('--no_mod','-M',
dest='allow_mod',
action='store_const',
const='n',
default='d',
help='Select license to disallow derivative works of submission')
parser.add_option('--no_commercial','-c',
dest='allow_commercial',
action='store_const',
const='n',
default='d',
help='Select license to disallow commercial use of submission')
opt,args = parser.parse_args()
if opt.apikey is None:
# try the environment
opt.apikey = os.environ.get('AN_API_KEY', None)
if opt.apikey is None:
parser.print_help()
print
print 'You must either specify --apikey or set AN_API_KEY'
sys.exit(-1)
args = {}
args['apiurl'] = opt.server
c = Client(**args)
c.login(opt.apikey)
if opt.upload or opt.upload_url:
if opt.wcs or opt.kmz:
opt.wait = True
kwargs = dict(
allow_commercial_use=opt.allow_commercial,
allow_modifications=opt.allow_mod,
publicly_visible=opt.public)
if opt.scale_lower and opt.scale_upper:
kwargs.update(scale_lower=opt.scale_lower,
scale_upper=opt.scale_upper,
scale_type='ul')
elif opt.scale_est and opt.scale_err:
kwargs.update(scale_est=opt.scale_est,
scale_err=opt.scale_err,
scale_type='ev')
elif opt.scale_lower or opt.scale_upper:
kwargs.update(scale_type='ul')
if opt.scale_lower:
kwargs.update(scale_lower=opt.scale_lower)
if opt.scale_upper:
kwargs.update(scale_upper=opt.scale_upper)
for key in ['scale_units', 'center_ra', 'center_dec', 'radius',
'downsample_factor', 'tweak_order', 'crpix_center',]:
if getattr(opt, key) is not None:
kwargs[key] = getattr(opt, key)
if opt.parity is not None:
kwargs.update(parity=int(opt.parity))
if opt.upload:
upres = c.upload(opt.upload, **kwargs)
if opt.upload_url:
upres = c.url_upload(opt.upload_url, **kwargs)
stat = upres['status']
if stat != 'success':
print 'Upload failed: status', stat
print upres
sys.exit(-1)
opt.sub_id = upres['subid']
if opt.wait:
if opt.job_id is None:
if opt.sub_id is None:
print "Can't --wait without a submission id or job id!"
sys.exit(-1)
while True:
stat = c.sub_status(opt.sub_id, justdict=True)
print 'Got status:', stat
jobs = stat.get('jobs', [])
if len(jobs):
for j in jobs:
if j is not None:
break
if j is not None:
print 'Selecting job id', j
opt.job_id = j
break
time.sleep(5)
success = False
while True:
stat = c.job_status(opt.job_id, justdict=True)
print 'Got job status:', stat
if stat.get('status','') in ['success']:
success = (stat['status'] == 'success')
break
time.sleep(5)
if success:
c.job_status(opt.job_id)
# result = c.send_request('jobs/%s/calibration' % opt.job_id)
# print 'Calibration:', result
# result = c.send_request('jobs/%s/tags' % opt.job_id)
# print 'Tags:', result
# result = c.send_request('jobs/%s/machine_tags' % opt.job_id)
# print 'Machine Tags:', result
# result = c.send_request('jobs/%s/objects_in_field' % opt.job_id)
# print 'Objects in field:', result
#result = c.send_request('jobs/%s/annotations' % opt.job_id)
#print 'Annotations:', result
retrieveurls = []
if opt.wcs:
# We don't need the API for this, just construct URL
url = opt.server.replace('/api/', '/wcs_file/%i' % opt.job_id)
retrieveurls.append((url, opt.wcs))
if opt.kmz:
url = opt.server.replace('/api/', '/kml_file/%i/' % opt.job_id)
retrieveurls.append((url, opt.kmz))
for url,fn in retrieveurls:
print 'Retrieving file from', url, 'to', fn
f = urlopen(url)
txt = f.read()
w = open(fn, 'wb')
w.write(txt)
w.close()
print 'Wrote to', fn
opt.job_id = None
opt.sub_id = None
if opt.sdss_wcs:
(wcsfn, outfn) = opt.sdss_wcs
c.sdss_plot(outfn, wcsfn)
if opt.galex_wcs:
(wcsfn, outfn) = opt.galex_wcs
c.galex_plot(outfn, wcsfn)
if opt.sub_id:
print c.sub_status(opt.sub_id)
if opt.job_id:
print c.job_status(opt.job_id)
#result = c.send_request('jobs/%s/annotations' % opt.job_id)
#print 'Annotations:', result
if opt.jobs_by_tag:
tag = opt.jobs_by_tag
print c.jobs_by_tag(tag, None)
if opt.jobs_by_exact_tag:
tag = opt.jobs_by_exact_tag
print c.jobs_by_tag(tag, 'yes')
if opt.myjobs:
jobs = c.myjobs()
print jobs
#print c.submission_images(1)
No, there is no clean way to do so. When the module is being imported, it's code is executed and all global variables are set as attributes to the module object. So if part of the code is not executed at all (is guarded by __main__ condition) there is no clean way to get access to that code. You can however run code of this module with substituted __name__ but that's very hackish.
You should refactor this module and move whole __main__ part into a method and call it like this:
def main():
do_everything()
if __name__ == '__main__':
main()
This way consumer apps will be able to run code without having to run it in a separate process.
Use the runpy module in the Python 3 Standard Library
See that data can be passed to and from the called script
# top.py
import runpy
import sys
sys.argv += ["another parameter"]
module_globals_dict = runpy.run_path("other_script.py",
init_globals = globals(), run_name="__main__")
print(module_globals_dict["return_value"])
# other_script.py
# Note we did not load sys module, it gets passed to this script
script_name = sys.argv[0]
print(f"Script {script_name} loaded")
if __name__ == "__main__":
params = sys.argv[1:]
print(f"Script {script_name} run with params: {params}")
return_value = f"{script_name} Done"
by what your saying you want to call a function in the script that is importing the module so try:
import __main__
__main__.myfunc()

Categories

Resources