How to use a list in the args of Celery beat - Python

I have a list of 10 cities and I want to pass all of them to my Celery beat task, but I can't get it to work.
cityList = ["Tehran", "Shiraz", "Mashhad", "Qom", "Isfahan", "Ardabil", "Hamedan", "Yazd", "Tabriz", "Zavareh"]

app.conf.beat_schedule = {
    'call_show_every_one_minute': {
        "task": 'ali111.get_weather',
        'schedule': crontab(minute='*/1'),
        'args': ([cityList], ),
    }
}
@app.task()
def get_weather(city):
    con = redis.StrictRedis(host='localhost', port=6380, db=0, decode_responses=True)
    appid = "b3bf68fdfc6ba46923cd50cb8b9a79c3"
    URL = 'https://api.openweathermap.org/data/2.5/weather'
    temp = con.get(city)
    if temp is not None:
        return temp
    try:
        PARAMS = {'q': city, 'appid': appid}
        r = requests.get(url=URL, params=PARAMS)
        city_temp = (r.json()['main']['temp']) - 273.15
        my_temp = f"{round(city_temp, 1)}c"
        con.set(city, my_temp, ex=60)
        return my_temp
    except ConnectionError:
        return "no internet connection"

How to pass a variable from script 1 to script 2 in Python

I need help, please.
I have two scripts. The first script consumes from RabbitMQ, and I need to send the received body to a variable in script 2.
However, the variable remains empty. I think script 1 may be calling script 2 before the value is received from RabbitMQ?
How can I achieve this? Thanks.
script 1
import pika
import time

from script2 import strQueue


class ReceiveFromMQ(object):

    def __init__(self):
        credentials = pika.PlainCredentials('xxxx', 'xxxx')
        parameters = pika.ConnectionParameters('xxxx', xxx, 'xxx',
                                               credentials)
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.basic_consume(
            queue='queue',
            on_message_callback=self.on_response,
            auto_ack=True)
        self.response = None
        self.channel.start_consuming()

    def on_response(self, ch, method, props, body):
        self.response = body.decode()
        strQueue = body.decode()
        print(" [x] Received %r" % body.decode())
        # getMsg(body.decode())
        time.sleep(body.count(b'.'))
        print(" [x] Done")
        print(' [*] Waiting for messages. To exit press CTRL+C')
        return self.response

    def call(self):
        self.response = None
        self.connection.process_data_events(time_limit=None)
        print(str(self.response))
        return str(self.response)


receive_mq = ReceiveFromMQ()
response = receive_mq.call()
print(response)
script 2
import requests
import json

strQueue = None

# Function: Authenticate
def httpAuthenticate(in_apiusers, in_apipass, in_Tenant, in_URL):
    try:
        print('retrieve token...')
        url = in_URL
        payload = json.dumps({
            "password": str(in_apipass),
            "usernameOrEmailAddress": str(in_apiusers),
            "tenancyName": str(in_Tenant)
        })
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        json_object = json.loads(response.text)
        print('token code: ' + str(response.status_code))
        return str(json_object["result"])
    except Exception as e:
        return 'Fail:'

# Function: Add Queue Item on UiPath Orchestrator
def httpAddQueueItems(in_URL, in_Token, in_QueueName, in_strjson):
    try:
        print('add queue item...')
        url = in_URL
        payload = json.dumps({
            "itemData": {
                "Priority": "Normal",
                "Name": str(in_QueueName),
                "SpecificContent": {
                    "in_pjsorequest": in_strjson
                },
                "Reference": "ggg"
            }
        })
        headers = {
            'X-UIPATH-OrganizationUnitId': '',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + in_Token
        }
        response = requests.request("POST", url, headers=headers, data=payload)
    except Exception as e:
        print(e)
        return 'Fail'

# CONSTANTS
OnPremuser = "xxxx"
OnPrempass = "xxx!"
OnPremtenant = "Default"
OnPremUrlAuth = "xxxx"
OnPremUrlAddQueue = "https://xxxx"
OnPremQueue = "JSON"
OnPremPJSON = strQueue

OnPremtoken = httpAuthenticate(OnPremuser, OnPrempass, OnPremtenant, OnPremUrlAuth)
httpAddQueueItems(OnPremUrlAddQueue, OnPremtoken, OnPremQueue, OnPremPJSON)
What you are trying to achieve is not possible this way, since you are trying to read a shared variable that another module writes (a race condition). Moreover, because of Python's GIL, only one bytecode instruction can be executed at a time, which means only one CPU-bound task runs at a time.
P.S.: It can be achieved by running a consumer for the RabbitMQ producer and then passing the received JSON to the code that needs it.
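A minimal sketch of that approach, assuming script 2 exposes a function (the name process_message is hypothetical) instead of a module-level variable:

# script2.py (sketch): expose a function instead of the strQueue global.
def process_message(str_queue):
    token = httpAuthenticate(OnPremuser, OnPrempass, OnPremtenant, OnPremUrlAuth)
    return httpAddQueueItems(OnPremUrlAddQueue, token, OnPremQueue, str_queue)

# script1.py (sketch): method of ReceiveFromMQ; hand each message to
# script 2 as it arrives, so the value is passed rather than shared.
import script2

    def on_response(self, ch, method, props, body):
        payload = body.decode()
        print(" [x] Received %r" % payload)
        script2.process_message(payload)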

How to add many user IDs to one list?

I have code that stores the user ID and the streamer's name; when the streamer starts the stream, the user who entered the command is notified.
How can I correctly store all of the user IDs so that it works for everyone, and not just for one user?
import requests
import pymongo
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor

TOKEN = ''
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
scheduler = AsyncIOScheduler(timezone="Europe/Kiev")
client = pymongo.MongoClient('')
db = client['Users']
collection = db['twitch']


def add_user(streamer_name, chat_id):
    collection.update_one({
        "_id": streamer_name
    }, {"$set": {
        'online': '-',
        'chat_id': chat_id
    }}, upsert=True)


def set_online(streamers):
    collection.update_one({
        '_id': streamers
    }, {'$set': {
        'online': 'True'
    }})


def set_offline(streamers):
    collection.update_one({
        '_id': streamers
    }, {'$set': {
        'online': 'False'
    }})


async def check(streamer_name, chat_id):
    client_id = ''
    client_secret = ''
    body = {
        'client_id': client_id,
        'client_secret': client_secret,
        "grant_type": 'client_credentials'
    }
    r = requests.post('https://id.twitch.tv/oauth2/token', body)
    keys = r.json()
    headers = {
        'Client-ID': client_id,
        'Authorization': 'Bearer ' + keys['access_token']
    }
    all_records = collection.find()
    users = list(all_records)
    for i in users:
        streamers = i['_id']
        send_users = i['chat_id']
        online = i['online']
        stream = requests.get('https://api.twitch.tv/helix/streams?user_login=' + streamers, headers=headers)
        stream_data = stream.json()
        if len(stream_data['data']) == 1:
            live = (streamers + ' is live: ' + stream_data['data'][0]['title'])
            if online == 'False':
                await bot.send_message(send_users, live)
                set_online(streamers)
            if online == 'True':
                print('streamer online')
        else:
            set_offline(streamers)
    scheduler.add_job(check, "interval", seconds=5, args=(streamer_name, chat_id))


@dp.message_handler(commands='check')
async def check_stream(message: types.Message):
    streamer_name = message.text[7:]
    chat_id = message.chat.id
    add_user(streamer_name, chat_id)
    await check(streamer_name, chat_id)


if __name__ == "__main__":
    scheduler.start()
    executor.start_polling(dp, skip_updates=True)
Also, when the streamer starts the stream, many messages come in, not just one.
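One way to keep several subscribers per streamer (a sketch, using the same pymongo collection as above): store chat IDs in an array field with $addToSet, then notify every ID in it. Moving scheduler.add_job out of check() would also help, since the code above registers a new job on every run, which is a plausible cause of the duplicate messages.

def add_user(streamer_name, chat_id):
    # $addToSet appends the chat ID to an array without duplicating it,
    # so each streamer document keeps a list of subscribers.
    collection.update_one(
        {'_id': streamer_name},
        {
            '$set': {'online': '-'},
            '$addToSet': {'chat_ids': chat_id},
        },
        upsert=True,
    )

# Inside check(), notify every subscriber of the streamer:
# for chat_id in i.get('chat_ids', []):
#     await bot.send_message(chat_id, live)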

Error when posting payload data string to HubSpot using an AWS Lambda Python API call

I have recently uploaded contact records to HubSpot using Postman. Here is a raw JSON data example and POST method that I use to successfully upload a contact:
https://api.hubapi.com/crm/v3/objects/contacts?hapikey={{hapikey}}
{properties": {
"smbapi": "yes",
"email": "fcgrinding#junkstermail.com",
"business_name":"Forest City Grinding Inc",
"srvc_address_1":"3544 Stenstrom Rd",
"srvc_city_1":"",
"srvc_state_1":"IL",
"srvc_zip_1":"61109",
"proposal_date":"2021-12-07",
"proposal_start_date": "2022-01-01",
"udc_code_1": "COMED",
"eog":"electric",
"fixedprice_1_gas_mcf": 6.63,
"fixedprice_2_gas_mcf": 6.11,
"fixedprice_3_gas_mcf": 5.9,
"term_1": 12,
"term_2": 24,
"term_3": 36,
"smb_bdm_name": "Timothy Chin",
"smb_bdm_phone": "833-999-9999",
"smb_bdm_email": "tim.chin#junkstermail.com"
}
}
Next, I created a Python Lambda function to automate this process, because we want to ingest CSV files that may have many records to extract. I constructed the dictionary to look the same as the string above, which worked fine with Postman. However, when I do a POST API call to HubSpot using my dictionary payload, I get this error:
Invalid input JSON : Cannot build ObjectSchemaEgg, Invalid input JSON
on line 1, column 2: Cannot build ObjectSchemaEgg, some of required
attributes are not set [name, labels]
Here is the processed dictionary string that my code constructed for the API call:
{'properties': '{"smbapi": "yes", "business_name": "Forest City Grinding Inc", "srvc_address_1": "4844 Stenstrom Rd", "srvc_state_1": "IL", "srvc_zip_1": "61109", "proposal_date": "2021-12-07", "proposal_start_date": "2022-01-01", "udc_code_1": "COMED", "fixedprice_1": "6.63", "fixedprice_2": "6.11", "fixedprice_3": "5.9", "term_2": "24", "term_3": "36", "smb_bdm_name": "Gary Wysong", "smb_bdm_phone": "833-389-0881", "smb_bdm_email": "gary.wysong#constellation.com"}'}
Here is my Lambda code in full (pay special attention to the call to post_to_hubspot() and to the post_to_hubspot() function itself). The code that loads the Dynamo table is working correctly:
import boto3
import json
import decimal
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
import re
import pandas as pd
import numpy as np
import os
import datetime
from os import urandom
import email
import base64
import requests
from datetime import datetime, timedelta, timezone
import mailparser
import calendar

global payload_data

landing_zone_bucket_name = str(os.environ['BUCKETNAME'])
s3 = boto3.resource('s3')
landing_zone_bucket = s3.Bucket(landing_zone_bucket_name)
s3r = boto3.client('s3')
dynamodb = boto3.resource('dynamodb', region_name='us-west-2')
table = dynamodb.Table(str(os.environ['DYNAMOTABLE']))
unprocessed_records_table = dynamodb.Table(str(os.environ['UNPROCESSEDTABLE']))
email_table = dynamodb.Table(str(os.environ['EMAILSTATUSTABLE']))
endpoint_url = os.environ['ENDPOINT_URL']
access_key = os.environ['ACCESSKEY']
now = datetime.now()
today_date = datetime.strftime(now, '%d')
today_month = datetime.strftime(now, '%m')
today_year = datetime.strftime(now, '%Y')
time_stamp = datetime.now().strftime('%Y%m%d%H%M%S')
payload_data = {}


# WRITE RECORDS TO DYNAMO
def dynamoPut(dObj, table_name=None):
    try:
        for each in list(dObj['Information']):
            if dObj['Information'][each]:
                dObj['Information'][each] = str(dObj['Information'][each])
            else:
                del dObj['Information'][each]
        dObj['Month'] = today_month
        dObj['Year'] = today_year
        dObj['Day'] = today_date
        for each in list(dObj):
            if dObj[each] != '':
                dObj[each] = dObj[each]
            else:
                del dObj[each]
        if table_name != None:
            response = unprocessed_records_table.put_item(Item=dObj)
        else:
            response = table.put_item(Item=dObj)
        if response['ResponseMetadata']['HTTPStatusCode'] == 200:
            return True
        else:
            return False
    except Exception as e:
        print(e)
        return False


def dynamoPutFileName(filename, source_type):
    try:
        dObj = {}
        dObj['id'] = urandom(20).hex()
        dObj['CreatedAt'] = str(datetime.now())
        dObj['FileName'] = filename
        dObj['Type'] = source_type
        dObj['EmailSent'] = False
        response = email_table.put_item(Item=dObj)
        if response['ResponseMetadata']['HTTPStatusCode'] == 200:
            return True
        else:
            return False
    except Exception as e:
        print(e)
        return False


def parse_csv_hubspot(event, obj):
    # parsing CSV file to write to dynamo
    try:
        def auto_truncate(val):
            return val[:255]
        print('<< IN PARSE CSV HUBSPOT >>')
        print(event)
        csv = pd.read_csv(obj['Body'], encoding="ISO-8859-1")
        csv_nn = csv.replace(np.nan, 'null', regex=True)
        d = csv_nn.to_dict(orient='records')
        source_id = urandom(20).hex()
        file_name = event['file_path'].split('/')[-1]
        print('<< FILE NAME >>', file_name)
        for each in d:
            try:
                dbObj = {}
                # PASSING THE EXTERNAL KEY
                UniqueKey = ''
                if 'smbapi' in each and each['smbapi'] != 'null':
                    dbObj['smbapi'] = each['smbapi']
                    print('<< SMB API >>', dbObj['smbapi'])
                if 'business_name' in each and each['business_name'] != 'null':
                    dbObj['business_name'] = each['business_name']
                    print('<< BUSINESS NAME >>', dbObj['business_name'])
                if 'srvc_address_1' in each and each['srvc_address_1'] != 'null':
                    dbObj['srvc_address_1'] = each['srvc_address_1']
                    print('<< ADDRESS 1 >>', dbObj['srvc_address_1'])
                if 'srvc_city_1' in each and each['srvc_city_1'] != 'null':
                    dbObj['srvc_city_1'] = each['srvc_city_1']
                if 'srvc_state_1' in each and each['srvc_state_1'] != 'null':
                    dbObj['srvc_state_1'] = each['srvc_state_1']
                if 'srvc_zip_1' in each and each['srvc_zip_1'] != 'null':
                    dbObj['srvc_zip_1'] = str(each['srvc_zip_1']).zfill(5)
                if 'proposal_date' in each and each['proposal_date'] != 'null':
                    dbObj['proposal_date'] = try_parsing_date(each['proposal_date']).date().isoformat()
                if 'proposal_start_date' in each and each['proposal_start_date'] != 'null':
                    dbObj['proposal_start_date'] = try_parsing_date(each['proposal_start_date']).date().isoformat()
                if 'udc_code_1' in each and each['udc_code_1'] != 'null':
                    dbObj['udc_code_1'] = each['udc_code_1']
                if 'eog' in each and each['eog'] != 'null':
                    dbObj['eog'] = each['eog']
                if 'fixedprice_1' in each and each['fixedprice_1'] != 'null':
                    dbObj['fixedprice_1'] = each['fixedprice_1']
                if 'fixedprice_2' in each and each['fixedprice_2'] != 'null':
                    dbObj['fixedprice_2'] = each['fixedprice_2']
                if 'fixedprice_3' in each and each['fixedprice_3'] != 'null':
                    dbObj['fixedprice_3'] = each['fixedprice_3']
                if 'fixedprice_1_gas_therm' in each and each['fixedprice_1_gas_therm'] != 'null':
                    dbObj['fixedprice_1_gas_therm'] = each['fixedprice_1_gas_therm']
                if 'fixedprice_2_gas_therm' in each and each['fixedprice_2_gas_therm'] != 'null':
                    dbObj['fixedprice_2_gas_therm'] = each['fixedprice_2_gas_therm']
                if 'fixedprice_3_gas_therm' in each and each['fixedprice_3_gas_therm'] != 'null':
                    dbObj['fixedprice_3_gas_therm'] = each['fixedprice_3_gas_therm']
                if 'fixedprice_1_gas_ccf' in each and each['fixedprice_1_gas_ccf'] != 'null':
                    dbObj['fixedprice_1_gas_ccf'] = each['fixedprice_1_gas_ccf']
                if 'fixedprice_2_gas_ccf' in each and each['fixedprice_2_gas_ccf'] != 'null':
                    dbObj['fixedprice_2_gas_ccf'] = each['fixedprice_2_gas_ccf']
                if 'fixedprice_3_gas_ccf' in each and each['fixedprice_3_gas_ccf'] != 'null':
                    dbObj['fixedprice_3_gas_ccf'] = each['fixedprice_3_gas_ccf']
                if 'fixedprice_1_gas_dth' in each and each['fixedprice_1_gas_dth'] != 'null':
                    dbObj['fixedprice_1_gas_dth'] = each['fixedprice_1_gas_dth']
                if 'fixedprice_2_gas_dth' in each and each['fixedprice_2_gas_dth'] != 'null':
                    dbObj['fixedprice_2_gas_dth'] = each['fixedprice_2_gas_dth']
                if 'fixedprice_3_gas_dth' in each and each['fixedprice_3_gas_dth'] != 'null':
                    dbObj['fixedprice_3_gas_dth'] = each['fixedprice_3_gas_dth']
                if 'fixedprice_1_gas_mcf' in each and each['fixedprice_1_gas_mcf'] != 'null':
                    dbObj['fixedprice_1_gas_mcf'] = each['fixedprice_1_gas_mcf']
                if 'fixedprice_2_gas_mcf' in each and each['fixedprice_2_gas_mcf'] != 'null':
                    dbObj['fixedprice_2_gas_mcf'] = each['fixedprice_2_gas_mcf']
                if 'fixedprice_3_gas_mcf' in each and each['fixedprice_3_gas_mcf'] != 'null':
                    dbObj['fixedprice_3_gas_mcf'] = each['fixedprice_3_gas_mcf']
                if 'term_1' in each and each['term_1'] != 'null':
                    dbObj['term_1'] = each['term_1']
                if 'term_2' in each and each['term_2'] != 'null':
                    dbObj['term_2'] = each['term_2']
                if 'term_3' in each and each['term_3'] != 'null':
                    dbObj['term_3'] = each['term_3']
                if 'smb_bdm_name' in each and each['smb_bdm_name'] != 'null':
                    dbObj['smb_bdm_name'] = each['smb_bdm_name']
                if 'smb_bdm_phone' in each and each['smb_bdm_phone'] != 'null':
                    if '.' in str(each['smb_bdm_phone']):
                        dbObj['smb_bdm_phone'] = str(int(float(each['smb_bdm_phone'])))
                    else:
                        dbObj['smb_bdm_phone'] = str(each['smb_bdm_phone'])
                if 'smb_bdm_email' in each and each['smb_bdm_email'] != 'null' and each['smb_bdm_email'].strip() != '' and each['smb_bdm_email'] != None:
                    dbObj['smb_bdm_email'] = each['smb_bdm_email']
                print('<< OBJ >> ', dbObj)
                N = urandom(20).hex()
                now = str(datetime.now())
                # << END of HUBSPOT INGESTION >>
                # table.put_item(
                Item = {
                    'CogId': str(N),
                    'CreatedAt': now,
                    'ExternalId': UniqueKey,
                    'Information': dbObj,
                    'SourceBucket': landing_zone_bucket_name,
                    'SourcePath': event['file_path'],
                    'Source': 'HubSpot',
                    'SourceId': source_id,
                    'SourceFileName': time_stamp + '_' + file_name
                }
                # WRITE-TO-DYNAMO
                files_processing = dynamoPut(Item)
                if not files_processing:
                    print('Writing {} record to dynamodb Failed'.format(Item))
            except Exception as e:
                print(e)
                N = urandom(20).hex()
                Item = {
                    'CogId': str(N),
                    'CreatedAt': now,
                    'Information': each,
                    'SourceBucket': landing_zone_bucket_name,
                    'SourcePath': event['file_path'],
                    'Source': 'HubSpot',
                    'message': str(e),
                    'SourceId': source_id,
                    'ExternalId': UniqueKey
                }
                files_processing = dynamoPut(Item, 'Fail')
                pass
        temp_file_name = time_stamp + '_' + file_name
        isert_file_name = dynamoPutFileName(temp_file_name, 'HubSpot')
        post_to_hubspot(dbObj)
        return True
    except Exception as e:
        print(e)
        new_folder_path = os.environ['CSV_NEW_FOLDER_HUBSPOT']
        unprocessed_folder_path = os.environ['CSV_ERROR_FOLDER_HUBSPOT']
        # MOVING PROCESSED FILES FROM NEW TO UNPROCESSED FOLDER
        move_file_to_processed = moving_files_new_to_processed(event, new_folder_path, unprocessed_folder_path)
        return False


def try_parsing_date(text):
    for fmt in ('%m/%d/%Y', '%Y-%m-%dT%H:%M:%S-%f', '%m/%d/%y', '%Y-%m-%d', '%m.%d.%Y', '%Y-%m-%dT%I', '%Y-%m-%dT%I%p', '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S.%f+', '%Y-%m-%dT%H:%M:%S'):  # 2018-11-20T08:05:54-0500
        try:
            return datetime.strptime(text, fmt)
        except ValueError:
            print('in except')
            pass
    return ValueError('no valid date format found')


def post_to_hubspot(list_contacts):
    print('<< IN POST-To-HUBSPOT >>')
    data_string = json.dumps(list_contacts)
    payload_data = {"properties": data_string}
    print('<< dbOBJ LIST >> ', payload_data)
    response = requests.request("POST", endpoint_url + access_key, headers={'Content-Type': 'application/json'}, data=payload_data)
    token_response = json.loads(response.text)
    print('<< TOKEN RESPONSE >>', token_response)


def moving_files_new_to_processed(event, new_folder, processed_folder):
    # MOVING-FILES-TO-PROCESSED
    try:
        copy_source = {
            'Bucket': landing_zone_bucket_name,
            'Key': event['file_path']
        }
        path = event['file_path']
        processed_folder = processed_folder + time_stamp + '_'
        new_key = path.replace(new_folder, processed_folder)
        new_obj = landing_zone_bucket.Object(new_key)
        new_obj.copy(copy_source)
        s3.Object(landing_zone_bucket_name, event['file_path']).delete()
        return True
    except Exception as e:
        print(e)
        return False


def lambda_handler(event, context):
    print("Starting to Push Records to Dynamo Lambda")
    print(event)
    try:
        parse_flag = False
        new_folder_path = ''
        processed_folder_path = ''
        # Gets file path and calls required function to parse it out
        key = str(os.environ['CSV_NEW_FOLDER_HUBSPOT'])
        obj = s3r.get_object(Bucket=landing_zone_bucket_name, Key=event['file_path'])
        print('after obj')
        print(os.environ['CSV_NEW_FOLDER_HUBSPOT'])
        print('in HubSpot parse_csv')
        parse_csv_func = parse_csv_hubspot(event, obj)
        # Checks if parse_csv returns an empty dictionary
        if parse_csv_func:
            parse_flag = True
            new_folder_path = os.environ['CSV_NEW_FOLDER_HUBSPOT']
            processed_folder_path = os.environ['CSV_PROCESSED_FOLDER_HUBSPOT']
        else:
            print('File Format not Supported for {}'.format(event['file_path']))
        if parse_flag:
            # UPLOADING CONTACT. MOVING PROCESSED FILES FROM NEW TO PROCESSED FOLDER
            # print('<< PAYLOAD >> ', payload)
            # response = requests.request("POST", "https://api.hubapi.com/crm/v3/schemas/?hapikey=" + access_key, headers={'Content-Type': 'application/json'}, data=json.dumps(str(payload)))
            # token_response = json.loads(response.text)
            # print('<< TOKEN RESPONSE >>', token_response)
            # MOVING PROCESSED FILES FROM NEW TO PROCESSED FOLDER
            move_file_to_processed = moving_files_new_to_processed(event, new_folder_path, processed_folder_path)
            if move_file_to_processed:
                print('File {} moved Successfully from {} to {}'.format(event['file_path'], new_folder_path, processed_folder_path))
            else:
                print('Moving {} file from new to processing folder Failed'.format(event['file_path']))
    except Exception as e:
        print(e)
What could be the problem? Thanks for your help.
The problem was caused by two issues:
1. The dictionary should have been placed in json.dumps() to convert it to a JSON string when doing the POST, so the dictionary didn't need to change its structure. Here's the response from the POST:
<< TOKEN RESPONSE >> {
    "id": "135120801",
    "properties": {
        "business_name": "Millers Brand Oats",
        "createdate": "2021-12-21T02:31:12.452Z",
        "fixedprice_1": "6.63",
        "fixedprice_2": "6.11",
        "fixedprice_3": "5.9",
        "hs_all_contact_vids": "135120801",
        "hs_is_contact": "true",
        "hs_is_unworked": "true",
        "hs_marketable_until_renewal": "false",
        "hs_object_id": "135120801",
        "hs_pipeline": "contacts-lifecycle-pipeline",
        "lastmodifieddate": "2021-12-21T02:31:12.452Z",
        "proposal_date": "2021-12-07",
        "proposal_start_date": "2022-01-01",
        "smb_bdm_email": "Tim.Chu@junkster.com",
        "smb_bdm_name": "Tim Chu",
        "smb_bdm_phone": "833-999-9999",
        "smbapi": "yes",
        "srvc_address_1": "4844 Stenstrom Rd",
        "srvc_state_1": "IL",
        "srvc_zip_1": "61109",
        "term_2": "24",
        "term_3": "36",
        "udc_code_1": "COMED"
    },
    "createdAt": "2021-12-21T02:31:12.452Z",
    "updatedAt": "2021-12-21T02:31:12.452Z",
    "archived": false
}
2. I was using the wrong endpoint:
https://api.hubapi.com/crm/v3/schemas/
instead of:
https://api.hubapi.com/crm/v3/objects/contacts/
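Putting both fixes together, a corrected post_to_hubspot() looks roughly like this (a sketch; endpoint_url is assumed to now point at the objects/contacts endpoint):

def post_to_hubspot(contact_properties):
    print('<< IN POST-To-HUBSPOT >>')
    # Serialize the whole payload once; the dict structure stays unchanged.
    payload = json.dumps({"properties": contact_properties})
    response = requests.request(
        "POST",
        endpoint_url + access_key,  # .../crm/v3/objects/contacts?hapikey=<key>
        headers={'Content-Type': 'application/json'},
        data=payload,
    )
    print('<< TOKEN RESPONSE >>', json.loads(response.text))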
Now I just need to find out why the AWS Lambda POSTs allow duplicate contacts to be created in HubSpot, while Postman POSTs prevent duplicates.

Django StreamingHttpResponse and Apache mod_wsgi - not working

I have the following streamed response:
def reportgen_iterator(request, object_id):
    output_format = request.GET.get('output', 'pdf')
    response_data = {
        'progress': 'Retrieving data...',
        'error': False,
        'err_msg': None
    }
    yield json.dumps(response_data)
    try:
        vendor_id, dr_datasets = get_dr_datasets(
            object_id=object_id, ws_user=settings.WS_USER,
            include_vendor_id=True, request=request
        )
    except Exception as e:
        response_data.update({
            'error': True,
            'err_msg': "Unable to retrieve data for report generation. Exception message: {}".format(e.message)
        })
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
        raise StopIteration

    # data retrieved correctly, continue
    response_data['progress'] = 'Data retrieved.'
    yield "{}{}".format(DELIMITER, json.dumps(response_data))

    domain = settings.DR['API_DOMAIN']
    dr_id, delivery_opts = get_dr_ids(vendor_id=vendor_id)
    delivery_option_id = delivery_opts.get(output_format)
    run_idle_time = REST_RUN_IDLE_TIME_MS / 1000 or 1
    headers = settings.DR['AUTHORIZATION_HEADER']
    headers.update({
        'Content-Type': 'application/json', 'deliveryOptionId': delivery_option_id
    })

    # POST request
    response_data['progress'] = 'Generating document...'
    yield "{}{}".format(DELIMITER, json.dumps(response_data))

    post_url = 'https://{domain}{rel_url}/'.format(
        domain=domain,
        rel_url=settings.DR['API_ENDPOINTS']['create'](ddp_id)
    )
    header_img, footer_img = get_images_for_template(vendor_id=vendor_id, request=None)
    images = {
        'HeaderImg': header_img,
        'FooterImg': footer_img
    }
    data = OrderedDict(
        [('deliveryOptionId', delivery_option_id),
         ('clientId', 'MyClient'),
         ('data', dr_datasets),
         ('images', images)]
    )
    payload = json.dumps(data, indent=4).encode(ENCODING)
    req = requests.Request('POST', url=post_url, headers=headers, data=payload)
    prepared_request = req.prepare()
    session = requests.Session()
    post_response = session.send(prepared_request)
    if post_response.status_code != 200:
        response_data.update({
            'error': True,
            'err_msg': "Error: post response status code != 200, exit."
        })
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
        raise StopIteration

    # Post response successful, continue.
    # RUN URL - periodic check
    post_response_dict = post_response.json()
    run_url = 'https://{domain}/{path}'.format(
        domain=domain,
        path=post_response_dict.get('links', {}).get('self', {}).get('href'),
        headers=headers
    )
    run_id = post_response_dict.get('runId', '')
    status = 'Running'
    attempt_counter = 0
    file_url = '{url}/files/'.format(url=run_url)
    while status == 'Running':
        attempt_counter += 1
        run_response = requests.get(url=run_url, headers=headers)
        runs_data = run_response.json()
        status = runs_data['status']
        message = runs_data['message']
        progress = runs_data['progress']
        response_data['progress'] = '{} - {}%'.format(status, int(progress * 100))
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        if status == 'Error':
            msg = '{sc} - run_id: {run_id} - error_id: [{error_id}]: {message}'.format(
                sc=run_response.status_code, run_id=run_id,
                error_id=runs_data.get('errorId', 'N/A'), message=message
            )
            response_data.update({
                'error': True,
                'err_msg': msg
            })
            yield "{}{}".format(DELIMITER, json.dumps(response_data))
            time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
            raise StopIteration
        if status == 'Complete':
            break
        if attempt_counter >= ATTEMPTS_LIMIT:
            msg = 'File failed to generate after {att_limit} retrieve attempts: ' \
                  '({progress}% progress) - {message}'.format(
                      att_limit=ATTEMPTS_LIMIT,
                      progress=int(progress * 100),
                      message=message
                  )
            response_data.update({
                'error': True,
                'err_msg': msg
            })
            yield "{}{}".format(DELIMITER, json.dumps(response_data))
            time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
            raise StopIteration
        time.sleep(run_idle_time)

    # GET GENERATED FILE
    file_url_response = requests.get(
        url=file_url,
        headers=headers,
        params={'userId': settings.DR_CREDS['userId']},
        stream=True,
    )
    if file_url_response.status_code != 200:
        response_data.update({
            'error': True,
            'err_msg': 'error in retrieving file\nurl: {url}\n'.format(url=file_url)
        })
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
        raise StopIteration

    file_url_dict = file_url_response.json()
    retrieve_file_rel_url = file_url_dict['files'][0]['links']['file']['href']
    file_ext = DELIVERY_MAPPING.get(output_format, 'pdf')
    response_data.update({
        'progress': 'Generated.',
        'doc_url': retrieve_file_rel_url,
        'dest_file_ext': file_ext
    })
    yield "{}{}".format(DELIMITER, json.dumps(response_data))


class FullDownloadRosterStreamingView(View):

    def get(self, request, object_id):
        """
        """
        stream = reportgen_iterator(request, object_id)
        try:
            response = StreamingHttpResponse(
                streaming_content=stream, status=200,
                content_type='application/octet-stream'
            )
            response['Cache-Control'] = 'no-cache'
            return response
        except Exception as e:
            return HttpResponseServerError(e.message)


def get_file(request):
    domain = settings.DR['API_DOMAIN']
    retrieve_file_rel_url = request.GET.get('doc_url')
    file_ext = request.GET.get('file_ext')
    retrieve_file_response = requests.get(
        url='https://{domain}/{path}'.format(
            domain=domain,
            path=retrieve_file_rel_url
        ),
        headers=settings.DR['AUTHORIZATION_HEADER'],
        params={'userId': settings.DR_CREDS['userId']},
        stream=True,
    )
    if retrieve_file_response.status_code != 200:
        return HttpResponseServerError(
            "Error while downloading file"
        )
    response = HttpResponse(content_type=CONTENT_TYPE_MAPPING.get(file_ext, 'pdf'))
    response['Content-Disposition'] = (
        'attachment; filename="my_doc.{}"'.format(file_ext)
    )
    response.write(retrieve_file_response.content)
    return response
This is handled client-side by the following JS code:
function getStreamedResponse(lo_id, output) {
    var xhr = new XMLHttpRequest(),
        method = 'GET';
    xhr.overrideMimeType("application/octet-stream");
    var url = window.amphr.baseUrl + '/dl/stream/' + lo_id + '/?output=' + output;
    url += "&" + (new Date()).getTime(); // added a timestamp to prevent xhr request caching
    this.rspObj = null;
    xhr.onprogress = function (evt) {
        var _this = evt.currentTarget;
        if (_this.responseText.length == 0) return;
        var delimiter = '|';
        var responseTextChunks = _this.responseText.split(delimiter);
        if (responseTextChunks.length == 0) return;
        _this.rspObj = JSON.parse(responseTextChunks.slice(-1)[0]);
        if (_this.rspObj.error === true) {
            _this.abort(evt);
        }
        updateProgressMessage(_this.rspObj.progress);
    };
    xhr.onload = function (evt) {
        toggleProgress(false);
        var _this = evt.currentTarget;
        var uri = window.amphr.baseUrl + "/dl/get_file/?doc_url=" + _this.rspObj.doc_url + "&file_ext=" + _this.rspObj.dest_file_ext;
        getFile(uri);
    };
    xhr.onerror = function (evt) {
        var _this = evt.currentTarget;
        toggleProgress(false);
    };
    xhr.onabort = function (evt) {
        toggleProgress(false);
        var _this = evt.currentTarget;
        setTimeout(function () {
            if (window.confirm("Error while generating document.\nDownload original?")) {
                getFile(window.amphr.originalDownloadUrl);
            }
        }, 100);
    };
    var getFile = function (uri) {
        var link = document.createElement("a");
        link.href = uri;
        document.body.appendChild(link);
        link.click();
        document.body.removeChild(link);
        delete link;
    };
    xhr.open(method, url, true);
    xhr.send();
}

function toggleProgress(show) {
    // toggle overlay/spinner/progress message
    var display = (show === true) ? 'block' : 'none';
    var overlayDiv = document.getElementsByClassName('overlay')[0];
    if (show === true) overlayDiv.style.display = display;
    overlayDiv.style.display = display;
    var loaderDiv = document.getElementsByClassName('loader')[0];
    var msgDiv = document.getElementById('progress-msg');
    loaderDiv.style.display = display;
    msgDiv.style.display = display;
    if (show === false) {
        overlayDiv.style.display = display;
        msgDiv.innerHTML = "";
    }
}

function updateProgressMessage(msg) {
    var msgDiv = document.getElementById('progress-msg');
    msgDiv.innerHTML = msg;
}
It works fine locally using the development server (runserver or runserver_plus): the response text arrives in chunks.
However, on the dev environment (Apache with mod_wsgi over HTTPS), the response is returned in its entirety at the end, not chunked.
Any hints about why this is happening?
Thanks.
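A common cause worth checking (an assumption, not a confirmed diagnosis): any layer between Django and the browser that compresses or buffers the response, such as Apache's mod_deflate, collects the whole body before sending it, which turns a streamed response into one final chunk. On the Django side, the view can at least advertise that the response should not be buffered:

response = StreamingHttpResponse(
    streaming_content=stream, status=200,
    content_type='application/octet-stream'
)
response['Cache-Control'] = 'no-cache'
# Nginx-specific hint, harmless under Apache; the Apache-side equivalent
# is disabling mod_deflate for this URL (e.g. with "SetEnv no-gzip 1").
response['X-Accel-Buffering'] = 'no'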

Conditional statement to restart a Python script based on the response from a POST request

I have a Python script in which I send a POST request for data to a server. I expect a particular response which indicates there is data in the response. If I do not receive this response, how can I restart my script / go back to the beginning of it? The script is wrapped in a function, which allows it to run every minute.
I would like to return to the beginning of my function if the response isn't as expected.
Script:
import sched, time, requests, jsonpickle, arcpy, requests, json, datetime

s = sched.scheduler(time.time, time.sleep)

def do_something(sc):
    data2 = jsonpickle.decode((f2.read()))
    Start = datetime.datetime.now()
    # Start = datetime.datetime.strftime(data2['QueryRequest']['LastUpdatedDate'])
    DD = datetime.timedelta(minutes=5)
    earlier = Start - DD
    earlier_str = earlier.strftime('X%m/%d/%Y %H:%M:%S').replace('X0', 'X').replace('X', '')
    data2["QueryRequest"]['LastUpdatedDate'] = str(earlier_str)
    data2 = jsonpickle.encode(data2)
    BulkyItemInfo = " "
    spatial_ref = arcpy.SpatialReference(4326)
    lastpage = 'false'
    startrow = 0
    newquery = 'new'
    pagesize = 100
    url2 = "URL"
    headers2 = {'Content-type': 'text/plain', 'Accept': '/'}
    while lastpage == 'false':
        r2 = requests.post(url2, data=data2, headers=headers2)
        print r2.text
        decoded2 = json.loads(r2.text)
        f2 = open('C:\Users\GeoffreyWest\Desktop\Request.json')
        data2 = jsonpickle.decode((f2.read()))
        if decoded2['Response']['LastPage'] == 'false':
            data2['QueryRequest']['PageSize'] = pagesize
            startrow = startrow + data2['QueryRequest']['PageSize']
            data2['QueryRequest']['StartRowNum'] = startrow
            data2['QueryRequest']['NewQuery'] = 'false'
            data2 = jsonpickle.encode(data2)
            print startrow
        else:
            lastpage = 'true'
    print json.dumps(decoded2, sort_keys=True, indent=4)
    items = []
    for sr in decoded2['Response']['ListOfServiceRequest']['ServiceRequest']:  # where the response is successful or fails
Output for a successful response:
{
    "status": {
        "code": 311,
        "message": "Service Request Successfully Queried.",
        "cause": ""
    },
    "Response": {
        "LastPage": "false",
        "NumOutputObjects": "100",
        "ListOfServiceRequest": {
            "ServiceRequest": [
                {
Output for an unsuccessful response:
{"status":{"code":311,"message":"Service Request Successfully Queried.","cause":""},"Response":{"LastPage":"true","NumOutputObjects":"0","ListOfServiceRequest":{}}}
