I'm using google-cloud-tasks==2.2.0 with Flask and Gunicorn. This is how I send a task to a queue:
def send_task(payload, queue, uri, *args):
    url = f'https://www.mywebsite.com/{uri}'
    payload = json.dumps(payload)
    payload = payload.encode()
    parent = client.queue_path(project=project, location=location, queue=queue)
    service_account_email = 'myaccount.com'

    # Construct the request body.
    td = '1800s'
    duration = duration_pb2.Duration()
    time = duration.FromJsonString(td)

    now = datetime.utcnow() + timedelta(seconds=10)
    ts = timestamp_pb2.Timestamp()
    now = ts.FromDatetime(now)

    task = {
        'http_request': {  # Specify the type of request.
            'http_method': tasks_v2beta3.HttpMethod.POST,
            'url': url,
            'body': payload,  # JSON payload, already serialized and encoded above
            'headers': {  # Add custom header
                'Content-Type': 'application/json'
            },
            'oidc_token': {'service_account_email': service_account_email}
        }
    }
    task['schedule_time'] = now
    task['dispatch_deadline'] = time

    response = client.create_task(request={"parent": parent, "task": task}, timeout=30.0)
I set dispatch_deadline, which according to the API reference should support a timeout of up to 30 minutes.
But no matter what I try, my task fails after 60 seconds with a 504 DEADLINE_EXCEEDED error.
Is there something necessary I'm missing here, or is it a bug?
I am trying to build a pipeline on the Snapchat Marketing API to fetch campaign insight data under an ad account, but when I run the following Python script, the response comes back with only the country dimension (not the os dimension) and only the impressions metric from the fields list (not the spend, swipes, or total_installs metrics). I think the problem is that for the list values only the first element is taken, but I could not solve it.
def post_snapchat_campaign_data(adAccount_id, access_token, granularity, breakdown, report_dimension, start_time, end_time, fields):
    url = "https://adsapi.snapchat.com/v1/adaccounts/{}/stats".format(adAccount_id)
    params = {
        "granularity": granularity,
        "breakdown": breakdown,
        "report_dimension": report_dimension,
        "start_time": start_time,
        "end_time": end_time,
        "fields": fields
    }
    headers = {'Authorization': access_token}
    response = requests.request("GET", url, headers=headers, params=params)
    return response

def get_snapchat_campaign_data(adAccount_id, access_token):
    response = post_snapchat_campaign_data(
        adAccount_id=adAccount_id,
        access_token=access_token,
        granularity="DAY",
        breakdown="campaign",
        report_dimension=["country", "os"],
        start_time="2022-02-02",
        end_time="2022-02-03",
        fields=["impressions", "spend", "swipes", "total_installs"]
    )
    return response.json()
I need help, please.
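One thing worth checking (this is an assumption on my part, not something confirmed in the question): requests encodes a list passed in params as repeated query keys (fields=impressions&fields=spend&...), while many APIs expect a single comma-separated value. Joining the lists before sending would look like this:

    # Hypothetical fix: serialize list parameters as comma-separated strings,
    # since requests otherwise encodes them as repeated query keys.
    params = {
        "granularity": granularity,
        "breakdown": breakdown,
        "report_dimension": ",".join(report_dimension),  # "country,os"
        "start_time": start_time,
        "end_time": end_time,
        "fields": ",".join(fields)  # "impressions,spend,swipes,total_installs"
    }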
I have 2 scripts. The first script consumes from RabbitMQ, and I need to send the received body to a variable in script 2.
However, the variable remains empty. I think script 1 may be calling script 2 before the value is received from RabbitMQ?
How can I achieve this? Thanks.
script 1
import pika
import time

from script2 import strQueue

class ReceiveFromMQ(object):
    def __init__(self):
        credentials = pika.PlainCredentials('xxxx', 'xxxx')
        parameters = pika.ConnectionParameters('xxxx', xxx, 'xxx', credentials)
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.basic_consume(
            queue='queue',
            on_message_callback=self.on_response,
            auto_ack=True)
        self.response = None
        self.channel.start_consuming()

    def on_response(self, ch, method, props, body):
        self.response = body.decode()
        strQueue = body.decode()
        print(" [x] Received %r" % body.decode())
        # getMsg(body.decode())
        time.sleep(body.count(b'.'))
        print(" [x] Done")
        print(' [*] Waiting for messages. To exit press CTRL+C')
        return self.response

    def call(self):
        self.response = None
        self.connection.process_data_events(time_limit=None)
        print(str(self.response))
        return str(self.response)

receive_mq = ReceiveFromMQ()
response = receive_mq.call()
print(response)
script 2
import requests
import json

strQueue = None

# Function: authenticate
def httpAuthenticate(in_apiusers, in_apipass, in_Tenant, in_URL):
    try:
        print('retrieve token...')
        url = in_URL
        payload = json.dumps({
            "password": str(in_apipass),
            "usernameOrEmailAddress": str(in_apiusers),
            "tenancyName": str(in_Tenant)
        })
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        json_object = json.loads(response.text)
        print('token code: ' + str(response.status_code))
        return str(json_object["result"])
    except Exception as e:
        return 'Fail:'

# Function: add queue item on UiPath Orchestrator
def httpAddQueueItems(in_URL, in_Token, in_QueueName, in_strjson):
    try:
        print('add queue item...')
        url = in_URL
        payload = json.dumps({
            "itemData": {
                "Priority": "Normal",
                "Name": str(in_QueueName),
                "SpecificContent": {
                    "in_pjsorequest": in_strjson
                },
                "Reference": "ggg"
            }
        })
        headers = {
            'X-UIPATH-OrganizationUnitId': '',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + in_Token
        }
        response = requests.request("POST", url, headers=headers, data=payload)
    except Exception as e:
        print(e)
        return 'Fail'

# CONSTANTS
OnPremuser = "xxxx"
OnPrempass = "xxx!"
OnPremtenant = "Default"
OnPremUrlAuth = "xxxx"
OnPremUrlAddQueue = "https://xxxx"
OnPremQueue = "JSON"
OnPremPJSON = strQueue

OnPremtoken = httpAuthenticate(OnPremuser, OnPrempass, OnPremtenant, OnPremUrlAuth)
httpAddQueueItems(OnPremUrlAddQueue, OnPremtoken, OnPremQueue, OnPremPJSON)
What you are trying to achieve is not possible this way, because you are reading a shared variable that is only assigned inside the consumer callback: script 2 reads strQueue before RabbitMQ has delivered anything (a race condition). Importing strQueue also only copies its value at import time (None); rebinding the name in script 1 later does not update script 2.
Moreover, only one bytecode instruction can be executed at a time, which is to say only one CPU-bound task can run at a time.
P.S.: It can be achieved by running a consumer for the RabbitMQ producer and then handing the received JSON to script 2 explicitly, as sketched below.
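A minimal sketch of that idea (the function forward_to_orchestrator is hypothetical, a wrapper around the two existing functions in script 2): instead of importing a variable from script 2, the consumer callback calls into script 2 with the message body.

    # script1.py (sketch): hand each message to script 2 via a function call
    import pika
    from script2 import forward_to_orchestrator

    def on_response(ch, method, props, body):
        message = body.decode()
        print(" [x] Received %r" % message)
        forward_to_orchestrator(message)  # pass the value instead of sharing a variable

    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.basic_consume(queue='queue', on_message_callback=on_response, auto_ack=True)
    channel.start_consuming()

    # script2.py (sketch): wrap the existing functions so they run once per message
    def forward_to_orchestrator(strjson):
        token = httpAuthenticate(OnPremuser, OnPrempass, OnPremtenant, OnPremUrlAuth)
        httpAddQueueItems(OnPremUrlAddQueue, token, OnPremQueue, strjson)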
I am preparing code for querying some endpoints. The code works quite well, but it takes too much time. I would like to use the Python multiprocessing module to speed up the process. My main target is to put 12 API queries in parallel. Once the jobs are processed, I would like to fetch the results and put them into a list of dictionaries, one response per dictionary. The API responses are in JSON format. I am new to Python and don't have experience with this kind of case.
The code I want to run in parallel is below.
def api_query_process(cloud_type, api_name, cloud_account, resource_type):
    url = "xxx"
    payload = {
        "limit": 0,
        "query": f'config from cloud.resource where cloud.type = \'{cloud_type}\' AND api.name = \'{api_name}\' AND '
                 f'cloud.account = \'{cloud_account}\'',
        "timeRange": {
            "relativeTimeType": "BACKWARD",
            "type": "relative",
            "value": {
                "amount": 0,
                "unit": "minute"
            }
        },
        "withResourceJson": True
    }
    headers = {
        "content-type": "application/json; charset=UTF-8",
        "x-redlock-auth": api_token_input
    }
    response = requests.request("POST", url, json=payload, headers=headers)
    result = response.json()

    resource_count = len(result["data"]["items"])
    if resource_count:
        dictionary = dictionary_create(cloud_type, cloud_account, resource_type, resource_count)
    else:
        dictionary = dictionary_create(cloud_type, cloud_account, resource_type, 0)
    property_list_summary.append(dictionary)
Interesting problem, and I think you should think about idempotency: what would happen if you hit the endpoint repeatedly? You can use multiprocessing with or without a lock.
Without a lock:
import multiprocessing

with multiprocessing.Pool(processes=12) as pool:
    jobs = []
    for _ in range(12):
        # pass the function and its argument tuple separately;
        # calling api_query_process(*args) here would run it in the parent process
        jobs.append(pool.apply_async(api_query_process, args))
    for job in jobs:
        job.wait()
With a lock:
import multiprocessing

multiprocessing_lock = multiprocessing.Lock()

def locked_api_query_process(cloud_type, api_name, cloud_account, resource_type):
    with multiprocessing_lock:
        api_query_process(cloud_type, api_name, cloud_account, resource_type)

with multiprocessing.Pool(processes=12) as pool:
    jobs = []
    for _ in range(12):
        jobs.append(pool.apply_async(locked_api_query_process, args))
    for job in jobs:
        job.wait()
Can't really do an end-to-end test, but hopefully this general setup helps you get it up and running.
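Since the goal is a list of dictionaries, one more hint: apply_async returns an AsyncResult, and its .get() method yields the function's return value. A sketch of collecting results that way (assuming api_query_process is changed to return its dictionary, and all_query_args is a hypothetical list of the 12 argument tuples):

    # Sketch: collect return values instead of appending to a shared list.
    import multiprocessing

    if __name__ == "__main__":
        with multiprocessing.Pool(processes=12) as pool:
            results = [pool.apply_async(api_query_process, args) for args in all_query_args]
            property_list_summary = [r.get() for r in results]  # .get() blocks until each job is done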
Since an HTTP request is an I/O-bound operation, you do not need multiprocessing. You can use threads to get better performance. Something like the following would help.
MAX_WORKERS says how many requests you want to send in parallel
API_INPUTS are all the requests you want to make
Untested code sample:
from concurrent.futures import ThreadPoolExecutor

import requests

API_TOKEN = "xyzz"
MAX_WORKERS = 4
API_INPUTS = (
    ("cloud_type_one", "api_name_one", "cloud_account_one", "resource_type_one"),
    ("cloud_type_two", "api_name_two", "cloud_account_two", "resource_type_two"),
    ("cloud_type_three", "api_name_three", "cloud_account_three", "resource_type_three"),
)

def make_api_query(api_token_input, cloud_type, api_name, cloud_account):
    url = "xxx"
    payload = {
        "limit": 0,
        "query": f'config from cloud.resource where cloud.type = \'{cloud_type}\' AND api.name = \'{api_name}\' AND '
                 f'cloud.account = \'{cloud_account}\'',
        "timeRange": {
            "relativeTimeType": "BACKWARD",
            "type": "relative",
            "value": {
                "amount": 0,
                "unit": "minute"
            }
        },
        "withResourceJson": True
    }
    headers = {
        "content-type": "application/json; charset=UTF-8",
        "x-redlock-auth": api_token_input
    }
    response = requests.request("POST", url, json=payload, headers=headers)
    return response.json()

def main():
    futures = []
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
        for (cloud_type, api_name, cloud_account, resource_type) in API_INPUTS:
            futures.append(
                pool.submit(make_api_query, API_TOKEN, cloud_type, api_name, cloud_account)
            )

    property_list_summary = []
    for future, api_input in zip(futures, API_INPUTS):
        api_response = future.result()
        cloud_type, api_name, cloud_account, resource_type = api_input
        resource_count = len(api_response["data"]["items"])
        dictionary = dictionary_create(cloud_type, cloud_account, resource_type, resource_count)
        property_list_summary.append(dictionary)

if __name__ == "__main__":
    main()
I think using async functions would help a lot in speeding this up.
Your code blocks while it waits for a response from the external API, so using more processes or threads is overkill; you don't need more resources on your end. Instead, you should make your code fire the next request rather than idling until a response arrives. This can be done using coroutines.
You could use aiohttp instead of requests, collect the individual tasks, and execute them in an event loop.
Here is a small example that runs GET requests and collects the JSON bodies from the responses. It should be easy to adapt to your use case.
import asyncio

from aiohttp import ClientSession

RESULTS = dict()

async def get_url(url, session):
    async with session.get(url) as response:
        print("Status:", response.status)
        print("Content-type:", response.headers['content-type'])
        result = await response.json()
        RESULTS[url] = result

async def get_all_urls(urls):
    async with ClientSession() as session:
        tasks = [get_url(url, session) for url in urls]
        await asyncio.gather(*tasks)

if __name__ == "__main__":
    urls = [
        "https://accounts.google.com/.well-known/openid-configuration",
        "https://www.facebook.com/.well-known/openid-configuration/"
    ]
    asyncio.run(get_all_urls(urls=urls))
    print(RESULTS.keys())
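As a side note (a variation of my own, not part of the snippet above): asyncio.gather returns the coroutines' results in input order, so the module-level RESULTS dict can be avoided by having the coroutine return its result. Using the same imports as above:

    # Variation: let gather() collect the results instead of a global dict
    async def fetch_json(url, session):
        async with session.get(url) as response:
            return await response.json()

    async def get_all_urls(urls):
        async with ClientSession() as session:
            results = await asyncio.gather(*(fetch_json(url, session) for url in urls))
            return dict(zip(urls, results))  # results come back in the same order as urls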
I created a bot to monitor the comments on a post: if there is a new comment, it should automatically send a private reply. But instead I get a Request [400] error.
def monitor_comment():
    print("Bot is monitoring comments")
    time.sleep(5)
    comment_data = graph.get_connections(COMBINED_POST_ID_TO_MONITOR, "comments", order='reverse_chronological')
    commends = []
    for comment in comment_data['data'][:10]:
        commends.append(comment)
    data = commends[0]['id']
    data_converted = str(data)
    # time.sleep(5)
    print(data)
    return data_converted

def private_reply(comment_ids):
    url = "https://graph.facebook.com/v12.0/me/messages?"
    access = {"access_token": Page_Token}
    params = {
        "recipient": {
            "comment_id": comment_ids
        },
        "message": {
            "text": "Testing Private_Replies"
        }
    }
    request = requests.post(url=url, files=access, json=params)
    print(request)
This is the log:
{"error":{"message":"An active access token must be used to query information about the current user.","type":"OAuthException","code":2500,"fbtrace_id":"AMCiqy1Aw8CyODPlUBE1b98"}}
I have been struggling to send a signed request to Binance Futures using a signature.
I found example code on Stack Overflow ("Binance API call with SHA56 and Python requests"), and an answer there mentions using hmac as below, but unfortunately I still don't see how to write this example. Could anyone show what the code for this example should look like? I am really uncomfortable with signed requests. Thanks a lot for your understanding and your help. The advice given:
params = urlencode({
    "signature" : hashedsig,
    "timestamp" : servertimeint,
})
hashedsig = hmac.new(secret.encode('utf-8'), params.encode('utf-8'), hashlib.sha256).hexdigest()
Original example:
import requests, json, time, hashlib

apikey = "myactualapikey"
secret = "myrealsecret"

test = requests.get("https://api.binance.com/api/v1/ping")
servertime = requests.get("https://api.binance.com/api/v1/time")

servertimeobject = json.loads(servertime.text)
servertimeint = servertimeobject['serverTime']

hashedsig = hashlib.sha256(secret)

userdata = requests.get("https://api.binance.com/api/v3/account",
    params = {
        "signature" : hashedsig,
        "timestamp" : servertimeint,
    },
    headers = {
        "X-MBX-APIKEY" : apikey,
    }
)
print(userdata)
The proper way would be:
import hmac
import hashlib
import json
import requests
from urllib.parse import urlencode

apikey = "myKey"
secret = "mySecret"

servertime = requests.get("https://api.binance.com/api/v1/time")
servertimeobject = json.loads(servertime.text)
servertimeint = servertimeobject['serverTime']

params = urlencode({
    "timestamp" : servertimeint,
})
hashedsig = hmac.new(secret.encode('utf-8'), params.encode('utf-8'), hashlib.sha256).hexdigest()

userdata = requests.get("https://api.binance.com/api/v3/account",
    params = {
        "timestamp" : servertimeint,
        "signature" : hashedsig,
    },
    headers = {
        "X-MBX-APIKEY" : apikey,
    }
)
print(userdata)
print(userdata.text)
Make sure to put the signature as the last parameter or the request will return [400]...
Incorrect:
params = {
    "signature" : hashedsig,
    "timestamp" : servertimeint,
}
Correct:
params = {
    "timestamp" : servertimeint,
    "signature" : hashedsig,
}
At the time of writing, Binance themselves are maintaining a repo with some examples*, using the requests library. Here is a sample in case the link goes down or is moved:
import hmac
import time
import hashlib
import requests
from urllib.parse import urlencode

KEY = ''
SECRET = ''
# BASE_URL = 'https://fapi.binance.com' # production base url
BASE_URL = 'https://testnet.binancefuture.com' # testnet base url

''' ====== begin of functions, you don't need to touch ====== '''

def hashing(query_string):
    return hmac.new(SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256).hexdigest()

def get_timestamp():
    return int(time.time() * 1000)

def dispatch_request(http_method):
    session = requests.Session()
    session.headers.update({
        'Content-Type': 'application/json;charset=utf-8',
        'X-MBX-APIKEY': KEY
    })
    return {
        'GET': session.get,
        'DELETE': session.delete,
        'PUT': session.put,
        'POST': session.post,
    }.get(http_method, 'GET')

# used for sending a request that requires the signature
def send_signed_request(http_method, url_path, payload={}):
    query_string = urlencode(payload)
    # replace single quote with double quote
    query_string = query_string.replace('%27', '%22')
    if query_string:
        query_string = "{}&timestamp={}".format(query_string, get_timestamp())
    else:
        query_string = 'timestamp={}'.format(get_timestamp())

    url = BASE_URL + url_path + '?' + query_string + '&signature=' + hashing(query_string)
    print("{} {}".format(http_method, url))
    params = {'url': url, 'params': {}}
    response = dispatch_request(http_method)(**params)
    return response.json()

# used for sending a public data request
def send_public_request(url_path, payload={}):
    query_string = urlencode(payload, True)
    url = BASE_URL + url_path
    if query_string:
        url = url + '?' + query_string
    print("{}".format(url))
    response = dispatch_request('GET')(url=url)
    return response.json()

# example order parameters (placeholder values)
params = {
    "symbol": "BTCUSDT",
    "side": "BUY",
    "type": "LIMIT",
    "timeInForce": "GTC",
    "quantity": 0.001,
    "price": 59808
}

response = send_signed_request('POST', '/fapi/v1/order', params)
print(response)
Some additional thoughts from myself:
You can also use a newer library, also from Binance, called binance-connector. It is a bit new and has some issues, but it can do the basic operations without you worrying about signed requests (see the sketch after this list).
I wouldn't use serverTime, because that means you need to make an additional request and networks can be slow; I'd follow this example and use int(time.time() * 1000). You may not even need the function.
I deliberately used the POST example, because it is more complicated: you also need to encode and hash your custom parameters.
At the time of writing, v3 is the latest version.
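A rough sketch of the binance-connector route (an assumption from memory; check the library's README for the exact constructor arguments, as the API may have changed):

    # Sketch using binance-connector (pip install binance-connector);
    # the library builds the signature and timestamp for you.
    from binance.spot import Spot

    client = Spot("myKey", "mySecret")
    print(client.account())  # signed endpoint; signing handled internally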
Hope it helps.
* https://github.com/binance/binance-signature-examples/blob/master/python/futures.py