I am trying to run an async function that calls a regular function. I tried this:
import ast
import asyncio
from okta.client import Client as OktaClient

def startFunc(metadataURL, appID):
    print("This is the", metadataURL, appID)

config = {
    'orgUrl': 'https://example.com',
    'token': 'myToken'}
okta_client = OktaClient(config)

async def skipFunc():
    apps, resp, err = await okta_client.list_applications()
    for app in apps:
        appsList = (app.label, app.id)
        for app.label in appsList:
            if app.label == 'John':
                strApp = str(app)
                appJson = ast.literal_eval(strApp)
                metadataURL = (appJson['links']['metadata']['href'])
                appID = (appJson['id'])
                startFunc(metadataURL, appID)  ## Trying to call this function

loop = asyncio.get_event_loop()
loop.run_until_complete(skipFunc())
I do not receive the output. It looks like the function startFunc() didn't execute, and I am not getting any error.
Hope you can help me.
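For reference, calling a regular function from inside a coroutine requires no special machinery; a minimal sketch (with hypothetical names) showing the pattern:

import asyncio

def regular_func(value):
    # An ordinary (sync) function; nothing async about it.
    print("called with", value)

async def coro():
    # A coroutine can call a regular function directly.
    regular_func(42)

asyncio.run(coro())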
When I try to run this code:
import json
import os
import random
from pprint import pprint

import aiohttp
import discord
import requests
from discord.ext import commands
from dotenv import load_dotenv
from mojang import api

# Functions

# Sends a GET request to a given url
def get_info(call):
    r = requests.get(call)
    return r.json()

# Get the sum of coins in the bazaar
def get_bazaar_buy_order_value(bazaar_data):
    sum_coins = 0
    price_increase_threshold = 2
    buy_order_values = []
    # For every product
    for item_name, item_data in bazaar_data.get("products", {}).items():
        item_sum_coins = 0
        # For every buy order
        for idx, buy_order in enumerate(item_data.get("buy_summary", [])):
            # If it's the best price
            if idx == 0:
                item_expected_value = buy_order.get("pricePerUnit", 0)
                item_sum_coins += buy_order.get("amount", 0) * buy_order.get("pricePerUnit", 0)
            # If it's not the best price, check for a reasonable price
            else:
                if buy_order.get("pricePerUnit", 0) < (item_expected_value * price_increase_threshold):
                    item_sum_coins += buy_order.get("amount", 0) * buy_order.get("pricePerUnit", 0)
        buy_order_values.append((item_name, item_sum_coins))
        sum_coins += item_sum_coins
    sort_bazaar_buy_orders_by_value(buy_order_values)
    return sum_coins

# Sorts and displays a list of buy order items by total value
def sort_bazaar_buy_orders_by_value(buy_order_values):
    # Sort items by value
    buy_order_values.sort(key=lambda x: -x[1])
    # Display items and values
    for (item_name, item_sum_coins) in buy_order_values:
        print(f"{item_name.ljust(30, ' ')} | {round(item_sum_coins):,}")
    return

# Returns Bazaar data
def get_bazaar_data():
    return get_info("https://api.hypixel.net/skyblock/bazaar")

# Returns a specific item from the Bazaar
def get_bazaar_item():
    return

# Returns auction info from player uuid
def get_auctions_from_player(uuid):
    return get_info(f"https://api.hypixel.net/skyblock/auction?key={API_KEY}&player={uuid}")

# Returns current mayor/election data
def get_election_data():
    return get_info("https://api.hypixel.net/resources/skyblock/election")

# Returns a list of player profiles
def get_profiles_data():
    return get_info(f"https://sky.shiiyu.moe/api/v2/profile/{example_uuid}")

# Returns player UUID when prompted with the name
async def get_uuid(name):
    return get_info(f"https://sky.shiiyu.moe/api/v2/profile/{name}")

# Discord Functions / Vars
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = discord.Client(intents=discord.Intents.default())
intents = discord.Intents.all()
bot = commands.Bot(command_prefix='/', intents=intents)

# Hypixel Vars
Item = "Diamond"
API_FILE = open("API_KEY.json", "r")
example_name = "4748"
example_uuid = "147ab344d3e54952b74a8b0fedee5534"
uuid_dashed = "147ab344-d3e5-4952-b74a-8b0fedee5534"
API_KEY = json.loads(API_FILE.read())["API_KEY"]
example_player_uuid = "147ab344d3e54952b74a8b0fedee5534"
auctions_player_url = f"https://api.hypixel.net/skyblock/auction?key={API_KEY}&player={example_player_uuid}"

# Commands
@bot.command(name='bazaar', description="Gives a detailed readout of a certain item in the bazaar", brief="Get data of an item in bazaar")
async def bazaar(ctx):
    await ctx.send(get_bazaar_data())
    await ctx.send(API_KEY)

@bot.command(name="bazaartotal", description="Show the total amount of coins on the bazaar at any given point", brief="Shows the amount of coins in the bazaar")
async def baztot(ctx):
    await ctx.send(get_bazaar_buy_order_value(get_bazaar_data()))

@bot.command(name="apikey", description="Gives 4748's API key, make sure to remove me once publicly available!", brief="API Key")
async def key(ctx):
    await ctx.send(API_KEY)

@bot.command(name="profiles", description='Get a list of player profiles and data about them', brief="List player profiles")
async def prof(ctx):
    await ctx.send("Username to check?")
    message = client.wait_for('message', check=lambda m: m.user == ctx.user)
    username = str(message.content)
    uuid = get_uuid(username)
    pprint(uuid)
    await ctx.send(uuid)

bot.run(TOKEN)
I get this error
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: AttributeError: loop attribute cannot be accessed in non-async contexts. Consider using either an asynchronous main function and passing it to asyncio.run or using asynchronous initialisation hooks such as Client.setup_hook
Anyone have a fix for this? The bot runs normally, but once I try to run /profiles it gives me that error; the other commands work fine. I've changed my code multiple times, put the get_uuid command in async, and googled for a few hours. Any help is appreciated!
Your Bot variable is called bot, but you're using client in your wait_for statement.
You've got both a discord.Client ("client") and a commands.Bot ("bot") instance. This doesn't make a whole lot of sense. If you only need Client features then use Client, if you want Bot features then use Bot. You can't use both at the same time.
Also, wait_for is a coroutine, so you should await it.
# Yours:
message = client.wait_for('message', check=lambda m: m.user == ctx.user)
#         ^^^^^^
# Missing await keyword & wrong bot variable
# Correct:
message = await bot.wait_for(...)
#         ^^^^^ ^^^
Docs: https://discordpy.readthedocs.io/en/stable/api.html?highlight=wait_for#discord.Client.wait_for
PS requests is blocking and will make your whole bot freeze. Consider looking into an asynchronous http library like aiohttp.
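For example, a minimal non-blocking replacement for get_info() might look like the sketch below (session handling simplified; in a real bot you would reuse one session):

import aiohttp

async def get_info_async(call):
    # Opens a session per call for brevity; reuse a single session in production.
    async with aiohttp.ClientSession() as session:
        async with session.get(call) as r:
            return await r.json()

Commands would then do await get_info_async(...) instead of calling get_info(...).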
Try this:
message = await client.wait_for('message', check=lambda m: m.user == ctx.user)
I advise you to remove the 'name' argument from the command decorator; it's better to just name the function accordingly. Also add a channel check (m.channel == ctx.channel) to wait_for, and consider splitting the file into several modules. Putting that together, see the sketch below.
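Combining the suggestions from both answers, the wait_for call might look like this; note that a discord.py Message exposes .author, not .user:

message = await bot.wait_for(
    'message',
    check=lambda m: m.author == ctx.author and m.channel == ctx.channel,
)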
I am using beanie==1.10.1.
I want to perform a bulk operation that updates multiple documents with upsert=True. I expect the following code to insert the full document if the find query returns no results.
I was using this as a reference: https://github.com/roman-right/beanie/blob/main/tests/odm/documents/test_bulk_write.py
Here is the full code:
import asyncio
import random

import beanie
from beanie import BulkWriter
from beanie.odm.operators.update.general import Set
from motor.motor_asyncio import AsyncIOMotorClient

class TestDoc(beanie.Document):
    a: str
    b: int

async def init_mongo():
    mongo_client = AsyncIOMotorClient("mongodb://127.0.0.1:27017")
    await beanie.init_beanie(
        database=mongo_client.db_name, document_models=[TestDoc]
    )

async def run_test():
    await init_mongo()
    docs = [TestDoc(a=f"id_{i}", b=random.randint(1, 100)) for i in range(10)]
    async with BulkWriter() as bulk_writer:
        for doc in docs:
            await TestDoc \
                .find_one({TestDoc.a: doc.a}, bulk_writer=bulk_writer) \
                .upsert(Set({TestDoc.b: doc.b}), on_insert=doc, bulk_writer=bulk_writer)
            # .update_one(Set(doc), bulk_writer=bulk_writer, upsert=True)
    read_docs = await TestDoc.find().to_list()
    print(f"read_docs: {read_docs}")

if __name__ == '__main__':
    pool = asyncio.get_event_loop()
    pool.run_until_complete(run_test())
After executing, no documents are inserted into the db, neither with the .upsert() nor with the .update_one() method. What is the correct way to achieve that logic?
With pymongo, such an operation would be written like so (and it works):
def write_reviews(self, docs: List[TestDoc]):
    operations = []
    for doc in docs:
        doc_dict = to_dict(doc)
        update_operation = pymongo.UpdateOne(
            {"a": doc.a}, {"$set": doc_dict}, upsert=True
        )
        operations.append(update_operation)
    result = self.test_collection.bulk_write(operations)
This is old, and you probably figured it out, but since it's the first result in a Google search for me I thought I would answer.
The current way to use bulk_writer is to just wrap the operations and then commit them.
from beanie.odm.operators.update.general import Set

async def run_test():
    await init_mongo()
    docs = [TestDoc(a=f"id_{i}", b=random.randint(1, 100)) for i in range(10)]
    async with BulkWriter() as bulk_writer:
        for doc in docs:
            await TestDoc \
                .find_one({TestDoc.a: doc.a}) \
                .upsert(Set({TestDoc.b: doc.b}), on_insert=doc)
        await bulk_writer.commit()  # commit() is a coroutine, so await it
    read_docs = await TestDoc.find().to_list()
    print(f"read_docs: {read_docs}")
Original Test in Beanie Test Suite
OK, this is my working code:
data = aiohttp.FormData()
data.add_field('title', title)
data.add_field('author', user)
data.add_field('upload_file', open(path, 'rb'))
up_session = aiohttp.ClientSession()
async with up_session.post(url="http://example.com/upload.php", data=data) as response:
    resp = await response.text()
    resp = json.loads(resp)
What I want to know is how to add some sort of progress monitoring to it. I can't find any kind of callback in the docs, nor a generator that works with MultipartWriter (FormData is just a helper for MultipartWriter). I'm losing my mind here. Thanks in advance.
EDIT: I used to get this with requests and requests-toolbelt (MultipartEncoder, MultipartEncoderMonitor), but those are not async, and aiohttp is such a complete library that I can't believe you can't do that.
encoder = MultipartEncoder(
    fields={
        'upload_file': (ntpath.basename(path), open(path, 'rb'),
                        'application/octet-stream'),
        'title': str(''),
        'author': str(user),
    })
upload_data = MultipartEncoderMonitor(encoder, upload_progress)
headers = {'Content-Type': upload_data.content_type}
headers.update(http_headers)
r_2 = session.post(url=url_domain + "/repository/repository_ajax.php?action=upload", data=upload_data)

def upload_progress(monitor):
    print(str(monitor.len) + " - " + "{:.2f}%".format(monitor.bytes_read / monitor.len))
I did find a solution. It's a little hacky, but it works, and it's kind of general for other libraries that don't provide good progress tracking but accept binary streams. It's a wrapper around BufferedReader. Technically it counts the bytes as they're read instead of as they're sent, but for a progress bar it's kind of the same.
from pathlib import Path
from io import BufferedReader
from aiohttp import ClientSession

class ProgressFileReader(BufferedReader):
    def __init__(self, filename, read_callback=None):
        f = open(filename, "rb")
        self.__read_callback = read_callback
        super().__init__(raw=f)
        self.length = Path(filename).stat().st_size

    def read(self, size=None):
        calc_sz = size
        if not calc_sz:
            calc_sz = self.length - self.tell()
        if self.__read_callback:
            self.__read_callback(self.tell(), self.length)
        return super(ProgressFileReader, self).read(size)

def progress_callback(current, total):
    print(100 * current // total)

async def main():
    with ProgressFileReader(filename="./file.jpg", read_callback=progress_callback) as file:
        upload_payload = {
            "foo": "bar",
            "file": file,
        }
        async with ClientSession() as session:
            async with session.post("http://example.org", data=upload_payload) as response:
                resp = await response.text()
The wrapper itself is sync, but it's no biggie, and it can be made async if you wrap aiofiles instead of the builtin open. Credit for the wrapper class idea goes to some other Stack Overflow question that I can't find now.
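For instance, one way to make it fully async is an async generator over aiofiles that aiohttp consumes as the request body. A rough sketch (it assumes aiofiles is installed, and note that a bare async generator sends a raw body rather than multipart form data):

import aiofiles
import aiohttp
from pathlib import Path

async def file_sender(path, chunk_size=64 * 1024, progress=None):
    # Async generator yielding file chunks; reports bytes read so far.
    total = Path(path).stat().st_size
    sent = 0
    async with aiofiles.open(path, "rb") as f:
        while True:
            chunk = await f.read(chunk_size)
            if not chunk:
                break
            sent += len(chunk)
            if progress:
                progress(sent, total)  # e.g. the progress_callback above
            yield chunk

async def upload(url, path):
    async with aiohttp.ClientSession() as session:
        # aiohttp accepts an async generator as the request payload.
        async with session.post(url, data=file_sender(path, progress=progress_callback)) as response:
            return await response.text()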
I am very new to asynchronous programming and I was playing around with httpx. I have the following code, and I am sure I am doing something wrong; I just don't know what it is. There are two methods, one synchronous and the other asynchronous; both pull from Google Finance. On my system I am seeing the time spent as follows:
Asynchronous: 5.015218734741211
Synchronous: 5.173618316650391
Here is the code:
import httpx
import asyncio
import time
#
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
#
def sync_pull(url):
    r = httpx.get(url)
    print(r.status_code)
#
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
#
async def async_pull(url):
    async with httpx.AsyncClient() as client:
        r = await client.get(url)
        print(r.status_code)
#
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
#
if __name__ == "__main__":
    goog_fin_nyse_url = 'https://www.google.com/finance/quote/'
    tickers = ['F', 'TWTR', 'CVX', 'VZ', 'GME', 'GM', 'PG', 'AAL',
               'MARK', 'AAP', 'THO', 'NGD', 'ZSAN', 'SEAC',
               ]
    print("Running asynchronously...")
    async_start = time.time()
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        asyncio.run(async_pull(url))
    async_end = time.time()
    print(f"Time lapsed is: {async_end - async_start}")
    print("Running synchronously...")
    sync_start = time.time()
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        sync_pull(url)
    sync_end = time.time()
    print(f"Time lapsed is: {sync_end - sync_start}")
I had hoped the asynchronous approach would take a fraction of the time the synchronous approach requires. What am I doing wrong?
When you say asyncio.run(async_pull(url)) you're saying run async_pull and wait for the result to come back. Since you do this once per ticker in your loop, you're essentially using asyncio to run things synchronously and won't see performance benefits.
What you need to do is create several async calls and run them concurrently. There are several ways to do this; the easiest is to use asyncio.gather (see https://docs.python.org/3/library/asyncio-task.html#asyncio.gather), which takes in a sequence of coroutines and runs them concurrently. Adapting your code is fairly straightforward: you create an async function that takes a list of urls, calls async_pull on each of them, passes those coroutines to asyncio.gather, and awaits the results. Adapted, your code looks like the following:
import httpx
import asyncio
import time

def sync_pull(url):
    r = httpx.get(url)
    print(r.status_code)

async def async_pull(url):
    async with httpx.AsyncClient() as client:
        r = await client.get(url)
        print(r.status_code)

async def async_pull_all(urls):
    return await asyncio.gather(*[async_pull(url) for url in urls])

if __name__ == "__main__":
    goog_fin_nyse_url = 'https://www.google.com/finance/quote/'
    tickers = ['F', 'TWTR', 'CVX', 'VZ', 'GME', 'GM', 'PG', 'AAL',
               'MARK', 'AAP', 'THO', 'NGD', 'ZSAN', 'SEAC',
               ]
    print("Running asynchronously...")
    async_start = time.time()
    results = asyncio.run(async_pull_all([goog_fin_nyse_url + ticker + ':NYSE' for ticker in tickers]))
    async_end = time.time()
    print(f"Time lapsed is: {async_end - async_start}")
    print("Running synchronously...")
    sync_start = time.time()
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        sync_pull(url)
    sync_end = time.time()
    print(f"Time lapsed is: {sync_end - sync_start}")
Running this way, the asynchronous version runs in about a second for me as opposed to seven synchronously.
Here's a nice pattern I use (I tend to change it a little each time). In general, I make a module async_utils.py and just import the top-level fetching function (e.g. here fetch_things), and then my code is free to forget about the internals (other than error handling). You can do it in other ways, but I like the 'functional' style of aiostream, and often find the repeated calls to the process function take certain defaults I set using functools.partial.
Note: async currying with partials is Python 3.8+ only
You can pass in a tqdm.tqdm progress bar to pbar (initialised with known size total=len(things)) to have it update when each async response is processed.
import asyncio
import httpx
from aiostream import stream
from functools import partial

__all__ = ["fetch", "process_thing", "async_fetch_urlset", "fetch_things"]

async def fetch(session, url, raise_for_status=False):
    response = await session.get(str(url))
    if raise_for_status:
        response.raise_for_status()
    return response

async def process_thing(data, things, pbar=None, verbose=False):
    # Map the response back to the thing it came from in the things list
    source_url = data.history[0].url if data.history else data.url
    thing = next(t for t in things if source_url == t.get("thing_url"))
    # Handle `data.content` here, where `data` is the `httpx.Response`
    if verbose:
        print(f"Processing {source_url=}")
    thing.update({"computed_value": "result goes here"})
    if pbar:
        pbar.update()

async def async_fetch_urlset(urls, things, pbar=None, verbose=False, timeout_s=10.0):
    timeout = httpx.Timeout(timeout=timeout_s)
    async with httpx.AsyncClient(timeout=timeout) as session:
        ws = stream.repeat(session)
        xs = stream.zip(ws, stream.iterate(urls))
        ys = stream.starmap(xs, fetch, ordered=False, task_limit=20)
        process = partial(process_thing, things=things, pbar=pbar, verbose=verbose)
        zs = stream.map(ys, process)
        return await zs

def fetch_things(urls, things, pbar=None, verbose=False):
    return asyncio.run(async_fetch_urlset(urls, things, pbar, verbose))
In this example, the input is a list of dicts (with string keys and values), things: list[dict[str,str]], and the key "thing_url" is accessed to retrieve the URL. Having a dict or object is desirable instead of just the URL string for when you want to 'map' the result back to the object it came from. The process_thing function is able to modify the input list things in-place (i.e. any changes are not scoped within the function, they change it back in the scope that called it).
You'll often find errors arise during async runs that you don't get when running synchronously, so you'll need to catch them and retry. A common gotcha is to retry at the wrong level (e.g. around the entire loop).
In particular, you'll want to import and catch httpcore.ConnectTimeout, httpx.ConnectTimeout, httpx.RemoteProtocolError, and httpx.ReadTimeout.
Increasing the timeout_s parameter will reduce the frequency of the timeout errors by letting the AsyncClient 'wait' for longer, but doing so may in fact slow down your program (it won't "fail fast" quite as fast).
Here's an example of how to use the async_utils module given above:
from async_utils import fetch_things
import httpx
import httpcore

# UNCOMMENT THIS TO SEE ALL THE HTTPX INTERNAL LOGGING
#import logging
#log = logging.getLogger()
#log.setLevel(logging.DEBUG)
#log_format = logging.Formatter('[%(asctime)s] [%(levelname)s] - %(message)s')
#console = logging.StreamHandler()
#console.setLevel(logging.DEBUG)
#console.setFormatter(log_format)
#log.addHandler(console)

things = [
    {"thing_url": "https://python.org", "name": "Python"},
    {"thing_url": "https://www.python-httpx.org/", "name": "HTTPX"},
]
#log.debug("URLSET:" + str(list(t.get("thing_url") for t in things)))

def make_urlset(things):
    """Make a URL generator (empty if all have been fetched)"""
    urlset = (t.get("thing_url") for t in things if "computed_value" not in t)
    return urlset

retryable_errors = (
    httpcore.ConnectTimeout,
    httpx.ConnectTimeout, httpx.RemoteProtocolError, httpx.ReadTimeout,
)

# ASYNCHRONOUS:
max_retries = 100
for i in range(max_retries):
    print(f"Retry {i}")
    try:
        urlset = make_urlset(things)
        foo = fetch_things(urls=urlset, things=things, verbose=True)
    except retryable_errors as exc:
        print(f"Caught {exc!r}")
        if i == max_retries - 1:
            raise
    except Exception:
        raise

# SYNCHRONOUS:
#for t in things:
#    resp = httpx.get(t["thing_url"])
In this example I set a key "computed_value" on a dictionary once the async response has successfully been processed, which then prevents that URL from being entered into the generator on the next round (when make_urlset is called again). In this way, the generator gets progressively smaller. You can also do it with lists, but I find a generator of the URLs to be pulled works reliably. For an object, you'd change the dictionary key assignment/access (update/in) to attribute assignment/access (setattr/hasattr).
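As a usage note for the pbar parameter mentioned earlier, wiring in a tqdm progress bar could look like this (a minimal sketch, assuming tqdm is installed; process_thing calls pbar.update() once per processed response):

from tqdm import tqdm

pbar = tqdm(total=len(things))  # total size is known up front
fetch_things(urls=make_urlset(things), things=things, pbar=pbar)
pbar.close()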
I wanted to post a working version of the code using futures; it has virtually the same run time:
import httpx
import asyncio
import time
#
#--------------------------------------------------------------------
# Synchronous pull
#--------------------------------------------------------------------
#
def sync_pull(url):
    r = httpx.get(url)
    print(r.status_code)
#
#--------------------------------------------------------------------
# Asynchronous Pull
#--------------------------------------------------------------------
#
async def async_pull(url):
    async with httpx.AsyncClient() as client:
        r = await client.get(url)
        print(r.status_code)
#
#--------------------------------------------------------------------
# Build tasks queue & execute coroutines
#--------------------------------------------------------------------
#
async def build_task() -> None:
    goog_fin_nyse_url = 'https://www.google.com/finance/quote/'
    tickers = ['F', 'TWTR', 'CVX', 'VZ', 'GME', 'GM', 'PG', 'AAL',
               'MARK', 'AAP', 'THO', 'NGD', 'ZSAN', 'SEAC',
               ]
    tasks = []
    #
    ## The following block of code will create a queue full of function
    ## calls
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        tasks.append(asyncio.ensure_future(async_pull(url)))
    start_time = time.time()
    #
    ## This block of code will dereference the function calls
    ## from the queue, which will cause them all to run
    ## rapidly
    await asyncio.gather(*tasks)
    #
    ## Calculate time elapsed
    finish_time = time.time()
    elapsed_time = finish_time - start_time
    print(f"\nTime spent processing: {elapsed_time}")

# Start from here
if __name__ == "__main__":
    asyncio.run(build_task())
I use the python-telegram-bot wrapper.
For a command that is invoked using '/', I have the following method:
def advice(update, context):
    with open("advice.txt", "rt") as file_advice:
        line = file_advice.readlines()
        advice_message = random.choice(line)
    context.bot.send_message(chat_id=update.effective_chat.id, text=advice_message)

advice_handler = CommandHandler('advice', advice)
dispatcher.add_handler(advice_handler)
I want this method to be called in inline mode when '/advice' is typed, and there should be no output in all other cases.
I'm familiar with the examples from the wrapper developers, where they show the following code:
def inline_caps(update, context):
    query = update.inline_query.query
    if not query:
        return
    results = list()
    results.append(
        InlineQueryResultArticle(
            id=query.upper(),
            title='Caps',
            input_message_content=InputTextMessageContent(query.upper())
        )
    )
    context.bot.answer_inline_query(update.inline_query.id, results)

inline_caps_handler = InlineQueryHandler(inline_caps)
dispatcher.add_handler(inline_caps_handler)
I tried to swap query.upper() with advice(update, context), but I get various errors about unknown types, so it feels like I don't know what I'm doing.
It won't work like that because InputTextMessageContent() requires a string and your function doesn't return one. In that case I would rewrite the advice function so you can reuse it in both your command handler and your inline handler.
import random
from uuid import uuid4

from telegram import InlineQueryResultArticle, InputTextMessageContent
from telegram.ext import CommandHandler, InlineQueryHandler

def getAdvice():
    with open("advice.txt", "rt") as file_advice:
        line = file_advice.readlines()
        advice_message = random.choice(line)
    return advice_message

def advice(update, context):
    advice_message = getAdvice()
    context.bot.send_message(chat_id=update.effective_chat.id, text=advice_message)

def inlinequery(update, context):
    query = update.inline_query.query
    results = [
        InlineQueryResultArticle(
            id=str(uuid4()),
            title="/advice",
            input_message_content=InputTextMessageContent(
                getAdvice()))
    ]
    update.inline_query.answer(results)

advice_handler = CommandHandler('advice', advice)
dispatcher.add_handler(advice_handler)

inlinequery_handler = InlineQueryHandler(inlinequery)
dispatcher.add_handler(inlinequery_handler)