aiohttp.ClientSession hangs without hitting timeout threshold - python

My code is pretty similar to this:
import aiohttp
import asyncio

class Multiple_HTTP:
    #----------------------------------------------------------------------------------
    @staticmethod
    async def fetch(session, url):
        try:
            async with session.get(url) as response:
                status = response.status
                text = await response.text()
                return (url, status, text)
        except Exception as e:
            return e
    #----------------------------------------------------------------------------------
    @staticmethod
    async def fetch_all(urls, timeout):
        loop = asyncio.get_event_loop()
        session_timeout = aiohttp.ClientTimeout(total=None, sock_connect=timeout, sock_read=timeout)
        async with aiohttp.ClientSession(loop=loop, timeout=session_timeout,
                                         connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
            cwlist = [loop.create_task(Multiple_HTTP.fetch(session, url)) for url in urls]
            results = await asyncio.gather(*cwlist, return_exceptions=True)
            return results
    #----------------------------------------------------------------------------------
    @staticmethod
    def run(urls, timeout=5):
        return asyncio.run(Multiple_HTTP.fetch_all(urls, timeout))
    #----------------------------------------------------------------------------------
And, for some URLs, it hangs at response.text().
It does not honour the timeout, and the page renders fine in Chrome running on a Windows box.
Is there something wrong with my code?
How can I modify it so that it works (work as in raising a timeout exception instead of just hanging, for example)?
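One detail worth noting: with total=None, only the per-connect and per-read socket timers apply, and sock_read is reset every time a chunk arrives, so a server that keeps the connection open and trickles data can stall response.text() indefinitely. A minimal, unverified sketch of one way to enforce a hard deadline is to wrap each fetch in asyncio.wait_for (fetch_with_deadline below is a made-up helper name, not part of the original class):

async def fetch_with_deadline(session, url, deadline):
    # asyncio.wait_for cancels the fetch and raises asyncio.TimeoutError once
    # `deadline` seconds have elapsed, regardless of whether the socket-level
    # timers keep being reset by a slow but steady trickle of data.
    try:
        return await asyncio.wait_for(Multiple_HTTP.fetch(session, url), timeout=deadline)
    except asyncio.TimeoutError as e:
        return e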

Related

Getting "ValueError: too many file descriptors in select()" despite of using asyncio.Semaphore

I am making some requests to Azure Maps. I have a subscription key (subscriptionKey) and a list of addresses I want to look up (addresses):
query_template = 'https://atlas.microsoft.com/search/address/json?&subscription-key={}&api-version=1.0&language=en-US&query={}'
queries = [query_template.format(subscriptionKey, address) for address in addresses]
I come from this question (not necessary to read it to understand the following) and everything worked fine in my sample of 1k queries. However, when I tried 10k queries I got ValueError: too many file descriptors in select(). I added some of the answers from here and now my code looks like this:
import asyncio
from aiohttp import ClientSession
from ssl import SSLContext
from sys import platform
import nest_asyncio
nest_asyncio.apply()

# Function to get a JSON from the result of a query
async def fetch(url, session):
    async with session.get(url, ssl=SSLContext()) as response:
        return await response.json()

# Function to run 'fetch()' with a Semaphore and check that the result is a dictionary (JSON)
async def fetch_sem(sem, attempts, url, session):
    semaphore = asyncio.Semaphore(sem)
    async with semaphore:
        for _ in range(attempts):
            result = await fetch(url, session)
            if isinstance(result, dict):
                break
        return result

# Function to search for all queries
async def fetch_all(sem, attempts, urls):
    async with ClientSession() as session:
        return await asyncio.gather(*[fetch_sem(sem, attempts, url, session) for url in urls], return_exceptions=True)

# Making the queries
if __name__ == '__main__':
    if platform == 'win32':
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    loop = asyncio.get_event_loop()
    results = loop.run_until_complete(fetch_all(1000, 3, queries))
Note that I have included both asyncio.Semaphore and asyncio.ProactorEventLoop(). But despite these additions, I still get ValueError: too many file descriptors in select().
Could I get some help with this issue? Thank you!
The purpose of the semaphore is to count how many fetch operations are currently running and enforce an upper limit. That's why you need exactly one semaphore instance shared by all of them.
You can create it in fetch_all and pass it to fetch_sem:
async def fetch_sem(semaphore, attempts, url, session):
    async with semaphore:
        ...
        return result

async def fetch_all(limit, attempts, urls):
    semaphore = asyncio.Semaphore(limit)
    async with ClientSession() as session:
        return await asyncio.gather(*[fetch_sem(semaphore, attempts, url, session) for url in urls], return_exceptions=True)

...
results = loop.run_until_complete(fetch_all(1000, 3, queries))
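Filled in with the retry loop from the question, fetch_sem would look roughly like this (a sketch, not tested against the Azure Maps endpoint):

async def fetch_sem(semaphore, attempts, url, session):
    # The single shared semaphore caps how many fetches run concurrently.
    async with semaphore:
        result = None
        for _ in range(attempts):
            result = await fetch(url, session)
            if isinstance(result, dict):  # a dict means valid JSON came back
                break
        return result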

Python asyncio.gather returns None

I'm using Python asyncio to implement a fast http client.
As you can see in the comment inside the worker function below, I do get the responses as soon as they finish. I would like to get the responses in order, and this is why I'm using asyncio.gather.
Why is it returning None? Can anybody help?
Thank you so much!
import time
import aiohttp
import asyncio

MAXREQ = 100
MAXTHREAD = 500
URL = 'https://google.com'
g_thread_limit = asyncio.Semaphore(MAXTHREAD)

async def worker(session):
    async with session.get(URL) as response:
        await response.read()  # If I print this line I get the responses correctly

async def run(worker, *argv):
    async with g_thread_limit:
        await worker(*argv)

async def main():
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*[run(worker, session) for _ in range(MAXREQ)])

if __name__ == '__main__':
    totaltime = time.time()
    print(asyncio.get_event_loop().run_until_complete(main()))  # I'm getting a None here
    print(time.time() - totaltime)
Your function run doesn't return anything explicitly, so it returns None implicitly. Add return statements and you'll get a result:
async def worker(session):
    async with session.get(URL) as response:
        return await response.read()

async def run(worker, *argv):
    async with g_thread_limit:
        return await worker(*argv)
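Note that main also has to return the gathered results for the final print to show them; asyncio.gather preserves the order of the awaitables you pass in, so the responses come back in request order. A minimal sketch of that last piece:

async def main():
    async with aiohttp.ClientSession() as session:
        # gather returns a list whose order matches the order of the awaitables,
        # so results[i] corresponds to request i
        return await asyncio.gather(*[run(worker, session) for _ in range(MAXREQ)])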

Aiohttp try to get page response with page numbers until hit empty response

I'm trying to write a small function that scrapes data from a JSON endpoint.
The URL is like https://xxxxxxxx.com/products.json?&page= into which I can insert a page number.
While I was using the requests module, I just had a while loop that incremented the page number and broke once I got an empty response (i.e., the page is empty).
Is there a way to do the same thing with aiohttp?
What I have achieved so far is only to pre-generate a certain number of URLs and pass them into the tasks.
I'm wondering if I can use a loop as well and stop when I see an empty response.
Thank you very much.
import asyncio
import pprint

import aiohttp

async def download_one(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            pprint.pprint(await resp.json(content_type=None))

async def download_all(sites):
    tasks = [asyncio.create_task(download_one(site)) for site in sites]
    await asyncio.gather(*tasks)

def main():
    sites = list(map(lambda x: request_url + str(x), range(1, 50)))
    asyncio.run(download_all(sites))
Here is a piece of untested code. Even if it doesn't work as-is, it should give you an idea of how to do the job:
import asyncio
import aiohttp

async def download_one(session, url):
    async with session.get(url) as resp:
        data = await resp.json()
        if not data:
            raise Exception("No data found")  # needs to be there for breaking the loop
        return data

async def download_all(sites):
    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.create_task(download_one(session, site)) for site in sites]
        done, pending = await asyncio.wait(
            tasks, return_when=asyncio.FIRST_EXCEPTION  # returns as soon as any task raises
        )
        for future in pending:
            future.cancel()  # it will shut down all redundant jobs

def main():
    sites = list(map(lambda x: request_url + str(x), range(1, 50)))
    asyncio.run(download_all(sites))
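As for the original question of incrementing the page number until an empty response comes back: sequential paging works with aiohttp much like it did with requests, just inside a coroutine. A minimal sketch, assuming the same placeholder base URL from the question:

import asyncio
import aiohttp

request_url = "https://xxxxxxxx.com/products.json?&page="  # assumed placeholder base URL

async def download_pages():
    results = []
    async with aiohttp.ClientSession() as session:
        page = 1
        while True:
            async with session.get(request_url + str(page)) as resp:
                data = await resp.json(content_type=None)
            if not data:  # empty page -> no more products
                break
            results.append(data)
            page += 1
    return results

# results = asyncio.run(download_pages())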

How to run DB requests asynchronously?

When I run this, it lists the websites in the database one by one with the response code, and it takes about 10 seconds to run through a very small list. It should be much faster and isn't running asynchronously, but I'm not sure why.
import dblogin
import aiohttp
import asyncio
import async_timeout

dbconn = dblogin.connect()
dbcursor = dbconn.cursor(buffered=True)
dbcursor.execute("SELECT thistable FROM adatabase")
website_list = dbcursor.fetchall()

async def fetch(session, url):
    with async_timeout.timeout(30):
        async with session.get(url, ssl=False) as response:
            await response.read()
            return response.status, url

async def main():
    async with aiohttp.ClientSession() as session:
        for all_urls in website_list:
            url = all_urls[0]
            resp = await fetch(session, url)
            print(resp, url)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
    dbcursor.close()
    dbconn.close()
This article explains the details. What you need to do is wrap each fetch call in a Future object, and then pass a list of those to either asyncio.wait or asyncio.gather depending on your needs.
Your code would look something like this:
async def fetch(session, url):
    with async_timeout.timeout(30):
        async with session.get(url, ssl=False) as response:
            await response.read()
            return response.status, url

async def main():
    tasks = []
    async with aiohttp.ClientSession() as session:
        for all_urls in website_list:
            url = all_urls[0]
            task = asyncio.create_task(fetch(session, url))
            tasks.append(task)
        responses = await asyncio.gather(*tasks)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    future = loop.create_task(main())
    loop.run_until_complete(future)
Also, are you sure that loop.close() call is needed? The docs mention that
The loop must not be running when this function is called. Any pending callbacks will be discarded.
This method clears all queues and shuts down the executor, but does not wait for the executor to finish.
As mentioned in the docs and in the link that @user4815162342 posted, it is better to use the create_task method instead of the ensure_future method when we know that the argument is a coroutine. Note that create_task was added in Python 3.7, so earlier versions should continue using ensure_future instead.
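On Python 3.7+, an arguably simpler entry point is asyncio.run, which creates the event loop, runs the coroutine to completion, and closes the loop for you; a minimal sketch of what would replace the manual loop handling above:

if __name__ == '__main__':
    # asyncio.run() creates a fresh event loop, runs main() to completion,
    # and closes the loop afterwards, so no explicit loop/close calls are needed.
    asyncio.run(main())
    dbcursor.close()
    dbconn.close()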

My script encounters an error when it is supposed to run asynchronously

I've written a script in Python using asyncio in association with the aiohttp library to asynchronously parse the names out of the pop-up boxes that open when the contact-info buttons of the different agencies listed in a table on this website are clicked. The webpage displays the tabular content across 513 pages.
I encountered the error too many file descriptors in select() when I tried it with asyncio.get_event_loop(), but I came across this thread where it was suggested to use asyncio.ProactorEventLoop() to avoid such an error, so I used the latter. However, I noticed that, even when I complied with the suggestion, the script collects the names from only a few pages before throwing the following error. How can I fix this?
raise client_error(req.connection_key, exc) from exc
aiohttp.client_exceptions.ClientConnectorError: Cannot connect to host www.tursab.org.tr:443 ssl:None [The semaphore timeout period has expired]
This is my try so far:
import asyncio
import aiohttp
from bs4 import BeautifulSoup

links = ["https://www.tursab.org.tr/en/travel-agencies/search-travel-agency?sayfa={}".format(page) for page in range(1,514)]
lead_link = "https://www.tursab.org.tr/en/displayAcenta?AID={}"

async def get_links(url):
    async with asyncio.Semaphore(10):
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                text = await response.text()
                result = await process_docs(text)
                return result

async def process_docs(html):
    coros = []
    soup = BeautifulSoup(html, "lxml")
    items = [itemnum.get("data-id") for itemnum in soup.select("#acentaTbl tr[data-id]")]
    for item in items:
        coros.append(fetch_again(lead_link.format(item)))
    await asyncio.gather(*coros)

async def fetch_again(link):
    async with asyncio.Semaphore(10):
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as response:
                text = await response.text()
                sauce = BeautifulSoup(text, "lxml")
                try:
                    name = sauce.select_one("p > b").text
                except Exception:
                    name = ""
                print(name)

if __name__ == '__main__':
    loop = asyncio.ProactorEventLoop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(asyncio.gather(*(get_links(link) for link in links)))
In short, what the process_docs() function does is collect the data-id numbers from each page so they can be reused to fill in this https://www.tursab.org.tr/en/displayAcenta?AID={} link and collect the names from the pop-up boxes. One such id is 8757, and one such qualified link is therefore https://www.tursab.org.tr/en/displayAcenta?AID=8757.
By the way, if I change the highest number used in the links variable to 20 or 30 or so, it runs smoothly.
async def get_links(url):
    async with asyncio.Semaphore(10):
You can't do that: it means a new semaphore instance will be created on each function call, while you need a single semaphore instance shared by all requests. Change your code this way:
sem = asyncio.Semaphore(10)  # module level

async def get_links(url):
    async with sem:
        # ...

async def fetch_again(link):
    async with sem:
        # ...
You can also return to the default event loop once you're using the semaphore correctly:
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(...)
Finally, you should alter both get_links(url) and fetch_again(link) to do the parsing outside of the semaphore, releasing it as soon as possible, before the semaphore is needed again inside process_docs(text).
Final code:
import asyncio
import aiohttp
from bs4 import BeautifulSoup

links = ["https://www.tursab.org.tr/en/travel-agencies/search-travel-agency?sayfa={}".format(page) for page in range(1,514)]
lead_link = "https://www.tursab.org.tr/en/displayAcenta?AID={}"

sem = asyncio.Semaphore(10)

async def get_links(url):
    async with sem:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                text = await response.text()
    result = await process_docs(text)
    return result

async def process_docs(html):
    coros = []
    soup = BeautifulSoup(html, "lxml")
    items = [itemnum.get("data-id") for itemnum in soup.select("#acentaTbl tr[data-id]")]
    for item in items:
        coros.append(fetch_again(lead_link.format(item)))
    await asyncio.gather(*coros)

async def fetch_again(link):
    async with sem:
        async with aiohttp.ClientSession() as session:
            async with session.get(link) as response:
                text = await response.text()
    sauce = BeautifulSoup(text, "lxml")
    try:
        name = sauce.select_one("p > b").text
    except Exception:
        name = "o"
    print(name)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.gather(*(get_links(link) for link in links)))
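As a side note (not part of the answer above), another common way to bound concurrency is to share one ClientSession for the whole crawl and cap connections on the connector; a rough sketch, assuming get_links and fetch_again were adapted to accept the shared session as a parameter:

import asyncio
import aiohttp

async def crawl(links):
    # TCPConnector(limit=10) caps the number of simultaneous connections,
    # and a single shared session reuses connections across requests.
    connector = aiohttp.TCPConnector(limit=10)
    async with aiohttp.ClientSession(connector=connector) as session:
        await asyncio.gather(*(get_links(session, link) for link in links))

# asyncio.run(crawl(links))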
