I have some equipment with an HTTP interface that frequently generates an endless HTTP page with values I want to parse and save to a database.
I started with requests:
import asyncio
import asyncpg
import requests

class node_http_mtr():
    def __init__(self, ip, nsrc, ndst):
        self.ip = ip
        self.nsrc = nsrc
        self.ndst = ndst
        try:
            self.data = requests.get('http://' + self.ip + '/nph-cgi_mtr?duration=-1&interval=0', stream=True, timeout=10)
        except:
            return

    def __iter__(self):
        return self

    def __next__(self):
        mtr = list()
        try:
            for chunk in self.data.iter_content(32 * (self.nsrc + self.ndst), '\n'):
                # DEBUG log chunk
                for line in chunk.split('\n'):
                    # DEBUG log line
                    if line.startswith('MTR'):
                        try:
                            _, io, num, val = line.split(' ')
                            l, r = val.split(':')[1], val.split(':')[2]
                            mtr.append((self.ip, io+num, l, r))
                        except:
                            # ERROR log line
                            pass
                    if len(mtr) == self.nsrc + self.ndst:
                        break
                if len(mtr) == self.nsrc + self.ndst:
                    yield mtr
                else:
                    continue
        except:
            # ERROR connection lost
            return

async def save_to_db(data_to_save):
    global pool
    try:
        async with pool.acquire() as conn:
            await conn.execute('''INSERT INTO mtr (ip, io, l, r) VALUES %s''' % ','.join(str(row) for row in data_to_save))
    finally:
        await pool.release(conn)

async def remove_from_db(ip):
    global pool
    try:
        async with pool.acquire() as conn:
            await conn.execute('''DELETE FROM httpmtr WHERE ip = $1''', ip)
    finally:
        await pool.release(conn)

async def http_mtr_worker():
    global workers_list
    global loop
    while True:
        await asyncio.sleep(0)
        for ip in list(workers_list):
            data_to_save = next(workers_list[ip])
            if data_to_save:
                asyncio.ensure_future(save_to_db(next(data_to_save)))
            await asyncio.sleep(0)

async def check_for_workers():
    global workers_list
    global pool
    while True:
        await asyncio.sleep(0)
        try:
            async with pool.acquire() as conn:
                workers = await conn.fetch('''SELECT ip FROM httpmtr''')
        finally:
            await pool.release(conn)
        for worker in workers:
            if worker['ip'] not in list(workers_list):
                workers_list[worker['ip']] = node_http_mtr(worker['ip'], 8, 8)
                await asyncio.sleep(0)
                print('Add worker', worker['ip'])
            await asyncio.sleep(0)
        ips_to_delete = set(workers_list.keys()) - set([i[0] for i in workers])
        if len(ips_to_delete) != 0:
            for ip in ips_to_delete:
                print('Delete worker ', ip)
                workers_list.pop(ip)
        await asyncio.sleep(0)

async def make_db_connection():
    pool = await asyncpg.create_pool(user='postgres', password='data', database='test', host='localhost', max_queries=50000, command_timeout=60)
    return pool

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
pool = loop.run_until_complete(make_db_connection())
workers_list = {}
try:
    asyncio.ensure_future(check_for_workers())
    asyncio.ensure_future(http_mtr_worker())
    loop.run_forever()
except Exception as e:
    print(e)
    pass
finally:
    print("Closing Loop")
    loop.close()
I have a trigger procedure in the DB which deletes all data older than 1 second; the final result with one worker in PostgreSQL is:
test=# select count(*) from mtr;
count
-------
384
(1 row)
That means 384 results per second. There are 16 different kinds of data in each device, so I get 384/16 = 24 values per second, which is an acceptable result. But the more workers I add, the worse the performance gets: with 10 workers I get 2-3 times fewer values. The goal is to have hundreds of workers and 24-25 values/sec each.
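The pruning trigger itself isn't shown above; purely as an assumed illustration (the table columns are taken from the INSERT statement, while the ts column and the trigger names are hypothetical), such a setup could look roughly like this:

# Assumed schema and pruning trigger, NOT the actual DDL from the post: a ts
# column with DEFAULT now() plus a statement-level trigger that deletes rows
# older than one second after every INSERT.
import asyncio
import asyncpg

DDL = """
CREATE TABLE IF NOT EXISTS mtr (
    ip text,
    io text,
    l  text,
    r  text,
    ts timestamptz NOT NULL DEFAULT now()
);

CREATE OR REPLACE FUNCTION prune_mtr() RETURNS trigger AS $$
BEGIN
    DELETE FROM mtr WHERE ts < now() - interval '1 second';
    RETURN NULL;
END;
$$ LANGUAGE plpgsql;

DROP TRIGGER IF EXISTS mtr_prune ON mtr;
CREATE TRIGGER mtr_prune
    AFTER INSERT ON mtr
    FOR EACH STATEMENT EXECUTE PROCEDURE prune_mtr();
"""

async def create_schema():
    conn = await asyncpg.connect(user='postgres', password='data',
                                 database='test', host='localhost')
    try:
        await conn.execute(DDL)
    finally:
        await conn.close()

asyncio.run(create_schema())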
Next I tried aiohttp, expecting a much better result. I hastily wrote this test code:
import asyncio
from aiohttp import ClientSession
import asyncpg

async def parse(line):
    if line.startswith('MTR'):
        _, io, num, val = line.split(' ')
        l, r = val.split(':')[1], val.split(':')[2]
        return ('ip.will.be.here', io + num, l, r)

async def run():
    url = "http://10.150.20.130/nph-cgi_mtr?duration=-1&interval=0"
    async with ClientSession() as session:
        while True:
            async with session.get(url) as response:
                buffer = b''
                start = False
                async for line in response.content.iter_any():
                    if line.startswith(b'\n'):
                        start = True
                        buffer += line
                    elif start and line.endswith(b'\n'):
                        buffer += line
                        mtr = [await parse(line) for line in buffer.decode().split('\n')[1:-1]]
                        await save_to_db(mtr)
                        break
                    elif start:
                        buffer += line

async def make_db_connection():
    pool = await asyncpg.create_pool(user='postgres', password='data', database='test', host='localhost', max_queries=50000, command_timeout=60)
    return pool

async def save_to_db(data_to_save):
    global pool
    try:
        async with pool.acquire() as conn:
            await conn.execute('''INSERT INTO mtr (ip, io, l, r) VALUES %s''' % ','.join(str(row) for row in data_to_save))
    finally:
        await pool.release(conn)

loop = asyncio.get_event_loop()
pool = loop.run_until_complete(make_db_connection())
future = asyncio.ensure_future(run())
loop.run_until_complete(future)
And I've got this:
test=# select count(*) from mtr;
count
-------
80
(1 row)
i.e. I got 5 times worse performance with asynchronous requests. I'm stuck; I don't understand how to solve it.
UPDATE. Profiling didn't make the situation any clearer.
requests: (profiler output not included here)
aiohttp: (profiler output not included here)
With requests the situation is more or less clear, but I don't understand at all what the problem with async aiohttp is.
UPDATE 16/05/18: I finally went back to multithreading and got what I need: constant performance with a large number of workers. Asynchronous calls are indeed not a panacea.
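The final threaded version isn't shown here; as a rough sketch only of the thread-per-device layout described (the URL and line format come from the code above, while the names, the queue, the device list, and psycopg2 as the synchronous driver are assumptions):

# Rough sketch: one reader thread per device feeding a single DB-writer thread.
import queue
import threading
import requests
import psycopg2

results = queue.Queue(maxsize=10000)

def reader(ip):
    url = 'http://%s/nph-cgi_mtr?duration=-1&interval=0' % ip
    with requests.get(url, stream=True, timeout=10) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith('MTR'):
                continue
            try:
                _, io, num, val = line.split(' ')
                parts = val.split(':')
                results.put((ip, io + num, parts[1], parts[2]))
            except (ValueError, IndexError):
                pass  # malformed line, skip it

def db_writer():
    conn = psycopg2.connect(user='postgres', password='data',
                            dbname='test', host='localhost')
    conn.autocommit = True
    with conn.cursor() as cur:
        while True:
            row = results.get()
            cur.execute('INSERT INTO mtr (ip, io, l, r) VALUES (%s, %s, %s, %s)', row)

threading.Thread(target=db_writer, daemon=True).start()
for ip in ['10.150.20.130']:  # hypothetical device list
    threading.Thread(target=reader, args=(ip,), daemon=True).start()
threading.Event().wait()  # keep the main thread alive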
Related
I need to check several hundred proxy servers and count how many are not working. My script for this:
import urllib.request
import socket

net = ['http://192.168.1.1:8080',
       'http://192.168.1.2:8080',
       'http://192.168.1.3:8080',
       'http://192.168.1.4:8080',
       'http://192.168.1.5:8080',
       'http://192.168.1.6:8080',
       'http://192.168.1.7:8080',
       'http://192.168.1.8:8080',
       'http://192.168.1.9:8080',
       'http://192.168.1.10:8080']

fail = 0
socket.setdefaulttimeout(3)
for x in net:
    try:
        print(x)
        proxy = urllib.request.ProxyHandler({'http': (x)})
        opener = urllib.request.build_opener(proxy)
        urllib.request.install_opener(opener)
        urllib.request.urlretrieve('http://google.com')
    except IOError:
        print("Connection error")
        fail += 1
print(fail)
The proxies are in a list; I have given a simplified version here.
It takes 55 seconds to check 250 working proxies. I can't wait that long; I need to increase the execution speed.
How can this be done using async?
This should give you an idea of how to approach it. You have to wrap the various connection blocks in try/except yourself.
NOTE: This code is not tested as I do not have any way of doing so.
import asyncio, aiohttp

def returnPartionedList(inputlist, x=100):
    return([inputlist[i:i + x] for i in range(0, len(inputlist), x)])
    # Returns: Original list split into segments of x.

async def TestProxy(url, proxy, session):
    async with session.get(url, proxy=proxy, timeout=3) as response:
        if response.status == 200:
            _ = await response.text()
            return(proxy)

async def TestProxies(listofproxies):
    returnResults = []
    url = "https://google.com"  # Test proxy with this url
    ProxyPartitions = returnPartionedList(listofproxies, 20)  # Rate limit 20 per second
    for partition in ProxyPartitions:
        ProxyTasks = []
        async with aiohttp.ClientSession() as session:
            for proxy in partition:
                ProxyTasks.append(asyncio.create_task(TestProxy(url, proxy, session)))
            results = await asyncio.gather(*ProxyTasks, return_exceptions=False)
        if results:
            for result in results:
                if result:
                    returnResults.append(result)
        await asyncio.sleep(1)
    return(returnResults)

async def main():
    listofproxies = [
        'http://10.10.1.1:8080',
        'http://10.10.1.2:8080',
        'http://10.10.1.3:8080',
        'http://10.10.1.4:8080',
        'http://10.10.1.5:8080',
        'http://10.10.1.6:8080',
        'http://10.10.1.7:8080',
        'http://10.10.1.8:8080',
        'http://10.10.1.9:8080',
        'http://10.10.1.10:8080'
    ]
    test_proxies = await TestProxies(listofproxies)
    print(test_proxies)

if __name__ == "__main__":
    asyncio.run(main())
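As a hedged illustration of the try/except wrapping the note above leaves to the reader (this is my addition, not part of the original answer), a variant of TestProxy that treats timeouts and connection errors as a failed proxy might look like:

# Sketch: TestProxy with error handling; a proxy that times out or refuses
# the connection is reported as None instead of raising.
import asyncio
import aiohttp

async def TestProxySafe(url, proxy, session):
    try:
        async with session.get(url, proxy=proxy, timeout=3) as response:
            if response.status == 200:
                _ = await response.text()
                return proxy
    except (aiohttp.ClientError, asyncio.TimeoutError):
        return None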
I have a set of CPU-intensive processes that once in a while depend on each other to proceed. So something like
def run():
    while True:
        # do stuff
        # wake up some other process
        # wait for some other process to wake me up
        # do stuff
Within each process I'd like to use async, so that I can always have an instance of run running while others are waiting to be woken up. Looking at the asyncio docs, the only IPC option in the "High-level APIs" section that I see uses sockets. I'd much rather use a pipe, which it looks like I can perhaps do with the low-level API, but that documentation is chock full of warnings that if you're just writing an application then it's a mistake to be using it. Can someone weigh in on the idiomatic thing to do here? (And also, speed is an important factor, so if there's some less-idiomatic-but-more-performant thing I'd like to know about that option as well.)
I would like to mention the aioprocessing library, as I have successfully used it in one of my projects. It provides an async interface to the multiprocessing primitives, including IPC, such as Process, Pipe, Lock, Queue, etc. It uses a thread pool to do this:
...
@staticmethod
def coro_maker(func):
    def coro_func(self, *args, loop=None, **kwargs):
        return self.run_in_executor(
            getattr(self, func), *args, loop=loop, **kwargs
        )
    return coro_func
But to be honest, a lot depends on the problem being solved and on what tasks are being performed concurrently, since intensive IPC within the async approach is less effective than the synchronous approach due to the overhead of the event loop, thread pool, etc. Sometimes it is better to make all IPC operations synchronous and put them in a separate thread. Again, it all depends on the problem and the environment. Below is a benchmark that is far from comprehensive, but it gives an approximate picture of the problem being solved here (intensive exchange of buffers).
note: I wrote about the difference between a Queue and SimpleQueue here
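To make the "synchronous IPC in a separate thread" idea above concrete, here is a minimal sketch of mine (not part of the benchmark below): a plain thread owns all the blocking Pipe calls and hands results to the event loop via call_soon_threadsafe.

# Sketch: blocking multiprocessing IPC confined to one thread, results handed
# to asyncio via call_soon_threadsafe. Names and structure are illustrative only.
import asyncio
import threading
import multiprocessing as mp

def child(conn):
    # echo child: send back whatever it receives, stop on None
    while True:
        item = conn.recv()
        if item is None:
            break
        conn.send(item)

def ipc_thread(conn, loop, out_queue, count, payload):
    # all blocking send/recv calls happen in this thread only
    for _ in range(count):
        conn.send(payload)
        reply = conn.recv()
        loop.call_soon_threadsafe(out_queue.put_nowait, reply)
    conn.send(None)
    loop.call_soon_threadsafe(out_queue.put_nowait, None)

async def main():
    parent_conn, child_conn = mp.Pipe()
    proc = mp.Process(target=child, args=(child_conn,))
    proc.start()

    loop = asyncio.get_running_loop()
    out_queue = asyncio.Queue()
    t = threading.Thread(target=ipc_thread,
                         args=(parent_conn, loop, out_queue, 1000, b'*' * 100))
    t.start()

    received = 0
    while True:
        item = await out_queue.get()   # the event loop stays free while waiting
        if item is None:
            break
        received += 1
    print('received', received, 'replies')

    t.join()
    proc.join()

if __name__ == '__main__':
    asyncio.run(main())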
Sync SimpleQueue: 1.4309470653533936
AioSimpleQueue: 12.32670259475708
AioQueue: 14.342737436294556
AioPipe: 11.747064590454102
subprocess pipe stream: 7.344956159591675
socket stream: 4.360717058181763
# main.py
import sys
import time
import asyncio
import aioprocessing as ap
import multiprocessing as mp
import proc

count = 5*10**4
data = b'*'*100

async def sync_simple_queue_func():
    out_ = mp.SimpleQueue()
    in_ = mp.SimpleQueue()
    p = ap.AioProcess(target=proc.start_sync_queue_func, args=(out_, in_))
    p.start()
    begin_ts = time.time()
    for i in range(count):
        out_.put(data)
        res = in_.get()
    print('Sync SimpleQueue: ', time.time() - begin_ts)
    out_.put(None)

async def simple_queue_func():
    out_ = ap.AioSimpleQueue()
    in_ = ap.AioSimpleQueue()
    p = ap.AioProcess(target=proc.start_queue_func, args=(out_, in_))
    p.start()
    begin_ts = time.time()
    for i in range(count):
        await out_.coro_put(data)
        res = await in_.coro_get()
    print('AioSimpleQueue: ', time.time() - begin_ts)
    await out_.coro_put(None)

async def queue_func():
    out_ = ap.AioQueue()
    in_ = ap.AioQueue()
    p = ap.AioProcess(target=proc.start_queue_func, args=(out_, in_))
    p.start()
    begin_ts = time.time()
    for i in range(count):
        await out_.coro_put(data)
        res = await in_.coro_get()
    print('AioQueue: ', time.time() - begin_ts)
    await out_.coro_put(None)

async def pipe_func():
    main_, child_ = ap.AioPipe()
    p = ap.AioProcess(target=proc.start_pipe_func, args=(child_,))
    p.start()
    begin_ts = time.time()
    for i in range(count):
        await main_.coro_send(data)
        res = await main_.coro_recv()
    print('AioPipe: ', time.time() - begin_ts)
    await main_.coro_send(None)
    await p.coro_join()

server = None

async def handle_child(reader, writer):
    begin_ts = time.time()
    for i in range(count):
        writer.write(data)
        res = await reader.read(len(data))
    print('socket stream: ', time.time() - begin_ts)
    writer.close()

async def socket_func():
    global server
    addr = ('127.0.0.1', 8888)
    server = await asyncio.start_server(handle_child, *addr)
    p = ap.AioProcess(target=proc.start_socket_func, args=(addr,))
    p.start()
    async with server:
        await server.serve_forever()

async def subprocess_func():
    prog = await asyncio.create_subprocess_shell(
        'python proc.py',
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE)
    begin_ts = time.time()
    for i in range(count):
        prog.stdin.write(data)
        res = await prog.stdout.read(len(data))
    print('subprocess pipe stream: ', time.time() - begin_ts)
    prog.stdin.close()

async def main():
    await sync_simple_queue_func()
    await simple_queue_func()
    await queue_func()
    await pipe_func()
    await subprocess_func()
    await socket_func()

asyncio.run(main())
# proc.py
import asyncio
import sys
import aioprocessing as ap

async def sync_queue_func(in_, out_):
    while True:
        n = in_.get()
        if n is None:
            return
        out_.put(n)

async def queue_func(in_, out_):
    while True:
        n = await in_.coro_get()
        if n is None:
            return
        await out_.coro_put(n)

async def pipe_func(child):
    while True:
        n = await child.coro_recv()
        if n is None:
            return
        await child.coro_send(n)

data = b'*' * 100

async def socket_func(addr):
    reader, writer = await asyncio.open_connection(*addr)
    while True:
        n = await reader.read(len(data))
        if not n:
            break
        writer.write(n)

def start_sync_queue_func(in_, out_):
    asyncio.run(sync_queue_func(in_, out_))

def start_queue_func(in_, out_):
    asyncio.run(queue_func(in_, out_))

def start_pipe_func(child):
    asyncio.run(pipe_func(child))

def start_socket_func(addr):
    asyncio.run(socket_func(addr))

async def connect_stdin_stdout():
    loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader()
    protocol = asyncio.StreamReaderProtocol(reader)
    dummy = asyncio.Protocol()
    await loop.connect_read_pipe(lambda: protocol, sys.stdin)  # sets read_transport
    w_transport, _ = await loop.connect_write_pipe(lambda: dummy, sys.stdout)
    writer = asyncio.StreamWriter(w_transport, protocol, reader, loop)
    return reader, writer

async def main():
    reader, writer = await connect_stdin_stdout()
    while True:
        res = await reader.read(len(data))
        if not res:
            break
        writer.write(res)

if __name__ == "__main__":
    asyncio.run(main())
I'm posting a new question, related to the old one, about a problem with the get from the queue. This is the code (thanks to Martijn Pieters):
import asyncio
import sys
import json
import os
import websockets

async def socket_consumer(socket, outgoing):
    # take messages from the web socket and push them into the queue
    async for message in socket:
        await outgoing.put(message)
        file = open(r"/home/host/Desktop/FromSocket.txt", "a")
        file.write("From socket: " + ascii(message) + "\n")
        file.close()

async def socket_producer(socket, incoming):
    # take messages from the queue and send them to the socket
    while True:
        message = await incoming.get()
        file = open(r"/home/host/Desktop/ToSocket.txt", "a")
        file.write("To socket: " + ascii(message) + "\n")
        file.close()
        await socket.send(message)

async def connect_socket(incoming, outgoing, loop=None):
    header = {"Authorization": r"Basic XXX="}
    uri = 'XXXXXX'
    async with websockets.connect(uri, extra_headers=header) as web_socket:
        # create tasks for the consumer and producer. The asyncio loop will
        # manage these independently
        consumer_task = asyncio.ensure_future(
            socket_consumer(web_socket, outgoing), loop=loop)
        producer_task = asyncio.ensure_future(
            socket_producer(web_socket, incoming), loop=loop)
        # start both tasks, but have the loop return to us when one of them
        # has ended. We can then cancel the remainder
        done, pending = await asyncio.wait(
            [consumer_task, producer_task], return_when=asyncio.FIRST_COMPLETED)
        for task in pending:
            task.cancel()

# pipe support
async def stdio(loop=None):
    if loop is None:
        loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader()
    await loop.connect_read_pipe(
        lambda: asyncio.StreamReaderProtocol(reader), sys.stdin)
    writer_transport, writer_protocol = await loop.connect_write_pipe(
        asyncio.streams.FlowControlMixin, os.fdopen(sys.stdout.fileno(), 'wb'))
    writer = asyncio.streams.StreamWriter(
        writer_transport, writer_protocol, None, loop)
    return reader, writer

async def pipe_consumer(pipe_reader, outgoing):
    # take messages from the pipe and push them into the queue
    while True:
        message = await pipe_reader.readline()
        if not message:
            break
        file = open(r"/home/host/Desktop/FromPipe.txt", "a")
        file.write("From pipe: " + ascii(message.decode('utf8')) + "\n")
        file.close()
        await outgoing.put(message.decode('utf8'))

async def pipe_producer(pipe_writer, incoming):
    # take messages from the queue and send them to the pipe
    while True:
        json_message = await incoming.get()
        file = open(r"/home/host/Desktop/ToPipe.txt", "a")
        file.write("Send to pipe message: " + ascii(json_message) + "\n")
        file.close()
        try:
            message = json.loads(json_message)
            message_type = int(message.get('header', {}).get('messageID', -1))
        except (ValueError, TypeError, AttributeError):
            # failed to decode the message, or the message was not
            # a dictionary, or the messageID was not convertible to an integer
            message_type = None
            file = open(r"/home/host/Desktop/Error.txt", "a")
            file.write(" Error \n")
            file.close()
        # 1 is DENM message, 2 is CAM message
        file.write("Send to pipe type: " + str(message_type))
        if message_type in {1, 2}:
            file.write("Send to pipe: " + json_message)
            pipe_writer.write(json_message.encode('utf8') + b'\n')
            await pipe_writer.drain()

async def connect_pipe(incoming, outgoing, loop=None):
    reader, writer = await stdio()
    # create tasks for the consumer and producer. The asyncio loop will
    # manage these independently
    consumer_task = asyncio.ensure_future(
        pipe_consumer(reader, outgoing), loop=loop)
    producer_task = asyncio.ensure_future(
        pipe_producer(writer, incoming), loop=loop)
    # start both tasks, but have the loop return to us when one of them
    # has ended. We can then cancel the remainder
    done, pending = await asyncio.wait(
        [consumer_task, producer_task], return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    # force a result check; if there was an exception it'll be re-raised
    for task in done:
        task.result()

def main():
    loop = asyncio.get_event_loop()
    pipe_to_socket = asyncio.Queue(loop=loop)
    socket_to_pipe = asyncio.Queue(loop=loop)
    socket_coro = connect_socket(pipe_to_socket, socket_to_pipe, loop=loop)
    pipe_coro = connect_pipe(socket_to_pipe, pipe_to_socket, loop=loop)
    loop.run_until_complete(asyncio.gather(socket_coro, pipe_coro))

main()
To send to the Pipe I use this code:
import pexpect
test = r"/home/host/PycharmProjects/Tim/Tim.py"
process = pexpect.spawn("python3 " + test)
message = '{"header":{"protocolVersion":1,"messageID":2,"stationID":400},"cam":{"generationDeltaTime":1,"camParameters":{"basicContainer":{"stationType":5}}';
process.write(message + "\n")
process.wait()
But how can I create a script that reads instead of writes? I tried this:
test = r"/home/host/PycharmProjects/Tim/Tim.py"
p = pexpect.spawn("python3 " + test, timeout=None)
while True:
    m = p.read()
    file = open(r"/home/host/Desktop/OpeListening.txt", "a")
    file.write(str(m))
    file.close()
p.wait()
But the read goes immediately to the next step without receiving any message. What is my error?
At the moment I am successfully using Popen:
import subprocess

process = subprocess.Popen(['python3', test], shell=False, stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
    result = process.stdout.readline()
    result = result.decode("utf-8")
    print(result)
process.wait()
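For comparison only (an assumption of mine, not code from the question), the same stdout read loop can be written with asyncio.subprocess so that waiting for output does not block an event loop:

# Sketch: read the child's stdout line by line without blocking the event loop.
import asyncio

async def read_child(script_path):
    proc = await asyncio.create_subprocess_exec(
        'python3', script_path,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT)
    while True:
        line = await proc.stdout.readline()   # returns b'' at EOF
        if not line:
            break
        print(line.decode('utf-8'), end='')
    await proc.wait()

asyncio.run(read_child(r"/home/host/PycharmProjects/Tim/Tim.py"))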
I have a websocket server (Python 3.x) taking requests where each one is a URL variable. It runs just fine, except that it executes each request serially, one after another, and while the function is running it also blocks the client(s) trying to connect. Non-blocking is what I want!
What I want: asynchronous, multiprocessed/threaded handling of both the websocket and the subprocess function.
The ability to set the number of cores to use (this is not obligatory, though).
Here's what I've got:
ANSWER (illustration and asyncio.subprocess in accepted answer)
So, I didn't get very far with this frustration. I reverted to my original code and, as it turns out, you need to let the function sleep with await asyncio.sleep(.001). Now it runs perfectly fine; I tested with multiple clients at the same time and it handles them asynchronously.
import asyncio, websockets, json

async def handler(websocket, path):
    print("New client connected.")
    await websocket.send('CONNECTED')
    try:
        while True:
            inbound = await websocket.recv()
            if inbound is None:
                break
            while inbound != None:
                import time
                for line in range(10):
                    time.sleep(1)
                    data = {}
                    data['blah'] = line
                    await asyncio.sleep(.000001)  # THIS
                    print(data)
                    await websocket.send(json.dumps(data))
                await websocket.send(json.dumps({'progress': 'DONE'}))
                break
    except websockets.exceptions.ConnectionClosed:
        print("Client disconnected.")

if __name__ == "__main__":
    server = websockets.serve(handler, '0.0.0.0', 8080)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(server)
    loop.run_forever()
Update: as suggested by @udi, if you want a slow external process, the way to go is asyncio.subprocess and not subprocess. Reading from the pipe with a blocking call stalls the other tasks, which is what asyncio.subprocess takes care of.
time.sleep() is blocking.
Try:
# blocking_server.py
import asyncio
import time
import websockets

x = 0

async def handler(websocket, path):
    global x
    x += 1
    client_id = x
    try:
        print("[#{}] Connected.".format(client_id))
        n = int(await websocket.recv())
        print("[#{}] Got: {}".format(client_id, n))
        for i in range(1, n + 1):
            print("[#{}] zzz...".format(client_id))
            time.sleep(1)
            print("[#{}] woke up!".format(client_id))
            await asyncio.sleep(.001)
            msg = "*" * i
            print("[#{}] sending: {}".format(client_id, msg))
            await websocket.send(msg)
        msg = "bye!"
        print("[#{}] sending: {}".format(client_id, msg))
        await websocket.send(msg)
        print("[#{}] Done.".format(client_id, msg))
    except websockets.exceptions.ConnectionClosed:
        print("[#{}] Disconnected!.".format(client_id))

if __name__ == "__main__":
    port = 8080
    server = websockets.serve(handler, '0.0.0.0', port)
    print("Started server on port {}".format(port))
    loop = asyncio.get_event_loop()
    loop.run_until_complete(server)
    loop.run_forever()
With this test client:
# test_client.py
import asyncio
import time
import websockets

async def client(client_id, n):
    t0 = time.time()
    async with websockets.connect('ws://localhost:8080') as websocket:
        print("[#{}] > {}".format(client_id, n))
        await websocket.send(str(n))
        while True:
            resp = await websocket.recv()
            print("[#{}] < {}".format(client_id, resp))
            if resp == "bye!":
                break
        print("[#{}] Done in {:.2f} seconds".format(client_id, time.time() - t0))

tasks = [client(i + 1, 3) for i in range(4)]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
Now compare the result when time.sleep(x) is replaced with await asyncio.sleep(x)!
If you need to run a slow external process via asyncio, try asyncio.subprocess:
An example external program:
# I am `slow_writer.py`
import sys
import time

n = int(sys.argv[1])
for i in range(1, n + 1):
    time.sleep(1)
    print("*" * i)
with this server:
# nonblocking_server.py
import asyncio
import sys
import websockets

x = 0

async def handler(websocket, path):
    global x
    x += 1
    client_id = x
    try:
        print("[#{}] Connected.".format(client_id))
        n = int(await websocket.recv())
        print("[#{}] Got: {}. Running subprocess..".format(client_id, n))
        cmd = (sys.executable, 'slow_writer.py', str(n))
        proc = await asyncio.create_subprocess_exec(
            *cmd, stdout=asyncio.subprocess.PIPE)
        async for data in proc.stdout:
            print("[#{}] got from subprocess, sending: {}".format(
                client_id, data))
            await websocket.send(data.decode().strip())
        return_value = await proc.wait()
        print("[#{}] Subprocess done.".format(client_id))
        msg = "bye!"
        print("[#{}] sending: {}".format(client_id, msg))
        await websocket.send(msg)
        print("[#{}] Done.".format(client_id, msg))
    except websockets.exceptions.ConnectionClosed:
        print("[#{}] Disconnected!.".format(client_id))

if __name__ == "__main__":
    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    port = 8080
    server = websockets.serve(handler, '0.0.0.0', port)
    print("Started server on port {}".format(port))
    loop = asyncio.get_event_loop()
    loop.run_until_complete(server)
    loop.run_forever()
I followed this tutorial: https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html and everything works fine when I am doing around 50,000 requests. But I need to do 1 million API calls, and then I have a problem with this code:
url = "http://some_url.com/?id={}"
tasks = set()
sem = asyncio.Semaphore(MAX_SIM_CONNS)
for i in range(1, LAST_ID + 1):
    task = asyncio.ensure_future(bound_fetch(sem, url.format(i)))
    tasks.add(task)
responses = asyncio.gather(*tasks)
return await responses
Because Python needs to create 1 million tasks, it basically just lags and then prints a Killed message in the terminal. Is there any way to use a generator instead of a pre-made set (or list) of URLs? Thanks.
Schedule all 1 million tasks at once
This is the code you are talking about. It takes up to 3 GB RAM so it is easily possible that it will be terminated by the operating system if you have low free memory.
import asyncio
from aiohttp import ClientSession

MAX_SIM_CONNS = 50
LAST_ID = 10**6

async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()

async def bound_fetch(sem, url, session):
    async with sem:
        await fetch(url, session)

async def fetch_all():
    url = "http://localhost:8080/?id={}"
    tasks = set()
    async with ClientSession() as session:
        sem = asyncio.Semaphore(MAX_SIM_CONNS)
        for i in range(1, LAST_ID + 1):
            task = asyncio.create_task(bound_fetch(sem, url.format(i), session))
            tasks.add(task)
        return await asyncio.gather(*tasks)

if __name__ == '__main__':
    asyncio.run(fetch_all())
Use a queue to streamline the work
This is my suggestion for how to use asyncio.Queue to pass URLs to worker tasks. The queue is filled as needed; there is no pre-made list of URLs.
It takes only 30 MB RAM :)
import asyncio
from aiohttp import ClientSession

MAX_SIM_CONNS = 50
LAST_ID = 10**6

async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()

async def fetch_worker(url_queue):
    async with ClientSession() as session:
        while True:
            url = await url_queue.get()
            try:
                if url is None:
                    # all work is done
                    return
                response = await fetch(url, session)
                # ...do something with the response
            finally:
                url_queue.task_done()
                # calling task_done() is necessary for the url_queue.join() to work correctly

async def fetch_all():
    url = "http://localhost:8080/?id={}"
    url_queue = asyncio.Queue(maxsize=100)
    worker_tasks = []
    for i in range(MAX_SIM_CONNS):
        wt = asyncio.create_task(fetch_worker(url_queue))
        worker_tasks.append(wt)
    for i in range(1, LAST_ID + 1):
        await url_queue.put(url.format(i))
    for i in range(MAX_SIM_CONNS):
        # tell the workers that the work is done
        await url_queue.put(None)
    await url_queue.join()
    await asyncio.gather(*worker_tasks)

if __name__ == '__main__':
    asyncio.run(fetch_all())
asyncio is memory bound (like any other program). You cannot spawn more tasks than memory can hold. My guess is that you hit a memory limit; check dmesg for more information.
1 million RPS doesn't mean there are 1M tasks. A task can do several requests in the same second.
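To illustrate that last point, here is a minimal sketch (mine, not from the answer above): a fixed pool of worker tasks shares one URL generator, so each task performs many requests and only MAX_SIM_CONNS coroutines exist at any moment.

# Sketch: fixed worker pool sharing a single URL generator.
import asyncio
from aiohttp import ClientSession

MAX_SIM_CONNS = 50
LAST_ID = 10**6

async def worker(urls, session):
    for url in urls:                      # the generator is shared by all workers
        async with session.get(url) as response:
            await response.read()         # each task performs many requests

async def fetch_all():
    urls = ("http://localhost:8080/?id={}".format(i) for i in range(1, LAST_ID + 1))
    async with ClientSession() as session:
        await asyncio.gather(*(worker(urls, session) for _ in range(MAX_SIM_CONNS)))

if __name__ == '__main__':
    asyncio.run(fetch_all())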