Different result between urllib and aiohttp - python

So basically I'm trying to get the currently playing track from an online radio stream's direct link (for example,
http://air.radiorecord.ru:8101/rr_320).
I first found a snippet on the internet written with urllib, but my application is asynchronous, so I needed to use aiohttp. With urllib it works perfectly, while the aiohttp version sometimes just can't find anything. Please help :(
before:
def get_now(self, session):
    try:
        request = urllib.request.Request(self.data.get('url'), headers={'Icy-MetaData': '1'})  # request metadata
        response = urllib.request.urlopen(request)
        metaint = int(response.headers['icy-metaint'])
        for _ in range(10):  # title may be empty initially, try several times
            response.read(metaint)  # skip to metadata
            metadata_length = struct.unpack('B', response.read(1))[0] * 16  # length byte
            metadata = response.read(metadata_length).rstrip(b'\0')
            # extract title from the metadata
            m = re.search(br"StreamTitle='([^']*)';", metadata)
            if m:
                title = m.group(1)
                if title:
                    break
        else:
            return "No title found"
        return title.decode('utf8', errors='replace')
    except Exception:
        return "No title found"
after:
async def get_now(self, session):
    async with session.get(self.stream_url, headers={'Icy-MetaData': "1"}) as resp:
        content = resp.content
        metadata = resp.headers
        metaint = int(metadata['icy-metaint'])
        for _ in range(30):
            await content.read(metaint)
            metadata_length = struct.unpack('B', await content.read(1))[0] * 16  # length byte
            metadata = (await content.read(metadata_length)).rstrip(b'\0')
            m = re.search(br"StreamTitle='([^']*)';", metadata)
            if m:
                title = m.group(1)
                if title:
                    return title.decode('utf8', errors='replace')
                else:
                    return "No title found"
        return "Nothing found"

The snippet below is always able to detect the current track (in around 400 ms). Instead of processing only part of each chunk, it scans the whole chunk as it is read:
import aiohttp
import asyncio
import re

async def get_now(stream_url, session):
    headers = {"Icy-MetaData": "1"}
    async with session.get(stream_url, headers=headers) as resp:
        for _ in range(10):
            data = await resp.content.read(8192)
            m = re.search(br"StreamTitle='([^']*)';", data.rstrip(b"\0"))
            if m:
                title = m.group(1)
                if title:
                    return title.decode("utf8", errors="replace")
                else:
                    return "No title found"
        return "Nothing found"

async def get_track():
    session = aiohttp.ClientSession()
    stream_url = "http://air.radiorecord.ru:8101/rr_320"
    result = await get_now(stream_url, session)
    print(f"result: {result}")
    await session.close()

asyncio.run(get_track())
Result on my computer (CPU usage is very low on a quite old CPU: i7-3517U):
[ionut@ionut-pc ~]$ time python test.py
result: Record Club - Nejtrino & Baur
real 0m0.401s
user 0m0.198s
sys 0m0.031s
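As a side note (not part of the answer above, just an observation about the API): aiohttp's StreamReader.read(n) returns at most n bytes and may return fewer, while the blocking urllib read delivers the full amount, so the metaint-based reads in the aiohttp version can drift out of alignment with the metadata block. A minimal sketch that keeps the original logic but uses readexactly(), assuming the same stream URL and an aiohttp session created as in get_track() above:
import re
import struct

async def get_now_exact(stream_url, session):
    # Same metaint-based logic as the question, but readexactly() guarantees the
    # requested number of bytes, so the metadata offset cannot drift.
    async with session.get(stream_url, headers={"Icy-MetaData": "1"}) as resp:
        metaint = int(resp.headers["icy-metaint"])
        for _ in range(10):  # the title block may be empty at first
            await resp.content.readexactly(metaint)  # skip the audio chunk
            length = struct.unpack("B", await resp.content.readexactly(1))[0] * 16
            metadata = (await resp.content.readexactly(length)).rstrip(b"\0")
            m = re.search(br"StreamTitle='([^']*)';", metadata)
            if m and m.group(1):
                return m.group(1).decode("utf8", errors="replace")
        return "No title found"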

Related

Why does my code keep running even after it reaches the end?

I am having some trouble with my program: when it reaches the end of the third() function, it continues to try to execute transactions. I tried having it return None to break out of the seemingly infinite loop it is in, with no success. I am sure that I am missing something very simple here and am guessing it has something to do with the recursion that I used. Thanks for any help that you can provide.
import asyncio
import base64
import json
import os
import os.path
import time
import httpcore
import requests
from typing import Awaitable
import solana
import httpx
from rich import print
from solana.keypair import Keypair
from solana.publickey import PublicKey
from solana.rpc.api import Client
from solana.rpc.async_api import AsyncClient
from solana.rpc.commitment import Confirmed
from solana.rpc.types import TxOpts
from solana.transaction import Transaction
# Notes
# This is meant as a bare bones hello world and as such does not have :
#
# - error handling on http calls
# - checks / retries to ensure solana transactions go through
# - logging - just your basic print statement here. But at least you get the Rich pretty printing variant :)
#
# Libraries used
# - https://www.python-httpx.org/ - cause it's shinier and better than requests
# - https://michaelhly.github.io/solana-py/
# - https://github.com/Textualize/rich for pretty printing - because it rocks.
# I use poetry to manage dependencies but am not including the project file here for brevity.
# Mint constants
USDC_MINT = "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", 6
SOL_MINT = "So11111111111111111111111111111111111111112", 9
FAB_MINT = "EdAhkbj5nF9sRM7XN7ewuW8C9XEUMs8P7cnoQ57SYE96", 9
FUSD_MINT = "B7mXkkZgn7abwz1A3HnKkb18Y6y18WcbeSkh1DuLMkee", 8
# This works ok - most of the time
rpc_host = "https://api.mainnet-beta.solana.com"
filename = r"C:\Users\myname\.config\solana\burner.json"
def get_wallet_keypair(filename: str) -> Keypair:
    """Load a keypair from a filesystem wallet."""
    if not os.path.isfile(filename):
        raise Exception(f"Wallet file '{filename}' is not present.")
    with open(filename) as json_file:
        data = json.load(json_file)
    mid = len(data) // 2
    secret_key = data[:mid]
    secret_bytes = bytes(secret_key)
    keypair = Keypair.from_secret_key(secret_bytes)
    print(f"Public Key is: {keypair.public_key}")
    return keypair

async def get_quote(input_mint: str, output_mint: str, amount: int, slippage: float = 0.2):
    url_query = f"https://quote-api.jup.ag/v1/quote?outputMint={output_mint}&inputMint={input_mint}&amount={amount}&slippage={slippage}"
    print(url_query)
    async with httpx.AsyncClient() as client:
        r = await client.get(url_query)
        return r.json()

async def get_transaction(route: dict, user_key: str) -> dict:
    swap_url = "https://quote-api.jup.ag/v1/swap"
    payload = {"route": route, "userPublicKey": user_key, "wrapUnwrapSOL": True}
    print(json.dumps(payload, indent=2))
    async with httpx.AsyncClient() as client:
        # slightly longer timeout as the free rpc server can be a bit laggy
        r = await client.post(swap_url, json=payload, timeout=6.0)
        return r.json()

def send_transaction(payer: Keypair, cc: Client, swap_transaction: str, opts: TxOpts) -> str:
    """Send a serialized transaction to the RPC node."""
    trans = Transaction.deserialize(base64.b64decode(swap_transaction))
    result = cc.send_transaction(trans, payer, opts=opts)
    txid = result["result"]
    print(f"transaction details: https://solscan.io/tx/{txid}")
    return txid
async def async_main(from_mint, from_decimals, to_mint, quantity):
    cc = Client(rpc_host)
    print(f" Converting {quantity} {from_mint} to {to_mint} with {from_decimals} Decimals")
    quote_quantity = quantity * (10 ** from_decimals)
    r = await get_quote(str(from_mint), str(to_mint), quote_quantity, slippage=2)
    quote, outAmount = r["data"][0], int(r['data'][0]['outAmountWithSlippage']) / (10 ** from_decimals)
    print("Out Amount =", outAmount)
    if quote := r["data"][0]:
        print(quote)
        # get the relevant transaction details
        trans = await get_transaction(quote, str(pubkey))
        setup_transaction = trans["setupTransaction"] if "setupTransaction" in trans else None
        swap_transaction = trans["swapTransaction"] if "swapTransaction" in trans else None
        cleanup_transaction = trans["cleanupTransaction"] if "cleanupTransaction" in trans else None
        opts = TxOpts(skip_preflight=True)
        # Setup transaction. Will create any missing accounts if required.
        if setup_transaction:
            print("Sending setup transaction")
            # print(setup_transaction)
            send_transaction(payer, cc, setup_transaction, opts)
        # This one actually does the business
        if swap_transaction:
            print("Sending swap transaction")
            txid = send_transaction(payer, cc, swap_transaction, opts)
            # Wait for the transaction to complete before looking it up on chain.
            # Clearly this is *not* the right way to do this. Retry in a loop or something fancy.
            await asyncio.sleep(20)
            result = cc.get_transaction(txid, commitment=Confirmed)
            print(result)
        # Haven't seen one of these needed yet. Hopefully the jup.ag devs can explain when it's required.
        if cleanup_transaction:
            print("Sending cleanup transaction")
            send_transaction(payer, cc, cleanup_transaction, opts)
        print("Swap Complete !")
    return outAmount
def get_balance(input_mint):
    url = "https://api.mainnet-beta.solana.com"
    headers = {'Content-type': 'application/json'}
    if input_mint == "So11111111111111111111111111111111111111112":
        data = {"jsonrpc": "2.0", "id": 1, "method": "getBalance", "params": [f"{pubkey}"]}
        response = requests.post(url, data=json.dumps(data), headers=headers)
        response = response.text
        parsed = json.loads(response)
        # print(json.dumps(parsed, indent=4, sort_keys=True))
        accountBal = (parsed['result']['value']) / 10 ** SOL_MINT[1]
        print(accountBal)
    else:
        data = {"jsonrpc": "2.0", "id": 1, "method": "getTokenAccountsByOwner",
                "params": [f"{pubkey}",
                           {"mint": f"{input_mint}"}, {"encoding": "jsonParsed"}]}
        response = requests.post(url, data=json.dumps(data), headers=headers)
        response = response.text
        parsed = json.loads(response)
        # print(json.dumps(parsed, indent=4, sort_keys=True))
        accountBal = parsed['result']['value'][0]['account']['data']['parsed']['info']['tokenAmount']['uiAmount']
        print(accountBal)
    return accountBal
# usdc buys fusd fusd is sold for sol sol is sold for usdc
# (from_mint, from_decimals, to_mint, quantity):
class swaps:
    def __init__(self, input_mint, decimals, output_mint, amount):
        self.input_mint = input_mint
        self.decimals = decimals
        self.output_mint = output_mint
        self.amount = amount

    def swap(self):
        asyncio.run(async_main(self.input_mint, self.decimals, self.output_mint, self.amount))

def first(count, previous=0):
    try:
        if get_balance(USDC_MINT[0]) <= 1:
            time.sleep(1)
            count += 1
            if count >= 60:
                third(0)
            first(count)
    except TypeError:
        first(0)
    step1 = swaps(USDC_MINT[0], USDC_MINT[1], FUSD_MINT[0], get_balance(USDC_MINT[0]) if previous == 0 else previous)
    try:
        step1.swap()
    except httpx.ReadTimeout:
        print("Retrying")
        time.sleep(10)
        first(0)
    second(0)

def second(count, previous=0):
    try:
        if get_balance(FUSD_MINT[0]) <= 1:
            time.sleep(1)
            count += 1
            if count >= 60:
                first(0)
            second(count)
    except TypeError:
        second(0)
    step2 = swaps(FUSD_MINT[0], FUSD_MINT[1], SOL_MINT[0], get_balance(FUSD_MINT[0]) if previous == 0 else previous)
    try:
        step2.swap()
    except:
        print("Retrying")
        time.sleep(10)
        second(0)
    count = 0
    third(0)

def third(count, previous=0):
    if get_balance(SOL_MINT[0]) < .6:
        time.sleep(1)
        count += 1
        if count >= 60:
            second(0)
        third(count)
    step3 = swaps(SOL_MINT[0], SOL_MINT[1], USDC_MINT[0], get_balance(SOL_MINT[0]) - 0.5 if previous == 0 else previous)
    try:
        step3.swap()
    except:
        print("Retrying")
        time.sleep(10)
        third(previous)
    print("All Swaps Completed")
    return None
payer = get_wallet_keypair(filename)
pubkey = payer.public_key

loops = 0
if __name__ == "__main__":
    previousBalence = get_balance(USDC_MINT[0])
    print(f"Starting Balence: {previousBalence}")
    # for loops in range(5):
    first(0)
    loops += 1
    endBalance = get_balance(USDC_MINT[0])
    print(f"End balence is {endBalance}")
    totalProfit = endBalance - previousBalence
    print(f"Total Profit is: {totalProfit}")
Edit: when the code keeps running, the output shows it repeatedly trying to swap fUSD for SOL and then SOL for USDC, over and over again.
Solution: https://pastebin.com/8id7gfe4
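The linked solution isn't reproduced here, but a likely explanation of the behaviour (an illustration of the Python semantics involved, not the linked solution): first(), second() and third() call each other recursively, so the return None at the end of third() only exits that single call; the callers still on the stack resume right after their recursive call and keep swapping. A minimal sketch of the same pattern, with hypothetical step names:
def step_a():
    step_b()
    print("step_a resumes here after step_b returns")  # keeps executing

def step_b():
    step_c()
    print("step_b resumes here after step_c returns")  # keeps executing

def step_c():
    print("done")
    return None  # only exits step_c, not its callers

step_a()
# Output:
# done
# step_b resumes here after step_c returns
# step_a resumes here after step_b returns
Replacing the mutual recursion with an explicit loop over the three steps avoids this.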

Extract complete data from an API using Python into JSON format

I want to extract the data using the API and store it in JSON format.
Code:
def run():
    responses = asyncio.run(fetch_all())
    return responses

output = run()
count = int(output[0]['result']['stats']['count'])
print(count)

import time

async def fetch(session, url):
    async with session.get(url) as response:
        resp = await response.json()
        return resp

async def fetch_all():
    limit = 100
    async with aiohttp.ClientSession(auth=aiohttp.BasicAuth('***', '***')) as session:
        tasks = []
        for i in range(int(count / limit) + 1):
            tasks.append(
                fetch(
                    session,
                    f"https:XYZ&sysparm_limit={limit}&sysparm_offset={i*limit}",
                )
            )
        responses = await asyncio.gather(*tasks, return_exceptions=True)
        return responses

def run():
    start = time.time()
    responses = asyncio.run(fetch_all())
    end = time.time()
    return {"responses": responses, "time_duration": end - start}

sap_prod = run()
final_output = []
for i in range(len(sap_prod['responses'])):
    final_output = final_output + sap_prod['responses'][i]['result']

json_object = json.dumps(final_output, indent=4)
Error:
TypeError: 'ClientPayloadError' object is not subscriptable
The data is huge, so my IDE is crashing. How can I dump the output to JSON using loops?
Also, please suggest any alternative method to get the complete data from the API.
In fetch you can write the data to a file, producing a line-delimited JSON file:
async def fetch(session, url):
    async with session.get(url) as response:
        resp = await response.json()
        with open('myfile.jsonl', 'a') as f:
            f.write(json.dumps(resp) + '\n')
This way, after all of the tasks are done, you will have a file with one JSON document per line. To read it back:
data = []
with open('myfile.jsonl') as f:
    for line in f:
        data.append(json.loads(line))
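Regarding the TypeError: asyncio.gather(..., return_exceptions=True) places exception objects such as ClientPayloadError into the results list instead of raising them, so indexing ['result'] on one of them fails. A small sketch of filtering them out before combining, assuming the sap_prod structure from the question:
final_output = []
skipped = 0
for resp in sap_prod['responses']:
    # gather(..., return_exceptions=True) mixes exceptions in with normal results
    if isinstance(resp, Exception):
        skipped += 1
        continue
    final_output.extend(resp['result'])
print(f"skipped {skipped} failed requests")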

python asyncio aiohttp timeout

Word of notice: This is my first approach with asyncio, so I might have done something really stupid.
Scenario is as follows:
I need to "http-ping" a humongous list of URLs to check whether they respond with 200 or any other value. I get timeouts for each and every request, though tools like gobuster report 200, 403, etc.
My code is something similar to this:
import asyncio, aiohttp
import datetime

# -------------------------------------------------------------------------------------
async def get_data_coroutine(session, url, follow_redirects, timeout_seconds, retries):
    # print('#DEBUG '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+url)
    try:
        async with session.get(url, allow_redirects=False, timeout=timeout_seconds) as response:
            status = response.status
            # res = await response.text()
            if status == 404:
                pass
            elif 300 <= status and status < 400:
                location = str(response).split("Location': \'")[1].split("\'")[0]
                print('#HIT '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+str(status)+' '+url+' ---> '+location)
                if follow_redirects == True:
                    return await get_data_coroutine(session, location, follow_redirects, timeout_seconds, retries)
            else:
                print('#HIT '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+str(status)+' '+url)
            return None
    except asyncio.exceptions.TimeoutError as e:
        print('#ERROR '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+' '+' '+url+' TIMEOUT '+str(e))
        return None

# ---------------------------------------------------------------------------
async def main(loop):
    base_url = 'http://192.168.59.37'
    extensions = ['', '.html', 'php']
    fd = open('/usr/share/wordlists/dirb/common.txt', 'r')
    words_without_suffix = [x.strip() for x in fd.readlines()]  # [-5:] #DEBUG!
    words_with_suffix = [base_url+'/'+x+y for x in words_without_suffix for y in extensions]
    follow = True
    total_timeout = aiohttp.ClientTimeout(total=60*60*24)
    timeout_seconds = 10
    retries = 1
    async with aiohttp.ClientSession(loop=loop, timeout=total_timeout) as session:
        tasks = [get_data_coroutine(session, url, follow, timeout_seconds, retries) for url in words_with_suffix]
        await asyncio.gather(*tasks)
        print('DONE')

# ---------------------------------------------------------------------------
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(main(loop))
Did I do something really wrong?
Any word of advice?
Thank you SO much!
Actually, I ended up finding an open issue in aio-libs/aiohttp:
https://github.com/aio-libs/aiohttp/issues/3203
It suggests a workaround that achieves what I need:
session_timeout = aiohttp.ClientTimeout(total=None, sock_connect=timeout_seconds, sock_read=timeout_seconds)
async with aiohttp.ClientSession(timeout=session_timeout) as session:
    async with session.get(url, allow_redirects=False, timeout=1) as response:
        ...
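For reference, a self-contained sketch of the same workaround (the check() helper and the URL list are only illustrative, not from the issue):
import asyncio
import aiohttp

async def check(session, url):
    # The per-socket timeouts come from the session; no total deadline is applied.
    try:
        async with session.get(url, allow_redirects=False) as response:
            print(url, response.status)
    except asyncio.TimeoutError:
        print(url, "TIMEOUT")

async def main():
    timeout_seconds = 10
    session_timeout = aiohttp.ClientTimeout(
        total=None, sock_connect=timeout_seconds, sock_read=timeout_seconds)
    urls = ["http://192.168.59.37/" + w for w in ("index", "index.html", "admin")]  # stand-in word list
    async with aiohttp.ClientSession(timeout=session_timeout) as session:
        await asyncio.gather(*(check(session, u) for u in urls))

asyncio.run(main())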
To answer your question: no, you did nothing wrong. I can't see anything wrong with your code in terms of HTTP request/response/timeout handling.
If indeed all your requests are timing out to the host (http://192.168.59.37), I suspect the issues you are experiencing are most likely down to how your network is resolving requests (or how your code is building the url).
You can confirm whether requests are independently succeeding/failing using a tool like curl, eg:
curl "http://192.168.59.37/abc.html"
I tested it locally by running
python3 -m http.server 8080
and placing empty files 'abc' and 'abc.html' in the same directory, updating the base_url:
base_url = "http://127.0.0.1:8080"
With my minor updates (code below), here's the output:
http://127.0.0.1:8080/.bashrc.php
#404
http://127.0.0.1:8080/.bashrc
#404
http://127.0.0.1:8080/.bashrc.html
#404
http://127.0.0.1:8080/abc
#HIT 2020-11-03 12:57:33 200 http://127.0.0.1:8080/abc
http://127.0.0.1:8080/zt.php
#404
http://127.0.0.1:8080/zt.html
#404
http://127.0.0.1:8080/zt
#404
http://127.0.0.1:8080/abc.html
#HIT 2020-11-03 12:57:33 200 http://127.0.0.1:8080/abc.html
http://127.0.0.1:8080/abc.php
#404
DONE
My updates are mostly minor, but they might help with further debugging:
For debugging, print the URL. This is important to determine whether the code builds the URL correctly; it highlighted that the 'php' extension is missing a '.', so the code was looking for abcphp, not abc.php.
Use response.ok to test for a successful HTTP response; your code wasn't handling 500 errors (it reported them as hits).
Use Python f-strings for cleaner formatting.
import asyncio
import aiohttp
import datetime

async def get_data_coroutine(session, url, follow_redirects, timeout_seconds, retries):
    try:
        async with session.get(
            url, allow_redirects=False, timeout=timeout_seconds
        ) as response:
            print(url)
            now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            if response.ok:
                print(f"#HIT {now} {response.status} {url}")
            else:
                status = response.status
                if status == 404:
                    print("#404")
                elif 300 <= status and status < 400:
                    location = str(response).split("Location': '")[1].split("'")[0]
                    print(f"#HIT {now} {status} {url} ---> {location}")
                    if follow_redirects is True:
                        return await get_data_coroutine(
                            session, location, follow_redirects, timeout_seconds, retries
                        )
                else:
                    print("#ERROR ", response.status)
            return None
    except asyncio.TimeoutError as e:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(f"#ERROR {now} {url} TIMEOUT ", e)
        return None

async def main(loop):
    base_url = "http://127.0.0.1:8080"
    extensions = ["", ".html", ".php"]
    fd = open("/usr/share/wordlists/dirb/common.txt", "r")
    words_without_suffix = [x.strip() for x in fd.readlines()]
    words_with_suffix = [
        base_url + "/" + x + y for x in words_without_suffix for y in extensions
    ]
    follow = True
    total_timeout = aiohttp.ClientTimeout(total=60 * 60 * 24)
    timeout_seconds = 10
    retries = 1
    async with aiohttp.ClientSession(loop=loop, timeout=total_timeout) as session:
        tasks = [
            get_data_coroutine(session, url, follow, timeout_seconds, retries)
            for url in words_with_suffix
        ]
        await asyncio.gather(*tasks)
        print("DONE")

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(main(loop))

How to paginate asyncio requests in python where total is provided in header

*Just to clarify in advance: I use Postman to test my requests, and they return the results I'm looking for.
I'm connecting to an API using Python. The API will only return 500 records per request, and it provides the total number of records in the first response header, 'x-test-count'.
I'm obviously not Python savvy and feel that I'm handling pagination completely wrong. Take a look at the async get function. Basically, it takes the total count from the first response and loops through, running:
async with session.get(paging_url) as response:
    page_results = await response.json()
    pages.extend(page_results)
It does return results, but only 500, so it would seem that it's not capturing each iteration.
class Queue:
    def __init__(self, id, type):
        self.id = id
        self.type = type
        self.requests = []

class Test:
    def __init__(self):
        self.queue = []
        self.queue_list = []
        self.coroutines = []
        self.headers = {
            'Content-Type': 'application/json',
            'x-test-token': self.token,
        }

    def get_id(self, type=''):
        id = datetime.now().strftime('%Y%m-%d%H-%M%S-') + str(uuid4())
        if type != '':
            id = type + '-' + id
        return id

    def url_encode(self, url):
        # doesn't like encoding urls using yarl. I'm manually handling them below with UTF-8 encode
        url = url.replace(' ', '%20')
        # url = url.replace('?', '%3F')
        return url

    def queue_create(self, type=''):
        id = self.get_id(type='queue')
        if type == '':
            self.debug('Error: queue_create was not given a type')
            return
        id = Queue(id=id, type=type)
        self.debug('queue_create instantiated new queue class named: ' + id)
        # TODO: Add to list of active queues to track for create and destroy
        # Return name of new object
        return id

    def queue_run(self, name=''):
        self.debug('Starting queue_run')
        if name == '':
            self.debug('Error: queue_run asked to run without providing a name')
            # return

        async def get(url, headers):
            async with aiohttp.ClientSession(headers=headers, connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
                async with session.get(url) as response:
                    self.debug('HTTP Response: ' + str(response.status))
                    # Set pagination vars to 1
                    current_page = 1
                    page_range = 1
                    # Check the status code. If other than 200, stop
                    assert response.status == 200
                    # Get the count of records. If not provided, set last_page to 1
                    try:
                        page_range = int(response.headers['x-test-count'])
                        self.debug(response.headers['x-test-count'])
                    except:
                        self.debug('x-test-count not provided, defaulted to 1')
                    first_page_results = await response.json()
                    if page_range == 1:
                        self.debug('Returning first page results only')
                        return first_page_results
                    else:
                        self.debug('Total results: ' + str(page_range) + '. Performing additional requests.')
                        pages = []
                        for records in range(1, page_range, 500):
                            remaining_records = page_range - records
                            if remaining_records > 500:
                                paging_size = 500
                            else:
                                paging_size = remaining_records
                            # Create the paging URL
                            paging_url = url + '&size=' + str(paging_size) + '&from=' + str(records)
                            # Run paged requests
                            async with session.get(paging_url) as response:
                                page_results = await response.json()
                                # combine paged requests
                                pages.extend(page_results)
                            # Clear paging URL
                            paging_url = ''
                        return pages

        # Establish the loop
        loop = asyncio.get_event_loop()
        # Establish coroutines and populate with queries from queue
        coroutines = []
        for query in self.queue:
            # Removed a lot of the actual code here. Basically, this establishes the URL and appends coroutines
            coroutines.append(get(url, headers=headers))
        # Start the asyncio loop
        results = loop.run_until_complete(asyncio.gather(*coroutines))
        return results

    def add_request(self, type, endpoint, query='', deleted=False, data='', full=False, paging_size='', paging_from=''):
        self.debug('Starting add_request')
        self.debug('Queue before append: ', item=self.queue)
        self.queue.append([type, endpoint, query, deleted, data, full, paging_size, paging_from])
        self.debug('Queue after append: ', item=self.queue)
        return self.queue
So to run it, it looks something like this:
Test = Test()
Test.add_request('read', 'personnel', '', full=True, deleted=False)
response = Test.queue_run()
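There is no answer recorded here, but as a hedged sketch of the usual approach: read x-test-count from the first response, then gather one request per remaining page on the same session. The size/from query parameters are taken from the code above; the rest (the get_all_pages/get_page names and the assumption that each page is a JSON list) is illustrative:
import asyncio
import aiohttp

async def get_all_pages(url, headers):
    # Sketch only: one session for every request, page size fixed at the API's 500-record cap.
    page_size = 500
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(url) as response:
            total = int(response.headers.get('x-test-count', 0))
            first_page = await response.json()
        if total <= page_size:
            return first_page

        async def get_page(offset):
            paging_url = f"{url}&size={page_size}&from={offset}"
            async with session.get(paging_url) as resp:
                return await resp.json()

        # One task per remaining page, all awaited together.
        tasks = [get_page(offset) for offset in range(page_size, total, page_size)]
        pages = list(first_page)
        for page in await asyncio.gather(*tasks):
            pages.extend(page)
        return pages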

aiohttp: rate limiting requests-per-second by domain

I am writing a web crawler that is running parallel fetches for many different domains. I want to limit the number of requests-per-second that are made to each individual domain, but I do not care about the total number of connections that are open, or the total requests per second that are made across all domains. I want to maximize the number of open connections and requests-per-second overall, while limiting the number of requests-per-second made to individual domains.
All of the currently existing examples I can find either (1) limit the number of open connections or (2) limit the total number of requests-per-second made in the fetch loop. Examples include:
aiohttp: rate limiting parallel requests
aiohttp: set maximum number of requests per second
Neither of them do what I am requesting which is to limit requests-per-second on a per domain basis. The first question only answers how to limit requests-per-second overall. The second one doesn't even have answers to the actual question (the OP asks about requests per second and the answers all talk about limiting # of connections).
Here is the code that I tried, using a simple rate limiter I made for a synchronous version, which doesn't work when the DomainTimer code is run in an async event loop:
from collections import defaultdict
from datetime import datetime, timedelta
import asyncio
import async_timeout
import aiohttp
from urllib.parse import urlparse
from queue import Queue, Empty

from HTMLProcessing import processHTML
import URLFilters

SEED_URLS = ['http://www.bbc.co.uk', 'http://www.news.google.com']
url_queue = Queue()
for u in SEED_URLS:
    url_queue.put(u)

# number of pages to download per run of crawlConcurrent()
BATCH_SIZE = 100
DELAY = timedelta(seconds=1.0)  # delay between requests from single domain, in seconds

HTTP_HEADERS = {'Referer': 'http://www.google.com',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0'}

class DomainTimer():
    def __init__(self):
        self.timer = None

    def resetTimer(self):
        self.timer = datetime.now()

    def delayExceeded(self, delay):
        if not self.timer:  # We haven't fetched this before
            return True
        if (datetime.now() - self.timer) >= delay:
            return True
        else:
            return False

crawl_history = defaultdict(dict)  # given a URL, when is last time crawled?
domain_timers = defaultdict(DomainTimer)

async def fetch(session, url):
    domain = urlparse(url).netloc
    print('here fetching ' + url + "\n")
    dt = domain_timers[domain]
    if dt.delayExceeded(DELAY) or not dt:
        with async_timeout.timeout(10):
            try:
                dt.resetTimer()  # reset domain timer
                async with session.get(url, headers=HTTP_HEADERS) as response:
                    if response.status == 200:
                        crawl_history[url] = datetime.now()
                        html = await response.text()
                        return {'url': url, 'html': html}
                    else:
                        # log HTTP response, put into crawl_history so
                        # we don't attempt to fetch again
                        print(url + " failed with response: " + str(response.status) + "\n")
                        return {'url': url, 'http_status': response.status}
            except aiohttp.ClientConnectionError as e:
                print("Connection failed " + str(e))
            except aiohttp.ClientPayloadError as e:
                print("Received bad data from server @ " + url + "\n")
    else:  # Delay hasn't passed yet: skip for now & put at end of queue
        url_queue.put(url)
        return None

async def fetch_all(urls):
    """Launch requests for all web pages."""
    tasks = []
    async with aiohttp.ClientSession() as session:
        for url in urls:
            task = asyncio.ensure_future(fetch(session, url))
            tasks.append(task)  # create list of tasks
        return await asyncio.gather(*tasks)  # gather task responses

def batch_crawl():
    """Launch requests for all web pages."""
    start_time = datetime.now()
    # Here we build the list of URLs to crawl for this batch
    urls = []
    for i in range(BATCH_SIZE):
        try:
            next_url = url_queue.get_nowait()  # get next URL from queue
            urls.append(next_url)
        except Empty:
            print("Processed all items in URL queue.\n")
            break
    loop = asyncio.get_event_loop()
    asyncio.set_event_loop(loop)
    pages = loop.run_until_complete(fetch_all(urls))
    crawl_time = (datetime.now() - start_time).seconds
    print("Crawl completed. Fetched " + str(len(pages)) + " pages in " + str(crawl_time) + " seconds.\n")
    return pages

def parse_html(pages):
    """Parse the HTML for each page downloaded in this batch."""
    start_time = datetime.now()
    results = {}
    for p in pages:
        if not p or not p['html']:
            print("Received empty page")
            continue
        else:
            url, html = p['url'], p['html']
            results[url] = processHTML(html)
    processing_time = (datetime.now() - start_time).seconds
    print("HTML processing finished. Processed " + str(len(results)) + " pages in " + str(processing_time) + " seconds.\n")
    return results

def extract_new_links(results):
    """Extract links from the parsed results."""
    # later we could track where links were from here, anchor text, etc,
    # and weight queue priority based on that
    links = []
    for k in results.keys():
        new_urls = [l['href'] for l in results[k]['links']]
        for u in new_urls:
            if u not in crawl_history.keys():
                links.append(u)
    return links

def filterURLs(urls):
    urls = URLFilters.filterDuplicates(urls)
    urls = URLFilters.filterBlacklistedDomains(urls)
    return urls

def run_batch():
    pages = batch_crawl()
    results = parse_html(pages)
    links = extract_new_links(results)
    for l in filterURLs(links):
        url_queue.put(l)
    return results
There are no errors or exceptions thrown, and the rate-limiting code works fine for synchronous fetches, but the DomainTimer has no apparent effect when run in the async event loop. The delay of one request per second per domain is not upheld...
How would I modify this synchronous rate-limiting code to work within the async event loop? Thanks!
It's hard to debug your code since it contains a lot of unrelated stuff; it's easier to show the idea with a new, simple example.
Main idea:
write your own Semaphore-like class using __aenter__ and __aexit__ that accepts a url (domain)
use a domain-specific Lock to prevent multiple simultaneous requests to the same domain
sleep before allowing the next request, according to the domain's last request time and the allowed RPS
track the time of the last request for each domain
Code:
import asyncio
import aiohttp
from urllib.parse import urlparse
from collections import defaultdict

class Limiter:
    # domain -> req/sec:
    _limits = {
        'httpbin.org': 4,
        'eu.httpbin.org': 1,
    }
    # domain -> its lock:
    _locks = defaultdict(lambda: asyncio.Lock())
    # domain -> time of its last request:
    _times = defaultdict(lambda: 0)

    def __init__(self, url):
        self._host = urlparse(url).hostname

    async def __aenter__(self):
        await self._lock
        to_wait = self._to_wait_before_request()
        print(f'Wait {to_wait} sec before next request to {self._host}')
        await asyncio.sleep(to_wait)

    async def __aexit__(self, *args):
        print(f'Request to {self._host} just finished')
        self._update_request_time()
        self._lock.release()

    @property
    def _lock(self):
        """Lock that prevents multiple requests to same host."""
        return self._locks[self._host]

    def _to_wait_before_request(self):
        """What time we need to wait before request to host."""
        request_time = self._times[self._host]
        request_delay = 1 / self._limits[self._host]
        now = asyncio.get_event_loop().time()
        to_wait = request_time + request_delay - now
        to_wait = max(0, to_wait)
        return to_wait

    def _update_request_time(self):
        now = asyncio.get_event_loop().time()
        self._times[self._host] = now

# request that uses Limiter instead of Semaphore:
async def get(url):
    async with Limiter(url):
        async with aiohttp.ClientSession() as session:  # TODO reuse session for different requests.
            async with session.get(url) as resp:
                return await resp.text()

# main:
async def main():
    coros = [
        get('http://httpbin.org/get'),
        get('http://httpbin.org/get'),
        get('http://httpbin.org/get'),
        get('http://httpbin.org/get'),
        get('http://httpbin.org/get'),
        get('http://eu.httpbin.org/get'),
        get('http://eu.httpbin.org/get'),
        get('http://eu.httpbin.org/get'),
        get('http://eu.httpbin.org/get'),
        get('http://eu.httpbin.org/get'),
    ]
    await asyncio.gather(*coros)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main())
    finally:
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()
I developed a library named octopus-api (https://pypi.org/project/octopus-api/) that lets you rate limit and set the number of connections to the endpoint, using aiohttp under the hood. Its goal is to simplify all the aiohttp setup needed.
Here is an example of how to use it, where get_ethereum is the user-defined request function. It could just as well have been a web-crawler request function or whatever fits:
from octopus_api import TentacleSession, OctopusApi
from typing import Dict, List

if __name__ == '__main__':
    async def get_ethereum(session: TentacleSession, request: Dict):
        async with session.get(url=request["url"], params=request["params"]) as response:
            body = await response.json()
            return body

    client = OctopusApi(rate=50, resolution="sec", connections=6)
    result: List = client.execute(requests_list=[{
        "url": "https://api.pro.coinbase.com/products/ETH-EUR/candles?granularity=900&start=2021-12-04T00:00:00Z&end=2021-12-04T00:00:00Z",
        "params": {}}] * 1000, func=get_ethereum)
    print(result)
The TentacleSession works the same way as aiohttp.ClientSession for POST, GET, PUT and PATCH requests.
Let me know if it helps with your rate-limiting and connection issues for crawling.
