How to use SOCKS proxies to make requests with aiohttp? - python
I am trying to use aiohttp to make asynchronous HTTP requests over multiple SOCKS proxies. Basically, I am creating a pool of Tor clients with different IP addresses, and want to be able to route HTTP requests through them using aiohttp.
Based on the suggestions here and here, I have been trying to use aiosocks, but the examples in those threads do not work (if they ever did) because they are based on an old version of aiosocks with a different API. Documentation and examples of using aiosocks online are very sparse (it doesn't seem widely used). But I haven't been able to find any other solutions for using aiohttp with SOCKS proxies.
Below is the code I have so far (sorry for the large amount of code - I tried to slim down the example as much as I could!). First I initialize the Tor clients with stem:
from datetime import datetime
import stem.process
from TorUtils import printCircuits, cleanShutdown
NUM_TOR_CLIENTS = 3
# create list of (source_port, control_port) tuples
tor_ports = [(str(9050 + i), str(9050 + NUM_TOR_CLIENTS + i)) for i in range(NUM_TOR_CLIENTS)]
# Every ISO 3166 country code except for {US} and {CA}
country_codes = '{AF}, {AX}, {AL}, {DZ}, {AS}, {AD}, {AO}, {AI}, {AQ}, {AG}, {AR}, {AM}, {AW}, {AU}, {AT}, {AZ}, {BS}, {BH}, {BD}, {BB}, {BY}, {BE}, {BZ}, {BJ}, {BM}, {BT}, {BO}, {BQ}, {BA}, {BW}, {BV}, {BR}, {IO}, {BN}, {BG}, {BF}, {BI}, {KH}, {CM}, {CV}, {KY}, {CF}, {TD}, {CL}, {CN}, {CX}, {CC}, {CO}, {KM}, {CG}, {CD}, {CK}, {CR}, {CI}, {HR}, {CU}, {CW}, {CY}, {CZ}, {DK}, {DJ}, {DM}, {DO}, {EC}, {EG}, {SV}, {GQ}, {ER}, {EE}, {ET}, {FK}, {FO}, {FJ}, {FI}, {FR}, {GF}, {PF}, {TF}, {GA}, {GM}, {GE}, {DE}, {GH}, {GI}, {GR}, {GL}, {GD}, {GP}, {GU}, {GT}, {GG}, {GN}, {GW}, {GY}, {HT}, {HM}, {VA}, {HN}, {HK}, {HU}, {IS}, {IN}, {ID}, {IR}, {IQ}, {IE}, {IM}, {IL}, {IT}, {JM}, {JP}, {JE}, {JO}, {KZ}, {KE}, {KI}, {KP}, {KR}, {KW}, {KG}, {LA}, {LV}, {LB}, {LS}, {LR}, {LY}, {LI}, {LT}, {LU}, {MO}, {MK}, {MG}, {MW}, {MY}, {MV}, {ML}, {MT}, {MH}, {MQ}, {MR}, {MU}, {YT}, {MX}, {FM}, {MD}, {MC}, {MN}, {ME}, {MS}, {MA}, {MZ}, {MM}, {NA}, {NR}, {NP}, {NL}, {NC}, {NZ}, {NI}, {NE}, {NG}, {NU}, {NF}, {MP}, {NO}, {OM}, {PK}, {PW}, {PS}, {PA}, {PG}, {PY}, {PE}, {PH}, {PN}, {PL}, {PT}, {PR}, {QA}, {RE}, {RO}, {RU}, {RW}, {BL}, {SH}, {KN}, {LC}, {MF}, {PM}, {VC}, {WS}, {SM}, {ST}, {SA}, {SN}, {RS}, {SC}, {SL}, {SG}, {SX}, {SK}, {SI}, {SB}, {SO}, {ZA}, {GS}, {SS}, {ES}, {LK}, {SD}, {SR}, {SJ}, {SZ}, {SE}, {CH}, {SY}, {TW}, {TJ}, {TZ}, {TH}, {TL}, {TG}, {TK}, {TO}, {TT}, {TN}, {TR}, {TM}, {TC}, {TV}, {UG}, {UA}, {AE}, {GB}, {UM}, {UY}, {UZ}, {VU}, {VE}, {VN}, {VG}, {VI}, {WF}, {EH}, {YE}, {ZM}, {ZW}'
tor_configs = [{'SOCKSPort': p[0], 'ControlPort': p[1], 'DataDirectory': './.tordata' + p[0],
                'CookieAuthentication': '1', 'MaxCircuitDirtiness': '3600', 'ExcludeNodes': country_codes,
                'EntryNodes': '{us}, {ca}', 'ExitNodes': '{us}, {ca}', 'StrictNodes': '1',
                'GeoIPExcludeUnknown': '1', 'EnforceDistinctSubnets': '0'
                } for p in tor_ports]
print(f"Spawning {NUM_TOR_CLIENTS} tor clients ...")
start_time = datetime.now()
tor_clients = []
for cfg in tor_configs:
    tor_clients.append({'config': cfg, 'process': stem.process.launch_tor_with_config(config=cfg)})
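A quick way to confirm that each client is up and exposes a distinct exit IP is a plain synchronous request through its SOCKS port before wiring up aiohttp. This is just a minimal sketch, assuming requests is installed with SOCKS support (pip install requests[socks]) and using api.ipify.org purely as an example IP-echo endpoint:
import requests

for tor_client in tor_clients:
    port = tor_client['config']['SOCKSPort']
    # socks5h:// makes requests resolve DNS through the Tor client as well
    proxies = {'http': f'socks5h://127.0.0.1:{port}',
               'https': f'socks5h://127.0.0.1:{port}'}
    try:
        ip = requests.get('https://api.ipify.org', proxies=proxies, timeout=30).text
        print(f"SOCKSPort {port} -> exit IP {ip}")
    except requests.RequestException as e:
        print(f"SOCKSPort {port} failed: {e}")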
... and then I am trying to use the following code to make the HTTP requests with aiohttp:
from collections import defaultdict, deque
from datetime import datetime, timedelta
import asyncio
import aiohttp
import aiosocks
from aiosocks.connector import ProxyConnector, ProxyClientRequest
import async_timeout
TIMEOUT = 10
async def _get(url, session, proxy, request_limiter):
    try:
        async with request_limiter:  # semaphore to limit number of concurrent requests
            async with async_timeout.timeout(TIMEOUT):
                async with session.get(url, proxy=proxy, proxy_auth=None) as resp:
                    status = int(resp.status)
                    headers = dict(resp.headers)
                    content_type = str(resp.content_type)
                    text = await resp.text()
                    return {'url': url, 'status': status, 'headers': headers, 'text': str(text), 'errors': None}
    except asyncio.TimeoutError as e:
        queue.visited_urls[url] = datetime.now()
        return {'url': url, 'status': None, 'headers': None, 'text': None, 'errors': str(e)}
async def _getPagesTasks(url_list, tor_clients, request_limiter, loop):
    """Launch requests for all web pages."""
    # deque rotates continuously through SOCKS sessions for each tor client ...
    sessions = deque()
    for tor_client in tor_clients:
        conn = ProxyConnector()
        session = aiohttp.ClientSession(connector=conn, request_class=ProxyClientRequest)
        sessions.append({'proxy': 'http://127.0.0.1:' + tor_client['config']['SOCKSPort'], 'session': session})
    tasks = []
    task_count = 0
    for url in url_list:
        s = sessions.popleft()
        session = s['session']
        proxy = s['proxy']
        task = loop.create_task(_get(url, session, proxy, request_limiter))
        tasks.append(task)
        task_count += 1
        sessions.append(s)  # rotate this client's session back to the end of the deque
    results = await asyncio.gather(*tasks)
    for s in sessions:
        s['session'].close()
    return results
def getPages(url_list, tor_clients):
    """Given a URL list, dispatch pool of tor clients to concurrently fetch URLs"""
    request_limiter = asyncio.Semaphore(len(tor_clients))  # limit to one request per client at a time
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    responses = loop.run_until_complete(_getPagesTasks(url_list, tor_clients, request_limiter, loop))
    loop.close()
    return responses
This code does not run, however. When I try to run it, I get the error below. I'm wondering if I'm doing something wrong, or if this is a problem with aiosocks itself (it seems to have been unmaintained for a while, and might be targeting an older version of aiohttp or something ...):
~/Code/gis project/code/TorGetQueue.py in _getPagesTasks(url_list, tor_clients, request_limiter, loop)
50 sessions = deque()
51 for client in tor_clients:
---> 52 conn = ProxyConnector()
53 session = aiohttp.ClientSession(connector=conn, request_class=ProxyClientRequest)
54 sessions.append({'proxy': 'http://127.0.0.1:' + client['config']['SOCKSPort'], 'session': session})
~/.local/share/virtualenvs/code-pIyQci_2/lib/python3.6/site-packages/aiosocks/connector.py in __init__(self, verify_ssl, fingerprint, resolve, use_dns_cache, family, ssl_context, local_addr, resolver, keepalive_timeout, force_close, limit, limit_per_host, enable_cleanup_closed, loop, remote_resolve)
54 force_close=force_close, limit=limit, loop=loop,
55 limit_per_host=limit_per_host, use_dns_cache=use_dns_cache,
---> 56 enable_cleanup_closed=enable_cleanup_closed)
57
58 self._remote_resolve = remote_resolve
TypeError: __init__() got an unexpected keyword argument 'resolve'
What am I doing wrong here? Is there an easier way to use SOCKS proxies with aiohttp? What do I need to change to make this code work with aiosocks?
Thanks!
I tried using aiosocks for my project and got the same error as yours, only to discover later that aiosocks has been abandoned.
You can use aiosocksy instead.
import asyncio
import aiohttp
from aiosocksy import Socks5Auth
from aiosocksy.connector import ProxyConnector, ProxyClientRequest
async def fetch(url):
    connector = ProxyConnector()
    socks = 'socks5://127.0.0.1:9050'
    async with aiohttp.ClientSession(connector=connector, request_class=ProxyClientRequest) as session:
        async with session.get(url, proxy=socks) as response:
            print(await response.text())
loop = asyncio.get_event_loop()
loop.run_until_complete(fetch('http://httpbin.org/ip'))
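To adapt this to the pool of Tor clients in the question, one ClientSession per SOCKSPort can be rotated round-robin. This is only a rough sketch: it assumes aiosocksy behaves as in the answer above, reuses the question's tor_clients list, and assumes an aiohttp version where ClientSession.close() is awaitable:
import asyncio
from collections import deque

import aiohttp
from aiosocksy.connector import ProxyConnector, ProxyClientRequest

async def fetch_all(urls, tor_clients):
    # One session per Tor client; each request is routed through that client's SOCKS port.
    sessions = deque()
    for tor_client in tor_clients:
        session = aiohttp.ClientSession(connector=ProxyConnector(),
                                        request_class=ProxyClientRequest)
        proxy = 'socks5://127.0.0.1:' + tor_client['config']['SOCKSPort']
        sessions.append((session, proxy))

    async def get(url):
        session, proxy = sessions[0]
        sessions.rotate(-1)  # round-robin across the Tor clients
        async with session.get(url, proxy=proxy) as resp:
            return {'url': url, 'status': resp.status, 'text': await resp.text()}

    try:
        return await asyncio.gather(*(get(u) for u in urls))
    finally:
        for session, _ in sessions:
            await session.close()
Usage would be something like results = asyncio.get_event_loop().run_until_complete(fetch_all(urls, tor_clients)). If a proxy required credentials, the Socks5Auth imported above would presumably be passed as proxy_auth=Socks5Auth(login, password), though Tor's local SOCKS port normally needs none.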
Related
How to optimize my performances, using asynchronous python code
I'm looking to optimize my code in order to process the info faster. This is my first time playing with asynchronous requests, and I'm also still new to Python, so I hope my code makes sense. I'm using FastAPI as a framework and aiohttp to send my requests.

Right now, I'm only interested in getting the total number of results per word searched. I will be dumping the JSON into a DB afterwards.

My code sends requests to the public CrossRef API (crossref). As an example, I'm searching for terms from 2022-06-02 to 2022-06-03 (inclusive). The terms being searched are: 'paper' (3146 results), 'ammonium' (1430 results) and 'bleach' (23 results). Example:

https://api.crossref.org/works?rows=1000&sort=created&mailto=youremail@domain.com&query=paper&filter=from-index-date:2022-06-02,until-index-date:2022-06-03&cursor=*

This returns 3146 rows. I need to search for only one term at a time. I did not try splitting it per day as well to see if that's faster.

There is also a recursive context in this, and that is where I feel like I'm mishandling the asynchronous concept. Here is why I need a recursive call.

Deep paging requests: deep paging using cursors can be used to iterate over large result sets, without any limits on their size. To use deep paging, make a query as normal, but include the cursor parameter with a value of *, for example:

https://api.crossref.org/works?rows=1000&sort=created&mailto=youremail@domain.com&query=ammonium&filter=from-index-date:2022-06-02,until-index-date:2022-06-03&cursor=*

A next-cursor field will be provided in the JSON response. To get the next page of results, pass the value of next-cursor as the cursor parameter. For example:

https://api.crossref.org/works?rows=1000&sort=created&mailto=youremail@domain.com&query=ammonium&filter=from-index-date:2022-06-02,until-index-date:2022-06-03&cursor=<value of next-cursor parameter>

Advice from the CrossRef doc: clients should check the number of returned items. If the number of returned items is equal to the number of expected rows then the end of the result set has been reached. Using next-cursor beyond this point will result in responses with an empty items list.

My processing time is still through the roof with just 3 words (and 7 requests); it's over 15 seconds, and I'm trying to get that down to under 5 seconds if possible. Using Postman, the longest request took about 4 seconds to come back.

This is what I have so far if you want to try it out.
schema.py

class CrossRefSearchRequest(BaseModel):
    keywords: List[str]
    date_from: Optional[datetime] = None
    date_to: Optional[datetime] = None

controler.py

import time
from fastapi import FastAPI, APIRouter, Request

app = FastAPI(title="CrossRef API", openapi_url=f"{settings.API_V1_STR}/openapi.json")
api_router = APIRouter()
service = CrossRefService()

@api_router.post("/search", status_code=201)
async def search_keywords(*, search_args: CrossRefSearchRequest) -> dict:
    fixed_search_args = {
        "sort": "created",
        "rows": "1000",
        "cursor": "*"
    }
    results = await service.cross_ref_request(search_args, **fixed_search_args)
    return {k: len(v) for k, v in results.items()}

# sets the header X-Process-Time, in order to have the time for each request
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
    start_time = time.time()
    response = await call_next(request)
    process_time = time.time() - start_time
    response.headers["X-Process-Time"] = str(process_time)
    return response

app.include_router(api_router)

if __name__ == "__main__":
    # Use this for debugging purposes only
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001, log_level="debug")

service.py

from datetime import datetime, timedelta

def _setup_date_default(date_from_req: datetime, date_to_req: datetime):
    yesterday = datetime.utcnow() - timedelta(days=1)
    date_from = yesterday if date_from_req is None else date_from_req
    date_to = yesterday if date_to_req is None else date_to_req
    return date_from.strftime(DATE_FORMAT_CROSS_REF), date_to.strftime(DATE_FORMAT_CROSS_REF)

class CrossRefService:
    def __init__(self):
        self.client = CrossRefClient()

    # my recursive call for the next cursor
    async def _send_client_request(self, final_result: dict[str, list[str]], keywords: [str], date_from: str, date_to: str, **kwargs):
        json_responses = await self.client.cross_ref_request_date_range(keywords, date_from, date_to, **kwargs)
        for json_response in json_responses:
            message = json_response.get('message', {})
            keyword = message.get('query').get('search-terms')
            next_cursor = message.get('next-cursor')
            total_results = message.get('total-results')
            search_results = message.get('items', [{}]) if total_results > 0 else []
            if final_result[keyword] is None:
                final_result[keyword] = search_results
            else:
                final_result[keyword].extend(search_results)
            if total_results > int(kwargs['rows']) and len(search_results) == int(kwargs['rows']):
                kwargs['cursor'] = next_cursor
                await self._send_client_request(final_result, [keyword], date_from, date_to, **kwargs)

    async def cross_ref_request(self, request: CrossRefSearchRequest, **kwargs) -> dict[str, list[str]]:
        date_from, date_to = _setup_date(request.date_from, request.date_to)
        results: dict[str, list[str]] = dict.fromkeys(request.keywords)
        await self._send_client_request(results, request.keywords, date_from, date_to, **kwargs)
        return results

client.py

import asyncio
from aiohttp import ClientSession

async def _send_request_task(session: ClientSession, url: str):
    try:
        async with session.get(url) as response:
            await response.read()
            return response
    # exception handler to come
    except Exception as e:
        print(f"exception for {url}")
        print(str(e))

class CrossRefClient:
    base_url = "https://api.crossref.org/works?" \
               "query={}&" \
               "filter=from-index-date:{},until-index-date:{}&" \
               "sort={}&" \
               "rows={}&" \
               "cursor={}"

    def __init__(self) -> None:
        self.headers = {
            "User-Agent": f"my_app/v0.1 (example.com/; mailto:youremail@domain.com) using FastAPI"
        }

    async def cross_ref_request_date_range(
            self, keywords: [str], date_from: str, date_to: str, **kwargs
    ) -> list:
        async with ClientSession(headers=self.headers) as session:
            tasks = [
                asyncio.create_task(
                    _send_request_task(session, self.base_url.format(
                        keyword, date_from, date_to, kwargs['sort'], kwargs['rows'], kwargs['cursor']
                    )),
                    name=TASK_NAME_BASE.format(keyword, date_from, date_to)
                )
                for keyword in keywords
            ]
            responses = await asyncio.gather(*tasks)
            return [await response.json() for response in responses]

How can I optimize this better and make better use of asynchronous calls? Also, this recursive loop might not be the best way to do it either; any ideas on that too? I implemented a solution with synchronous calls and it's even slower, so I guess I'm not too far away. Thanks!
Your code looks fine and you are not misusing the asynchronous concept. Perhaps you are limited by the client session's connection pool, which is capped at 100 connections at a time. Take a look at https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.BaseConnector. Or maybe the upstream server is just answering slowly to a massive number of requests.
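If that connection limit turns out to be the bottleneck, it can be raised by passing an explicit connector when the session is created. A minimal sketch (the limits and the example URL are arbitrary):
import asyncio
import aiohttp

async def main():
    # The default TCPConnector allows 100 simultaneous connections; raising it
    # (and optionally capping per-host connections) only helps if the client,
    # not the upstream server, is what is holding things back.
    connector = aiohttp.TCPConnector(limit=200, limit_per_host=50)
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get("https://api.crossref.org/works?rows=0&query=paper") as resp:
            print(resp.status)

asyncio.run(main())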
python asyncio & httpx
I am very new to asynchronous programming and I was playing around with httpx. I have the following code and I am sure I am doing something wrong - I just don't know what it is. There are two methods, one synchronous and the other asynchronous; they both pull from Google Finance. On my system I am seeing the time spent as follows:

Asynchronous: 5.015218734741211
Synchronous: 5.173618316650391

Here is the code:

import httpx
import asyncio
import time

#
#--------------------------------------------------------------------
#--------------------------------------------------------------------
#
def sync_pull(url):
    r = httpx.get(url)
    print(r.status_code)

#
#--------------------------------------------------------------------
#--------------------------------------------------------------------
#
async def async_pull(url):
    async with httpx.AsyncClient() as client:
        r = await client.get(url)
        print(r.status_code)

#
#--------------------------------------------------------------------
#--------------------------------------------------------------------
#
if __name__ == "__main__":
    goog_fin_nyse_url = 'https://www.google.com/finance/quote/'
    tickers = ['F', 'TWTR', 'CVX', 'VZ', 'GME', 'GM', 'PG', 'AAL',
               'MARK', 'AAP', 'THO', 'NGD', 'ZSAN', 'SEAC', ]

    print("Running asynchronously...")
    async_start = time.time()
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        asyncio.run(async_pull(url))
    async_end = time.time()
    print(f"Time lapsed is: {async_end - async_start}")

    print("Running synchronously...")
    sync_start = time.time()
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        sync_pull(url)
    sync_end = time.time()
    print(f"Time lapsed is: {sync_end - sync_start}")

I had hoped the asynchronous approach would require a fraction of the time the synchronous approach requires. What am I doing wrong?
When you say asyncio.run(async_pull) you're saying "run async_pull and wait for the result to come back". Since you do this once per ticker in your loop, you're essentially using asyncio to run things synchronously and won't see performance benefits.

What you need to do is create several async calls and run them concurrently. There are several ways to do this; the easiest is to use asyncio.gather (see https://docs.python.org/3/library/asyncio-task.html#asyncio.gather), which takes in a sequence of coroutines and runs them concurrently. Adapting your code is fairly straightforward: you create an async function that takes a list of URLs, calls async_pull on each of them, passes those coroutines to asyncio.gather, and awaits the results. Adapting your code to this looks like the following:

import httpx
import asyncio
import time

def sync_pull(url):
    r = httpx.get(url)
    print(r.status_code)

async def async_pull(url):
    async with httpx.AsyncClient() as client:
        r = await client.get(url)
        print(r.status_code)

async def async_pull_all(urls):
    return await asyncio.gather(*[async_pull(url) for url in urls])

if __name__ == "__main__":
    goog_fin_nyse_url = 'https://www.google.com/finance/quote/'
    tickers = ['F', 'TWTR', 'CVX', 'VZ', 'GME', 'GM', 'PG', 'AAL',
               'MARK', 'AAP', 'THO', 'NGD', 'ZSAN', 'SEAC', ]

    print("Running asynchronously...")
    async_start = time.time()
    results = asyncio.run(async_pull_all([goog_fin_nyse_url + ticker + ':NYSE' for ticker in tickers]))
    async_end = time.time()
    print(f"Time lapsed is: {async_end - async_start}")

    print("Running synchronously...")
    sync_start = time.time()
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        sync_pull(url)
    sync_end = time.time()
    print(f"Time lapsed is: {sync_end - sync_start}")

Running this way, the asynchronous version runs in about a second for me, as opposed to seven seconds synchronously.
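If the ticker list grows much larger, it can also be worth capping how many requests are in flight at once and sharing a single AsyncClient instead of opening one per URL. A small sketch using asyncio.Semaphore (the limit of 10 is arbitrary):
import asyncio
import httpx

async def async_pull_all(urls, max_concurrency=10):
    semaphore = asyncio.Semaphore(max_concurrency)

    async def bounded_pull(client, url):
        async with semaphore:  # at most max_concurrency requests in flight
            r = await client.get(url)
            return url, r.status_code

    # One shared AsyncClient reuses connections instead of opening one per URL.
    async with httpx.AsyncClient() as client:
        return await asyncio.gather(*(bounded_pull(client, url) for url in urls))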
Here's a nice pattern I use (I tend to change it a little each time). In general, I make a module async_utils.py and just import the top-level fetching function (e.g. here fetch_things), and then my code is free to forget about the internals (other than error handling). You can do it in other ways, but I like the 'functional' style of aiostream, and often find the repeated calls to the process function take certain defaults I set using functools.partial.

Note: async currying with partials is Python 3.8+ only.

You can pass in a tqdm.tqdm progress bar to pbar (initialised with known size total=len(things)) to have it update when each async response is processed.

import asyncio
import httpx
from aiostream import stream
from functools import partial

__all__ = ["fetch", "process", "async_fetch_urlset", "fetch_things"]

async def fetch(session, url, raise_for_status=False):
    response = await session.get(str(url))
    if raise_for_status:
        response.raise_for_status()
    return response

async def process_thing(data, things, pbar=None, verbose=False):
    # Map the response back to the thing it came from in the things list
    source_url = data.history[0].url if data.history else data.url
    thing = next(t for t in things if source_url == t.get("thing_url"))
    # Handle `data.content` here, where `data` is the `httpx.Response`
    if verbose:
        print(f"Processing {source_url=}")
    thing.update({"computed_value": "result goes here"})
    if pbar:
        pbar.update()

async def async_fetch_urlset(urls, things, pbar=None, verbose=False, timeout_s=10.0):
    timeout = httpx.Timeout(timeout=timeout_s)
    async with httpx.AsyncClient(timeout=timeout) as session:
        ws = stream.repeat(session)
        xs = stream.zip(ws, stream.iterate(urls))
        ys = stream.starmap(xs, fetch, ordered=False, task_limit=20)
        process = partial(process_thing, things=things, pbar=pbar, verbose=verbose)
        zs = stream.map(ys, process)
        return await zs

def fetch_things(urls, things, pbar=None, verbose=False):
    return asyncio.run(async_fetch_urlset(urls, things, pbar, verbose))

In this example, the input is a list of dicts (with string keys and values), things: list[dict[str,str]], and the key "thing_url" is accessed to retrieve the URL. Having a dict or object is desirable instead of just the URL string for when you want to 'map' the result back to the object it came from. The process_thing function is able to modify the input list things in-place (i.e. any changes are not scoped within the function; they change it back in the scope that called it).

You'll often find errors arise during async runs that you don't get when running synchronously, so you'll need to catch them and re-try. A common gotcha is to retry at the wrong level (e.g. around the entire loop). In particular, you'll want to import and catch httpcore.ConnectTimeout, httpx.ConnectTimeout, httpx.RemoteProtocolError, and httpx.ReadTimeout. Increasing the timeout_s parameter will reduce the frequency of the timeout errors by letting the AsyncClient 'wait' for longer, but doing so may in fact slow down your program (it won't "fail fast" quite as fast).
Here's an example of how to use the async_utils module given above:

from async_utils import fetch_things
import httpx
import httpcore

# UNCOMMENT THIS TO SEE ALL THE HTTPX INTERNAL LOGGING
#import logging
#log = logging.getLogger()
#log.setLevel(logging.DEBUG)
#log_format = logging.Formatter('[%(asctime)s] [%(levelname)s] - %(message)s')
#console = logging.StreamHandler()
#console.setLevel(logging.DEBUG)
#console.setFormatter(log_format)
#log.addHandler(console)

things = [
    {"url": "https://python.org", "name": "Python"},
    {"url": "https://www.python-httpx.org/", "name": "HTTPX"},
]
#log.debug("URLSET:" + str(list(t.get("url") for t in things)))

def make_urlset(things):
    """Make a URL generator (empty if all have been fetched)"""
    urlset = (t.get("url") for t in things if "computed_value" not in t)
    return urlset

retryable_errors = (
    httpcore.ConnectTimeout,
    httpx.ConnectTimeout,
    httpx.RemoteProtocolError,
    httpx.ReadTimeout,
)

# ASYNCHRONOUS:
max_retries = 100
for i in range(max_retries):
    print(f"Retry {i}")
    try:
        urlset = make_urlset(things)
        foo = fetch_things(urls=urlset, things=things, verbose=True)
    except retryable_errors as exc:
        print(f"Caught {exc!r}")
        if i == max_retries - 1:
            raise
    except Exception:
        raise

# SYNCHRONOUS:
#for t in things:
#    resp = httpx.get(t["url"])

In this example I set a key "computed_value" on a dictionary once the async response has successfully been processed, which then prevents that URL from being entered into the generator on the next round (when make_urlset is called again). In this way, the generator gets progressively smaller. You can also do it with lists, but I find a generator of the URLs to be pulled works reliably. For an object you'd change the dictionary key assignment/access (update/in) to attribute assignment/access (setattr/hasattr).
I wanted to post a working version of the code using futures - virtually the same run-time:

import httpx
import asyncio
import time

#
#--------------------------------------------------------------------
# Synchronous pull
#--------------------------------------------------------------------
#
def sync_pull(url):
    r = httpx.get(url)
    print(r.status_code)

#
#--------------------------------------------------------------------
# Asynchronous Pull
#--------------------------------------------------------------------
#
async def async_pull(url):
    async with httpx.AsyncClient() as client:
        r = await client.get(url)
        print(r.status_code)

#
#--------------------------------------------------------------------
# Build tasks queue & execute coroutines
#--------------------------------------------------------------------
#
async def build_task() -> None:
    goog_fin_nyse_url = 'https://www.google.com/finance/quote/'
    tickers = ['F', 'TWTR', 'CVX', 'VZ', 'GME', 'GM', 'PG', 'AAL',
               'MARK', 'AAP', 'THO', 'NGD', 'ZSAN', 'SEAC', ]
    tasks = []
    #
    ## The following block of code will create a queue full of function
    ## calls
    for ticker in tickers:
        url = goog_fin_nyse_url + ticker + ':NYSE'
        tasks.append(asyncio.ensure_future(async_pull(url)))

    start_time = time.time()
    #
    ## This block of code will dereference the function calls
    ## from the queue, which will cause them all to run
    ## rapidly
    await asyncio.gather(*tasks)
    #
    ## Calculate time lapsed
    finish_time = time.time()
    elapsed_time = finish_time - start_time
    print(f"\n Time spent processing: {elapsed_time} ")

# Start from here
if __name__ == "__main__":
    asyncio.run(build_task())
Threading using Python limiting the number of threads and passing list of different values as arguments
I am basically accessing an API with various values coming from the list list_of_string_ids. I am expecting to create 20 threads, tell them to do something, write the values to a DB, then have them all return zero and go again to take the next data, etc. I am having trouble getting this to work using threading.

Below is code which works correctly as expected, however it takes very long to finish execution (around 45 minutes or more). The website I am getting the data from allows async I/O at a rate of 20 requests. I assume this could make my code 20x faster, but I'm not really sure how to implement it.

import requests
import json
import time
import threading
import queue

headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer TOKEN'}
start = time.perf_counter()

project_id_number = 123
project_id_string = 'pjiji4533'
name = "Assignment"
list_of_string_ids = [132, 123, 5345, 123, 213, 213, ..., n]  # Len of list is 20000

def construct_url_threaded(project_id_number, id_string):
    url = f"https://api.test.com/{}/{}".format(project_id_number, id_string)
    r = requests.get(url, headers=headers)  # Max rate allowed is 20 requests at once.
    json_text = r.json()
    comments = json.dumps(json_text, indent=2)
    for item in json_text['data']:
        # DO STUFF

for string_id in all_string_ids_list:
    construct_url_threaded(project_id_number=project_id_number, id_string=string_id)

My trial is below:

def main():
    q = queue.Queue()
    threads = [threading.Thread(target=create_url_threaded, args=(project_id_number, string_id, q)) for i in range(5)]  # 5 is for testing
    for th in threads:
        th.daemon = True
        th.start()
    result1 = q.get()
    result2 = q.get()
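A simpler way to get the "20 workers" behaviour described above is concurrent.futures.ThreadPoolExecutor, which caps the thread count for you. This is only a rough sketch reusing the question's names; construct_url_threaded is assumed here to return whatever should later be written to the DB:
from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch_all(project_id_number, all_string_ids_list, max_workers=20):
    results = []
    # The pool never runs more than max_workers requests at once,
    # which matches the API's limit of 20 concurrent requests.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {
            pool.submit(construct_url_threaded, project_id_number, string_id): string_id
            for string_id in all_string_ids_list
        }
        for future in as_completed(futures):
            string_id = futures[future]
            try:
                results.append((string_id, future.result()))
            except Exception as exc:
                print(f"{string_id} failed: {exc}")
    return results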
asynchronous error handling and response processing of an unbounded list of tasks using zeep
So here is my use case: I read rows from a database containing the information needed to make a complex SOAP call (I'm using zeep to do these calls). One row from the database corresponds to one request to the service.

There can be up to 20 thousand lines, so I don't want to read everything into memory before making the calls. I need to process the responses - when the response is OK, I need to store some returned information back into my database, and when there is an exception I need to process the exception for that particular request/response pair. I also need to capture some external information at the time of the request creation, so that I know where to store the response from the request. In my current code I'm using the delightful property of gather() that makes the results come back in the same order.

I read the relevant PEPs and Python documentation but I'm still very confused, as there seem to be multiple ways to solve the same problem. I also went through countless exercises on the web, but the examples are all trivial - it's either asyncio.sleep() or some web scraping with a finite list of URLs.

The solution that I have come up with so far kinda works - the asyncio.gather() method is very, very useful, but I have not been able to 'feed' it from a generator. I'm currently just counting to an arbitrary size and then starting a .gather() operation. I've transcribed the code, with boring parts left out, and I've tried to anonymise it.

I've tried solutions involving semaphores, queues, and different event loops, but I'm failing every time. Ideally I'd like to be able to create Futures 'continuously' - I think I'm missing the logic of 'convert this awaitable call to a future'.

I'd be grateful for any help!

import asyncio
from asyncio import Future

import zeep
from zeep.plugins import HistoryPlugin

history = HistoryPlugin()

max_concurrent_calls = 5
provoke_errors = True

def export_data_async(db_variant: str, order_nrs: set):
    st = time.time()
    results = []
    loop = asyncio.get_event_loop()

    def get_client1(service_name: str, system: Systems = Systems.ACME) -> Tuple[zeep.Client, zeep.client.Factory]:
        client1 = zeep.Client(wsdl=system.wsdl_url(service_name=service_name),
                              transport=transport,
                              plugins=[history],
                              )
        factory_ns2 = client1.type_factory(namespace='ns2')
        return client1, factory_ns2

    table = 'ZZZZ'
    moveback_table = 'EEEEEE'
    moveback_dict = create_default_empty_ordered_dict('attribute1 attribute2 attribute3 attribute3')

    client, factory = get_client1(service_name='ACMEServiceName')
    if log.isEnabledFor(logging.DEBUG):
        client.wsdl.dump()
        zeep_log = logging.getLogger('zeep.transports')
        zeep_log.setLevel(logging.DEBUG)

    with Db(db_variant) as db:
        db.open_db(CON_STRING[db_variant])
        db.init_table_for_read(table, order_list=order_nrs)
        counter_failures = 0
        tasks = []
        sids = []
        results = []

        def handle_future(future: Future) -> None:
            results.extend(future.result())

        def process_tasks_concurrently() -> None:
            nonlocal tasks, sids, counter_failures, results
            futures = asyncio.gather(*tasks, return_exceptions=True)
            futures.add_done_callback(handle_future)
            loop.run_until_complete(futures)
            for i, response_or_fault in enumerate(results):
                if type(response_or_fault) in [zeep.exceptions.Fault, zeep.exceptions.TransportError]:
                    counter_failures += 1
                    log_webservice_fault(sid=sids[i], db=db, err=response_or_fault, object=table)
                else:
                    db.write_dict_to_table(
                        moveback_table,
                        {'sid': sids[i],
                         'attribute1': response_or_fault['XXX']['XXX']['xxx'],
                         'attribute2': response_or_fault['XXX']['XXX']['XXXX']['XXX'],
                         'attribute3': response_or_fault['XXXX']['XXXX']['XXX'],
                         }
                    )
            db.commit_db_con()
            tasks = []
            sids = []
            results = []
            return

        for row in db.rows(table):
            if int(row.id) % 2 == 0 and provoke_errors:
                payload = faulty_message_payload(row=row,
                                                 factory=factory,
                                                 )
            else:
                payload = message_payload(row=row,
                                          factory=factory,
                                          )

            tasks.append(client.service.myRequest(
                MessageHeader=factory.MessageHeader(**message_header_arguments(row=row)),
                myRequestPayload=payload,
                _soapheaders=[security_soap_header],
            ))
            sids.append(row.sid)

            if len(tasks) == max_concurrent_calls:
                process_tasks_concurrently()

        if tasks:  # this is the remainder of len(db.rows) % max_concurrent_calls
            process_tasks_concurrently()

        loop.run_until_complete(transport.session.close())
        db.execute_this_statement(statement=update_sql)
        db.commit_db_con()

    log.info(db.activity_log)
    if counter_failures:
        log.info(f"{table :<25} Count failed: {counter_failures}")

    print("time async: %.2f" % (time.time() - st))
    return results

Failed attempt with Queue (blocks at await client.service):

loop = asyncio.get_event_loop()
counter = 0
results = []

async def payload_generator(db_variant: str, order_nrs: set):
    # code that generates the data for the request
    yield counter, row, payload

async def service_call_worker(queue, results):
    while True:
        counter, row, payload = await queue.get()
        results.append(await client.service.myServicename(
            MessageHeader=calculate_message_header(row=row),
            myPayload=payload,
            _soapheaders=[security_soap_header],
        ))
        print(colorama.Fore.BLUE + f'after result returned {counter}')
        # Here do the relevant processing of response or error
        queue.task_done()

async def main_with_q():
    n_workers = 3
    queue = asyncio.Queue(n_workers)
    e = pprint.pformat(queue)
    p = payload_generator(DB_VARIANT, order_list_from_args())
    results = []
    workers = [asyncio.create_task(service_call_worker(queue, results))
               for _ in range(n_workers)]
    async for c in p:
        await queue.put(c)

    await queue.join()  # wait for all tasks to be processed
    for worker in workers:
        worker.cancel()

if __name__ == '__main__':
    try:
        loop.run_until_complete(main_with_q())
        loop.run_until_complete(transport.session.close())
    finally:
        loop.close()
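For the general "feed gather from a generator" problem, one pattern is to keep a bounded set of in-flight tasks and drain completed ones with asyncio.wait while iterating. This sketch is independent of zeep: call_service is a stand-in for whatever awaitable is built per row, the sid attribute mirrors the question's rows, and Python 3.8+ is assumed for named tasks:
import asyncio

async def run_bounded(rows, call_service, max_in_flight=5):
    """Consume `rows` lazily while keeping at most `max_in_flight` calls running."""
    in_flight = set()
    results = []

    def drain(done_tasks):
        for task in done_tasks:
            try:
                # Success and failure are recorded per request, keyed by the task name.
                results.append((task.get_name(), task.result()))
            except Exception as exc:
                results.append((task.get_name(), exc))

    for row in rows:
        task = asyncio.create_task(call_service(row), name=str(getattr(row, "sid", row)))
        in_flight.add(task)
        if len(in_flight) >= max_in_flight:
            done, in_flight = await asyncio.wait(in_flight, return_when=asyncio.FIRST_COMPLETED)
            drain(done)

    if in_flight:
        done, _ = await asyncio.wait(in_flight)
        drain(done)
    return results
Because rows is consumed one at a time, nothing forces the whole database table into memory, and per-row success or exception handling happens in drain() rather than around the entire loop.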
How to rotate proxies with Python requests
I'm trying to do some scraping, but I get blocked every 4 requests. I have tried to change proxies, but the error is the same. What should I do to change them properly?

Here is some code where I try it. First I get proxies from a free website, then I make the request with the new proxy, but it doesn't work because I get blocked.

from fake_useragent import UserAgent
import requests

def get_player(id, proxy):
    ua = UserAgent()
    headers = {'User-Agent': ua.random}
    url = 'https://www.transfermarkt.es/jadon-sancho/profil/spieler/' + str(id)
    try:
        print(proxy)
        r = requests.get(url, headers=headers, proxies=proxy)
    except:
        .... code to manage the data ....

Getting proxies:

def get_proxies():
    ua = UserAgent()
    headers = {'User-Agent': ua.random}
    url = 'https://free-proxy-list.net/'
    r = requests.get(url, headers=headers)
    page = BeautifulSoup(r.text, 'html.parser')
    proxies = []
    for proxy in page.find_all('tr'):
        i = ip = port = 0
        for data in proxy.find_all('td'):
            if i == 0:
                ip = data.get_text()
            if i == 1:
                port = data.get_text()
            i += 1
        if ip != 0 and port != 0:
            proxies += [{'http': 'http://' + ip + ':' + port}]
    return proxies

Calling the functions:

proxies = get_proxies()
for i in range(1, 100):
    player = get_player(i, proxies[i//4])
    .... code to manage the data ....

I know the proxy scraping works, because when I print them I see something like:

{'http': 'http://88.12.48.61:42365'}

I would like to not get blocked.
I recently had this same issue, but using proxy servers online as recommended in other answers is always risky (from a privacy standpoint), slow, or unreliable. Instead, you can use the requests-ip-rotator python library to proxy traffic through AWS API Gateway, which gives you a new IP each time:

pip install requests-ip-rotator

This can be used as follows (for your site specifically):

import requests
from requests_ip_rotator import ApiGateway, EXTRA_REGIONS

gateway = ApiGateway("https://www.transfermarkt.es")
gateway.start()

session = requests.Session()
session.mount("https://www.transfermarkt.es", gateway)

response = session.get("https://www.transfermarkt.es/jadon-sancho/profil/spieler/your_id")
print(response.status_code)

# Only run this line if you are no longer going to run the script, as it takes longer to boot up again next time.
gateway.shutdown()

Combined with multithreading/multiprocessing, you'll be able to scrape the site in no time. The AWS free tier provides you with 1 million requests per region, so this option will be free for all reasonable scraping.
import requests
from itertools import cycle

list_proxy = ['socks5://Username:Password@IP1:20000',
              'socks5://Username:Password@IP2:20000',
              'socks5://Username:Password@IP3:20000',
              'socks5://Username:Password@IP4:20000',
              ]

proxy_cycle = cycle(list_proxy)
# Prime the pump
proxy = next(proxy_cycle)

for i in range(1, 10):
    proxy = next(proxy_cycle)
    print(proxy)
    proxies = {
        "http": proxy,
        "https": proxy
    }
    r = requests.get(url='https://ident.me/', proxies=proxies)
    print(r.text)
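Note that requests only understands socks5:// proxy URLs when PySocks is installed; without it, the request above fails with an error about missing SOCKS dependencies. Installing the extra is enough:

pip install requests[socks]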
The problem with using free proxies from sites like this is:

- websites know about these and may block just because you're using one of them
- you don't know that other people haven't gotten them blacklisted by doing bad things with them
- the site is likely using some form of other identifier to track you across proxies based on other characteristics (device fingerprinting, proxy-piercing, etc)

Unfortunately, there's not a lot you can do other than be more sophisticated (distribute across multiple devices, use VPN/TOR, etc) and risk your IP being blocked for attempting DDoS-like traffic or, preferably, see if the site has an API for access.
Presumably you have your own pool of proxies - what is the best way to rotate them?

First, blindly picking a random proxy risks repeating a connection from the same proxy multiple times in a row. In addition, most connection-pattern-based blocking uses the proxy subnet (3rd number) rather than the host - it's best to prevent repeats at the subnet level.

It's also a good idea to track proxy performance, as not all proxies are equal - we want to use our better-performing proxies more often and let dead proxies cool down.

All of this can be done with weighted randomization, which is implemented by Python's random.choices() function:

import random
from time import time
from typing import List, Literal

class Proxy:
    """container for a proxy"""

    def __init__(self, ip, type_="datacenter") -> None:
        self.ip: str = ip
        self.type: Literal["datacenter", "residential"] = type_
        _, _, self.subnet, self.host = ip.split(":")[0].split('.')
        self.status: Literal["alive", "unchecked", "dead"] = "unchecked"
        self.last_used: int = None

    def __repr__(self) -> str:
        return self.ip

    def __str__(self) -> str:
        return self.ip

class Rotator:
    """weighted random proxy rotator"""

    def __init__(self, proxies: List[Proxy]):
        self.proxies = proxies
        self._last_subnet = None

    def weigh_proxy(self, proxy: Proxy):
        weight = 1_000
        if proxy.subnet == self._last_subnet:
            weight -= 500
        if proxy.status == "dead":
            weight -= 500
        if proxy.status == "unchecked":
            weight += 250
        if proxy.type == "residential":
            weight += 250
        if proxy.last_used:
            _seconds_since_last_use = time() - proxy.last_used
            weight += _seconds_since_last_use
        return weight

    def get(self):
        proxy_weights = [self.weigh_proxy(p) for p in self.proxies]
        proxy = random.choices(
            self.proxies,
            weights=proxy_weights,
            k=1,
        )[0]
        proxy.last_used = time()
        self._last_subnet = proxy.subnet
        return proxy

If we mock-run this Rotator we can see how weighted randoms distribute our connections:

from collections import Counter

if __name__ == "__main__":
    proxies = [
        # these will be used more often
        Proxy("xx.xx.121.1", "residential"),
        Proxy("xx.xx.121.2", "residential"),
        Proxy("xx.xx.121.3", "residential"),
        # these will be used less often
        Proxy("xx.xx.122.1"),
        Proxy("xx.xx.122.2"),
        Proxy("xx.xx.123.1"),
        Proxy("xx.xx.123.2"),
    ]
    rotator = Rotator(proxies)

    # let's mock some runs:
    _used = Counter()
    _failed = Counter()

    def mock_scrape():
        proxy = rotator.get()
        _used[proxy.ip] += 1
        if proxy.host == "1":
            # simulate proxies with .1 being significantly worse
            _fail_rate = 60
        else:
            _fail_rate = 20
        if random.randint(0, 100) < _fail_rate:
            # simulate some failure
            _failed[proxy.ip] += 1
            proxy.status = "dead"
            mock_scrape()
        else:
            proxy.status = "alive"
        return

    for i in range(10_000):
        mock_scrape()

    for proxy, count in _used.most_common():
        print(f"{proxy} was used {count:>5} times")
        print(f"   failed {_failed[proxy]:>5} times")

# will print:
# xx.xx.121.2 was used  2629 times
#    failed   522 times
# xx.xx.121.3 was used  2603 times
#    failed   508 times
# xx.xx.123.2 was used  2321 times
#    failed   471 times
# xx.xx.122.2 was used  2302 times
#    failed   433 times
# xx.xx.121.1 was used  1941 times
#    failed  1187 times
# xx.xx.122.1 was used  1629 times
#    failed   937 times
# xx.xx.123.1 was used  1572 times
#    failed   939 times

By using weighted randoms we can create a connection pattern that appears random but smart. We can apply generic patterns like not using proxies from the same IP family in a row, as well as custom per-target logic like prioritizing North American IPs for NA targets, etc. For more on this see my blog How to Rotate Proxies in Web Scraping.