I have a Python script that monitors the USB ports (it checks which devices are connected). The script performs some scraping and displays a desktop notification. I would like it to run automatically on every boot. This is the code:
#! /usr/bin/python
import glib
import re
import subprocess
import requests
import bs4
import datetime
import sys
import os
import time
from selenium import webdriver
from pyudev import Context, Monitor
from selenium.common.exceptions import NoSuchElementException
def demote():
def result():
os.setgid(100)
os.setuid(1000)
return result
def inotify(title, message):
subprocess.call(['notify-send', '{}\n'.format(title), '{0}\n'.format(message)], preexec_fn=demote())
#os.system('notify-send ' + title + ' ' + message)
def get_network_data(tout):
"""Scrapes balance data from ISP website."""
if tout is not None:
try:
# Do some scraping
if data_found:
full_msg = '{0}\n{1}'.format(my_balance.capitalize(), airtime_balance.capitalize())
inotify('My Balance', full_msg)
#subprocess.call(['notify-send', 'My Balance', '\n{0}\n{1}'.format(my_balance.capitalize(), airtime_balance.capitalize())], preexec_fn=demote())
else:
print('Could not retrieve data from page...')
full_msg = '{0}'.format('Error: Could not retrieve data from page.')
inotify('My Balance', full_msg)
#subprocess.call(['notify-send', 'My Balance', '\n{0}'.format('Error: Could not retrieve data from page.')], preexec_fn=demote())
except NoSuchElementException:
print('Could not locate element...')
full_msg = '{0}'.format('Error: Could not locate element - acc.')
inotify('My Balance', full_msg)
#subprocess.call(['notify-send', 'iMonitor:get_network_data', '\n{0}'.format('Error: Could not locate element - acc.')], preexec_fn=demote())
else:
print('Could not find USB device...')
full_msg = '\n{0}'.format('Error: Could not find USB device.')
inotify('My Balance', full_msg)
#subprocess.call(['notify-send', 'iMonitor', '\n{0}'.format('Error: Could not find USB device.')], preexec_fn=demote())
def identify_phone(observer, device):
"""Identifies if specific USB device (phone) is connected (tethered)."""
global last_updated, initial_search, msg_count
current_time = datetime.datetime.now()
time_diff = current_time - last_updated
if (time_diff.seconds > 300) or initial_search:
try:
time.sleep(0.25)
tout = subprocess.check_output("lsusb | grep 1234:5678", shell=True)
except subprocess.CalledProcessError:
tout = None
last_updated = datetime.datetime.now()
initial_search = False
get_network_data(tout)
if time_diff.seconds > 10:
msg_count = 1
if not initial_search and msg_count == 1:
wait_time = datetime.datetime.fromtimestamp(600 - time_diff.seconds)
message = wait_time.strftime('You may have to wait %-M minute(s), %-S second(s) before another check is done.')
print('Could not retrieve data from page...')
full_msg = '\n{0}'.format(message)
inotify('My Balance', full_msg)
#subprocess.call(['notify-send', 'iMonitor:Identify Phone', '\n{0}'.format(message)], preexec_fn=demote())
msg_count += 1
try:
initial_search = True
last_updated = datetime.datetime.now()
msg_count = 1
try:
from pyudev.glib import MonitorObserver
except ImportError:
from pyudev.glib import GUDevMonitorObserver as MonitorObserver
context = Context()
monitor = Monitor.from_netlink(context)
monitor.filter_by(subsystem='usb')
observer = MonitorObserver(monitor)
observer.connect('device-added', identify_phone)
monitor.start()
glib.MainLoop().run()
except KeyboardInterrupt:
print('\nShutdown requested.\nExiting gracefully...')
sys.exit(0)
However, the script runs as root and, although I have tried to change the uid and gid, I have not managed to get it to display desktop notifications to the logged-in (normal) user. Any help would be appreciated.
PS:
OS - OpenSUSE 42.1
KDE version - KDE Plasma 5.5.5
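For what it's worth, dropping the uid/gid alone is usually not enough: notify-send also needs the desktop user's DISPLAY and DBUS_SESSION_BUS_ADDRESS to reach the session's notification daemon. Below is a minimal sketch of the idea, assuming uid 1000 / gid 100 and a systemd-style session bus path; both are assumptions that may differ on a given system.
import os
import subprocess

def notify_user(title, message, uid=1000, gid=100):
    """Send a desktop notification to the logged-in user from a root process (sketch)."""
    env = dict(os.environ)
    env['DISPLAY'] = ':0'  # assumption: the user's X display
    # assumption: systemd-style user bus; on older setups read the address from
    # a process in the user's session instead (e.g. /proc/<pid>/environ)
    env['DBUS_SESSION_BUS_ADDRESS'] = 'unix:path=/run/user/{0}/bus'.format(uid)

    def drop_privileges():
        os.setgid(gid)
        os.setuid(uid)

    subprocess.call(['notify-send', title, message],
                    preexec_fn=drop_privileges, env=env)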
I assume you need the script to run in KDE, in which case you can simply copy or link the script into the ~/.kde/Autostart/ directory in KDE 4. In KDE 5 the directory has moved to ~/.config/autostart.
I guess you can also use a cron job with the @reboot modifier. It will run every time your system boots up.
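A minimal crontab entry for this could look like the following (added via crontab -e; the interpreter path and script path are placeholders):
@reboot /usr/bin/python /home/youruser/scripts/usb_monitor.py
Note that a job in root's crontab runs as root; putting it in your own user's crontab at least runs the script as your user.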
Related
I decided to create a checker for Instagram accounts.
Please tell me how to change the IP when the browser is restarted. I am using a Tor profile, so the IP changes automatically every 10 minutes. How can I make the IP change once per minute? Is this even possible?
Maybe there is some set_preference option, or some other way to change the IP when restarting the Firefox browser with the Tor settings.
import time
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
file = open('Good2.txt', encoding='utf-8-sig').read().split('\n')
goods = open('good_acc.txt', 'a+')
def settings_browser():
""" Настройки браузера FireFox. """
profile = FirefoxProfile(r'C:\Users\ASUS\Desktop\Scrape\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default')
profile.set_preference('network.proxy.type', 1)
profile.set_preference('network.proxy.socks', '127.0.0.1')
profile.set_preference('network.proxy.socks_port', 9050)
profile.set_preference("network.proxy.socks_remote_dns", False)
# get a huge speed increase by not downloading images
profile.set_preference("permissions.default.image", 2)
profile.update_preferences()
return profile
def check_email():
""" Принимает всплывающее окно куки. Чекает валидность почт, если почта валидна, то сохраняет в файл 'good_acc.txt'. """
driver = webdriver.Firefox(firefox_profile=settings_browser(), executable_path=r'C:\Users\ASUS\Desktop\Scrape\geckodriver.exe')
for login in file:
driver.get("https://www.instagram.com/accounts/password/reset/")
body = driver.find_elements_by_class_name('pbNvD.FrS-d.gD9tr')
for bd in body:
if bd.find_element_by_class_name('aOOlW.bIiDR').text == 'Принять все':  # the "Accept all" cookies button
bd.find_element_by_class_name('aOOlW.bIiDR').click()
time.sleep(7)
authorization = driver.find_elements_by_class_name("AHCwU")
pops = driver.find_elements_by_class_name("_-rjm")
username = login.split(":")[0]
password = login.split(":")[1]
for data in authorization:
# email / username field
data_login = data.find_element_by_name('cppEmailOrUsername')
data_login.click()
data_login.send_keys(username)
time.sleep(1)
# submit button
clock_button = data.find_element_by_class_name('sqdOP.L3NKy.y3zKF')
clock_button.click()
time.sleep(2)
for pop in pops:
if 'Мы отправили ссылку для восстановления' in pop.find_element_by_class_name('tA2fc').text:  # "We sent a recovery link..."
# the email is registered
goods.write(username + ' : ' + password + '\n')
print('Valid account ' + username + ' : ' + password)
elif 'Подождите несколько минут, прежде чем пытаться снова.' in pop.find_element_by_class_name('tA2fc').text:  # "Wait a few minutes before trying again."
driver.quit()
driver = webdriver.Firefox(firefox_profile=settings_browser(), executable_path=r'C:\Users\ASUS\Desktop\Scrape\geckodriver.exe')
print('Successfully restarted the driver because of the "wait a few minutes before trying again" message')
elif 'feedback_required' in pop.find_element_by_class_name('tA2fc').text:
driver.quit()
driver = webdriver.Firefox(firefox_profile=settings_browser(), executable_path=r'C:\Users\ASUS\Desktop\Scrape\geckodriver.exe')
print('Successfully restarted the driver because of "feedback_required"')
else:
# the email is not registered
print('Invalid account ' + username)
goods.close()
def main():
check_email()
if __name__ == '__main__':
main()
If you set ControlPort 9051 (and a hashed control password) in the Tor config file (on Linux, /etc/tor/torrc), then you can use even a standard socket to send a signal to tor to change the IP.
import socket
s = socket.socket()
s.connect(('127.0.0.1', 9051))
s.send('AUTHENTICATE "your_password"\r\nSIGNAL NEWNYM\r\n'.encode())
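For reference, the relevant torrc lines could look like this; the hash is a placeholder that you generate yourself with tor --hash-password your_password:
ControlPort 9051
HashedControlPassword 16:...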
It takes a few seconds to get a new IP from the Tor network, and after that the proxy should use the new IP.
You may also use the stem module for this (it also needs the settings in torrc):
from stem import Signal
from stem.control import Controller

with Controller.from_port(port=9051) as controller:
    controller.authenticate(password='your_password')
    controller.signal(Signal.NEWNYM)
More: Python: How to use Tor Network with requests to change IP?
EDIT:
import socket
import time

def main():
    # send signal to `tor` to change IP
    s = socket.socket()
    s.connect(('127.0.0.1', 9051))
    s.send('AUTHENTICATE "your_password"\r\nSIGNAL NEWNYM\r\n'.encode())
    # wait a few seconds for the new IP
    time.sleep(3)
    check_email()
I'm writing a script to access a website using proxies with multiple threads, but now I'm stuck: when I run the script below, it opens 5 browsers but all 5 use the same proxy. I want the 5 browsers to use different proxies. Can someone help me complete it? Thank you.
Here is my script:
from selenium import webdriver
from selenium import webdriver
import time , random
import threading
def e():
a = open("sock2.txt", "r")
for line in a.readlines():
b = line
prox = b.split(":")
IP = prox[0]
PORT = int(prox[1].strip("\n"))
print(IP)
print(PORT)
profile = webdriver.FirefoxProfile()
profile.set_preference("network.proxy.type", 1)
profile.set_preference("network.proxy.socks", IP)
profile.set_preference("network.proxy.socks_port", PORT)
try:
driver = webdriver.Firefox(firefox_profile=profile)
driver.get("http://www.whatsmyip.org/")
except:
print("Proxy Connection Error")
driver.quit()
else:
time.sleep(random.randint(40, 70))
driver.quit()
for i in range(5):
t = threading.Thread(target=e)
t.start()
(Wishing everyone a happy and lucky new year)
Dominik Lašo captured it correctly: each thread processes the file from the beginning. Here's roughly how it should look:
from selenium import webdriver
import time, random
import threading

def e(ip, port):
    profile = webdriver.FirefoxProfile()
    profile.set_preference("network.proxy.type", 1)
    profile.set_preference("network.proxy.socks", ip)
    profile.set_preference("network.proxy.socks_port", port)
    try:
        driver = webdriver.Firefox(firefox_profile=profile)
        driver.get("http://www.whatsmyip.org/")
    except:
        print("Proxy Connection Error")
        driver.quit()
    else:
        time.sleep(random.randint(40, 70))
        driver.quit()

my_threads = []
with open("sock2.txt", "r") as fd:
    for line in fd.readlines():
        line = line.strip()
        if not line:
            continue
        prox = line.split(":")
        ip = prox[0]
        port = int(prox[1])
        print('-> {}:{}'.format(ip, port))
        t = threading.Thread(target=e, args=(ip, port,))
        t.start()
        my_threads.append(t)

for t in my_threads:
    t.join()
(I personally think the problem is that each thread you start goes through the text file from the beginning, because you are not consuming the lines anywhere.)
I have come across the same problem when I was doing the same thing you are doing now. I know you would rather have help with your own code, but I am in a hurry and still want to help ;), so here is code that works for me. There is even a task killer for Chrome (you just have to adapt it to Firefox).
If I were you, I would start the threads after opening the file, because it looks like you are reading the same file from the first line every time a thread starts.
import os
import time
import threading
from selenium import webdriver

links = []  # the links you want to go to

def funk(xxx, website):
    link = website
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--proxy-server=%s' % str(xxx))
    chromedriver = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'chromedriver')
    chrome = webdriver.Chrome(chromedriver, chrome_options=chrome_options)
    try:
        pass  # do stuff
    except:
        print('exception')
    chrome.close()

number_of_used_proxies = 0
for link in links:
    f = open('proxies.txt')
    line = f.readline()
    x = 1
    xx = 0
    while line:
        if number_of_used_proxies < 10:
            print(line)
            line = f.readline()
            try:
                threading.Timer(40, funk, [line, link]).start()
            except Exception as e:
                print(e)
            time.sleep(1)
            x += 1
            number_of_used_proxies += 1
        else:
            time.sleep(100)
            for x in range(1, 10):
                try:
                    # kill the leftover browsers (the Windows variant was left as an unused string in the original)
                    xzxzx = 'os.system("taskkill /f /im chrome.exe")'
                    os.system("killall 'Google Chrome'")
                except:
                    print("NoMore")
            time.sleep(10)
            number_of_used_proxies = 0
    f.close()
Hope it helps :)
vantuong: Here's how you can solve the problem with ThreadPoolExecutor.
Reference: https://docs.python.org/3/library/concurrent.futures.html
from selenium import webdriver
import time, random
#import threading
import concurrent.futures

MAX_WORKERS = 5

def get_proxys(data_file):
    proxys = []
    with open(data_file, "r") as fd:
        for line in fd.readlines():
            line = line.strip()
            if not line:
                continue
            prox = line.split(":")
            ip = prox[0]
            port = int(prox[1])
            proxys.append((ip, port))
    return proxys

def e(ip, port):
    profile = webdriver.FirefoxProfile()
    profile.set_preference("network.proxy.type", 1)
    profile.set_preference("network.proxy.socks", ip)
    profile.set_preference("network.proxy.socks_port", port)
    try:
        driver = webdriver.Firefox(firefox_profile=profile)
        driver.get("http://www.whatsmyip.org/")
    except:
        print("Proxy Connection Error")
        driver.quit()
    else:
        time.sleep(random.randint(40, 70))
        driver.quit()

with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
    proxys = get_proxys('sock2.txt')
    tasks = {executor.submit(e, proxy[0], proxy[1]): proxy for proxy in proxys}
    for task in concurrent.futures.as_completed(tasks):
        proxy = tasks[task]
        try:
            data = task.result()
        except Exception as exc:
            print('{} generated an exception: {}'.format(proxy, exc))
        else:
            print('{} completed successfully'.format(proxy))
Fun exercise: Try playing around with different values of MAX_WORKERS.
I wrote this crawler in Python; it dumps several parameters to a JSON output file based on an input list of domains.
I have this question:
Do I need to close the HTTP connection in each thread? The input data is about 5 million items. At the beginning it processes at a rate of about 50 iterations per second, but after some time it drops to 1-2 per second and/or hangs (no kernel messages and no errors on stdout). Is this caused by the code, or is it network related? I suspect the software, since when I restart it, it starts again at a high rate (about 50 iterations per second).
Any tips on how to improve the code below are also welcome, especially on speed and crawling throughput.
Code in question:
import re
import sys
import urllib2
from threading import Thread
import pprint
from tqdm import tqdm
import lxml.html
from Queue import Queue
from geoip import geolite2
import pycountry
from tld import get_tld
resfile = open("out.txt",'a')
concurrent = 200
def doWork():
while True:
url = q.get()
status = getStatus(url)
doSomethingWithResult(status)
q.task_done()
def getStatus(ourl):
try:
response = urllib2.urlopen("http://"+ourl)
peer = response.fp._sock.fp._sock.getpeername()
ip = peer[0]
header = response.info()
html = response.read()
html_element = lxml.html.fromstring(html)
generator = html_element.xpath("//meta[@name='generator']/@content")
try:
match = geolite2.lookup(ip)
if match is not None:
country= match.country
try:
c=pycountry.countries.lookup(country)
country=c.name
except:
country=""
except:
country=""
try:
res=get_tld("http://www"+ourl, as_object=True)
tld=res.suffix
except:
tld=""
try:
match = re.search(r'[\w\.-]+@[\w\.-]+', html)
email=match.group(0)
except:
email=""
try:
item= generator[0]
val = "{ \"Domain\":\"http://"+ourl.rstrip()+"\",\"IP:\""+ip+"\"," + "\"Server\":"+ "\""+str(header.getheader("Server")).replace("None","")+"\",\"PoweredBy\":" + "\""+str(header.getheader("X-Powered-By")).replace("None","")+"\""+",\"MetaGenerator\":\""+item+"\",\"Email\":\""+email+"\",\"Suffix\":\""+tld+"\",\"CountryHosted\":\""+country+"\" }"
except:
val = "{ \"Domain\":\"http://"+ourl.rstrip()+"\",\"IP:\""+ip+"\"," + "\"Server\":"+ "\""+str(header.getheader("Server")).replace("None","")+"\",\"PoweredBy\":" + "\""+str(header.getheader("X-Powered-By")).replace("None","")+"\""+",\"MetaGenerator\":\"\",\"Email\":\""+email+"\",\"Suffix\":\""+tld+"\",\"CountryHosted\":\""+country+"\" }"
return val
except Exception as e:
#print "error"+str(e)
pass
def doSomethingWithResult(status):
if status:
resfile.write(str(status)+"\n")
q = Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
t.start()
try:
for url in tqdm(open('list.txt')):
q.put(url.strip())
status = open("status.txt",'w')
status.write(str(url.strip()))
q.join()
except KeyboardInterrupt:
sys.exit(1)
Update 1:
Closing the socket and file descriptor makes it work better; it no longer seems to hang after some time. Performance is about 50 requests/sec on a home laptop and about 100 requests/sec on a VPS.
import re
from threading import Thread
import httplib, sys
import urllib2
import pprint
from tqdm import tqdm
import lxml.html
from Queue import Queue
from geoip import geolite2
import pycountry
from tld import get_tld
import json
resfile = open("out.txt",'a')
concurrent = 200
def doWork():
while True:
url = q.get()
status = getStatus(url)
doSomethingWithResult(status)
q.task_done()
def getStatus(ourl):
try:
response = urllib2.urlopen("http://"+ourl)
realsock = response.fp._sock.fp._sock
peer = response.fp._sock.fp._sock.getpeername()
ip = peer[0]
header = response.info()
html = response.read()
realsock.close()
response.close()
html_element = lxml.html.fromstring(html)
generator = html_element.xpath("//meta[@name='generator']/@content")
try:
match = geolite2.lookup(ip)
if match is not None:
country= match.country
try:
c=pycountry.countries.lookup(country)
country=c.name
except:
country=""
except:
country=""
try:
res=get_tld("http://www"+ourl, as_object=True)
tld=res.suffix
except:
tld=""
try:
match = re.search(r'[\w\.-]+@[\w\.-]+', html)
email=match.group(0)
except:
email=""
try:
item= generator[0]
val = "{ \"Domain\":"+json.dumps("http://"+ourl.rstrip())+",\"IP\":\""+ip+"\",\"Server\":"+json.dumps(str(header.getheader("Server")).replace("None",""))+",\"PoweredBy\":" +json.dumps(str(header.getheader("X-Powered-By")).replace("None",""))+",\"MetaGenerator\":"+json.dumps(item)+",\"Email\":"+json.dumps(email)+",\"Suffix\":\""+tld+"\",\"CountryHosted\":\""+country+"\" }"
except:
val = "{ \"Domain\":"+json.dumps("http://"+ourl.rstrip())+",\"IP\":\""+ip+"\"," + "\"Server\":"+json.dumps(str(header.getheader("Server")).replace("None",""))+",\"PoweredBy\":" +json.dumps(str(header.getheader("X-Powered-By")).replace("None",""))+",\"MetaGenerator\":\"\",\"Email\":"+json.dumps(email)+",\"Suffix\":\""+tld+"\",\"CountryHosted\":\""+country+"\" }"
return val
except Exception as e:
print "error"+str(e)
pass
def doSomethingWithResult(status):
if status:
resfile.write(str(status)+"\n")
q = Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
t.start()
try:
for url in tqdm(open('list.txt')):
q.put(url.strip())
status = open("status.txt",'w')
status.write(str(url.strip()))
q.join()
except KeyboardInterrupt:
sys.exit(1)
The handles will be garbage collected automatically, but you are better off closing them yourself, especially as you are doing this in a tight loop.
You also asked for suggestions for improvement. A big one would be to stop using urllib2 and start using requests instead.
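If it helps, here is a rough sketch of what the fetch could look like with requests. It is not a drop-in replacement for your getStatus: the IP is resolved separately with socket, the JSON is built with json.dumps instead of manual string concatenation, and the timeout value is just an example:
import json
import socket

import requests

def get_status(domain):
    url = "http://" + domain.rstrip()
    try:
        # an explicit timeout matters with 200 workers: hung sockets otherwise
        # pile up and the overall rate collapses
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        return None
    try:
        ip = socket.gethostbyname(domain.rstrip())
    except socket.error:
        ip = ""
    record = {
        "Domain": url,
        "IP": ip,
        "Server": response.headers.get("Server", ""),
        "PoweredBy": response.headers.get("X-Powered-By", ""),
    }
    # json.dumps takes care of quoting and escaping for you
    return json.dumps(record)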
There are many possible reasons why your crawling rate drops.
1.) Take care not to crawl too much data from the same domain. Some web servers are configured to allow only one connection per IP address in parallel.
2.) Try to send randomized, browser-like HTTP headers (User-Agent, Referer, ...) to get past web server scraping protection, if any is set; see the sketch after this list.
3.) Use a mature (parallel) HTTP library, like pycurl (has MultiCurl) or requests (grequests). They perform faster for sure.
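As an illustration of point 2, here is a minimal sketch of sending a randomized browser-like header per request with requests; the User-Agent strings below are placeholders, not a curated list:
import random

import requests

# placeholder pool of browser-like User-Agent strings; extend with real ones
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
    "Mozilla/5.0 (X11; Linux x86_64)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11)",
]

def fetch(url):
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
        "Referer": "http://www.google.com/",
    }
    return requests.get(url, headers=headers, timeout=10)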
I'm using libmproxy to capture HTTP traffic. I would like to use WebDriver to load a web page while the proxy is running. The proxy code works great, but I am unable to launch the page. I assume the script gets stuck in a loop at m.run(). How can I move on to the WebDriver code while the proxy is running?
import unittest
import sys
from libmproxy import proxy, dump, cmdline
from libmproxy.version import VERSION
from optparse import OptionParser
from selenium import webdriver
class Test(unittest.TestCase):
def setUp(self):
parser = OptionParser(
usage = "%prog [options] [filter]",
version="%%prog %s"%VERSION,
)
cmdline.common_options(parser)
parser.add_option(
"--keepserving",
action="store_true", dest="keepserving", default=False,
help="Continue serving after client playback or file read. We exit by default."
)
options, args = parser.parse_args()
if options.quiet:
options.verbose = 0
proxyconfig = proxy.process_proxy_options(parser, options)
if options.no_server:
server = proxy.DummyServer(proxyconfig)
else:
try:
server = proxy.ProxyServer(proxyconfig, options.port, options.addr)
except proxy.ProxyServerError, v:
print >> sys.stderr, "mitmdump:", v.args[0]
sys.exit(1)
try:
dumpopts = dump.Options(**cmdline.get_common_options(options))
except cmdline.OptionException, v:
parser.error(v.message)
dumpopts.keepserving = options.keepserving
if args:
filt = " ".join(args)
else:
filt = None
try:
PROXY_HOST = "localhost"
PROXY_PORT = 8080
#driver.get("http://msn.com")
#f = open('/Users/cnave/Documents/capture/dump.txt', 'w')
#sys.stdout('/Users/cnave/Documents/capture/dump.txt', 'w')
#open('/Users/cnave/Documents/capture/dump.txt', 'w')
m = dump.DumpMaster(server, dumpopts, filt)
m.run()
#sys.stdout('/Users/cnave/Documents/capture/dump')
fp = webdriver.FirefoxProfile()
# Direct = 0, Manual = 1, PAC = 2, AUTODETECT = 4, SYSTEM = 5
fp.set_preference("network.proxy.type", 1)
fp.set_preference("network.proxy.http", PROXY_HOST)
fp.set_preference("network.proxy.http_port", PROXY_PORT)
fp.set_preference("network.proxy.no_proxies_on", "") # set this value as desired
driver = webdriver.Firefox(firefox_profile=fp)
driver.get('http://google.com')
except dump.DumpError, e:
print >> sys.stderr, "mitmdump:", e
sys.exit(1)
except KeyboardInterrupt:
pass
self.dm = dump.DumpMaster(server, dumpopts, filt)
# run the MITM proxy in a background thread (requires `import thread` at the top of the file)
thread.start_new_thread(self.dm.run, ())
And you may need to shut down the proxy in tearDown():
self.dm.shutdown()
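Putting the pieces together, a rough, untested sketch of how the test class could be arranged; the mitmproxy option/server setup from your setUp() is elided with a comment, and the only point here is the threading arrangement:
import threading
import unittest

from libmproxy import dump
from selenium import webdriver


class Test(unittest.TestCase):
    def setUp(self):
        # ... build server, dumpopts and filt exactly as in the question ...
        self.dm = dump.DumpMaster(server, dumpopts, filt)
        # run the blocking DumpMaster.run() in a background thread
        self.proxy_thread = threading.Thread(target=self.dm.run)
        self.proxy_thread.daemon = True
        self.proxy_thread.start()

        fp = webdriver.FirefoxProfile()
        fp.set_preference("network.proxy.type", 1)  # manual proxy
        fp.set_preference("network.proxy.http", "localhost")
        fp.set_preference("network.proxy.http_port", 8080)
        fp.set_preference("network.proxy.no_proxies_on", "")
        self.driver = webdriver.Firefox(firefox_profile=fp)

    def test_capture(self):
        self.driver.get("http://google.com")

    def tearDown(self):
        self.driver.quit()
        self.dm.shutdown()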
I have a producer/consumer application where the producer reads a database and posts the results to a website. On success, the consumer then gets the id of the transaction and updates the db.
The program runs as required until it attempts to execute the update. It then sometimes fails with the error 'HY000', 'The driver did not supply an error!'.
The code can comfortably write to a file without any issues.
What could I do to fix this? We need to update the db.
Thanks
Notes: using Python 2.7 with pyodbc on MSSQL 2008.
Code below
#!/usr/bin/env python
from urlparse import urlparse
from threading import Thread
import httplib, sys
import Queue
import urllib2
import urllib
from time import localtime, strftime
from ConfigParser import SafeConfigParser
from functions import Functions
from pyodbcclass import db_mssql
now = strftime("%y-%m-%d-%H%M%S", localtime())
k = db_mssql()
thread_list = []
thread_list2 = []
getFromDBQueue = Queue.Queue()
updateDBQueue = Queue.Queue()
number_of_consumer_threads = 3
def putURL():
querySql = "select distinct top 3 id,url from tblURL where processed=0 order by id asc"
re = k.query2(querySql)
if re:
for r in re:
id = r.id
params = urllib.urlencode({'user': user, 'password': password})
ourl = urlini + "?%s" % params
urlplusid = {'url':ourl.strip(),'id':id}
getFromDBQueue.put(urlplusid)
def getURL(thread_id):
while 1:
try:
URL_toget = getFromDBQueue.get(block=False)
url2 = URL_toget['url']
msgid2 = URL_toget['id']
except Queue.Empty:
print "thread exiting, id: " + str(thread_id) + "++getFromDB++"
sys.exit()
status,url = getStatus(url2)
if status == 200:
updateDBQueue.put(msgid2)
print(status)
def updateDB(thread_id):
while 1:
try:
id2 = updateDBQueue.get(block=False)
if id2:
params = ['true',id2]
sqlupdate = "UPDATE tblURL SET processed=? WHERE id=?"
k.execute3(sqlupdate,params)
except Queue.Empty:
print "thread exiting, id: " + str(thread_id) + "**update**"
sys.exit()
# fill the queue with work and block until we are done filling the queue
producer_thread = Thread(target=putURL)
producer_thread.start()
producer_thread.join()
# we can now start consumers
for i in range(number_of_consumer_threads):
getfromDB = Thread(target=getURL, args=(i,))
getfromDB.start()
thread_list.append(getfromDB)
for i in range(number_of_consumer_threads):
update = Thread(target=updateDB, args=(i,))
update.start()
thread_list2.append(update)
for thread in thread_list:
thread.join()
for thread2 in thread_list2:
thread2.join()