save multi thread output to txt file - python

i write this simple code ... i need to save the code output to text file in my pc how i can do that ?
import threading
import time
def qan(hey):
while True:
d = hey + 1
print d
time.sleep(1)
def printd(printme):
while True:
print printme + "\n"
time.sleep(1)
t1 = threading.Thread(target=qan, args=(1,))
t2 = threading.Thread(target=printd, args=("hey",))
t2.start()
t1.start()
and this is my code output
hey
2 2 hey
2hey
2

Use some buffer with data:
import threading
import time
buffer = []
def qan(hey):
while True:
d = hey + 1
buffer.append(d)
time.sleep(1)
def printd(printme):
while True:
buffer.append(printme + "\n")
time.sleep(1)
t1 = threading.Thread(target=qan, args=(1,))
t2 = threading.Thread(target=printd, args=("hey",))
t2.start()
t1.start()
with open('output.txt') as f:
f.write(''.join(buffer))

Related

Python 3 | Mutithreading starts before thread.start() is decleared

Trying to have 2 methods running at once. One is a timer method and the other writes data to a CSV. I am trying to use Treading to run them both at once, but the thread starts before it is called.
Code;
with open("C:\\ProgramData\\Example.txt", "r", encoding="utf8") as file:
array = for line in file.readlines()]))
fieldnames = 'Col1','Col2','Col3'
with open("C:\\ProgramData\\example.csv", 'w', newline='', encoding="utf8") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(fieldnames)
writer.writerows(array)
csvfile.close()
def timer():
import time
import sys
time_start = time.time()
seconds = 0
minutes = 0
while True:
try:
sys.stdout.write("\r{minutes} Minutes {seconds} Seconds".format(minutes=minutes, seconds=seconds))
sys.stdout.flush()
time.sleep(1)
seconds = int(time.time() - time_start) - minutes * 60
if seconds >= 60:
minutes += 1
seconds = 0
except KeyboardInterrupt as e:
break
if __name__=="__main__":
print("Not running")
t1 = threading.Thread(target=timer())
print("clearly running")
t2 = threading.Thread(target=regx())
t1.setName('t1')
t2.setName('t2')
t1.start()
t2.start()
t1.join()
t2.join()
# pool =Pool(processes=2)
# pool.map(timer(),regx())
The output from the console;
Not running
2 Minutes 32 Seconds
Process finished with exit code -1
Can anyone help me fix this?
Thanks
Don't use () unless you want to run the method immediately. If you want to reference the method itself (like to pass it to Thread), leave off the ().
Try this code:
if __name__=="__main__":
print("Not running")
t1 = threading.Thread(target=timer)
print("clearly running")
t2 = threading.Thread(target=regx)
t1.setName('t1')
t2.setName('t2')
t1.start()
t2.start()
t1.join()
t2.join()

Queue.put inside a worker thread failing

Inside a worker thread I am generating a data frame . Trying to put this into the queue passed to the worker thread is failing. In fact trying to put any values into the queue is failing.
The part of the code that is failing inside the worker thread task1() is given below:
df = pd.DataFrame([[1,2,3,4],[3,4,5,6]])
qmdlvalues.put(df)
mdltiming = time.time() - start
qmdlparams.put(paramval)
qtiming.put(mdltiming)
Complete code
import threading
import queue
from sklearn.manifold import TSNE
import os
import time
def write_tsne_op(opdata,fname,header):
with open(fname, 'w') as outfile:
outfile.write(header)
for data_slice in opdata:
np.savetxt(outfile, data_slice,delimiter=",")
def task1(qmdlvalues,qmdlparams,qtiming,paramval):
start = time.time()
#tmpmdl1 = TSNE(perplexity=100,early_exaggeration=1, n_components=2,random_state=0,verbose=1)
#qmdlvalues.put(tmpmdl1.fit_transform(dense_mx))
df = pd.DataFrame([[1,2,3,4],[3,4,5,6]])
qmdlvalues.put(df)
mdltiming = time.time() - start
qmdlparams.put(paramval)
qtiming.put(mdltiming)
print(df)
print(str(mdltiming))
print(paramval)
def task2(qmdlvalues,qmdlparams,qtiming,paramval):
start = time.time()
#tmpmdl2 = TSNE(perplexity=100,early_exaggeration=10, n_components=2,random_state=0,verbose=1)
#qmdlvalues.put(tmpmdl2.fit_transform(dense_mx2))
qmdlvalues.put(pd.DataFrame([[1,2,3,4],[3,4,5,6]]))
qmdlparams.put(paramval)
mdltiming = time.time() - start
qtiming.put(mdltiming)
if __name__ == "__main__":
dense_mx2 = dense_mx
dense_mx3 = dense_mx
qmdlvl = queue.Queue()
qmdlch = queue.Queue()
qtme = queue.Queue()
mdlvalues = pd.DataFrame()
t1 = threading.Thread(target=task1,args=(qmdlvl,qmdlch,qtme,"#perplex: 100 early exag: 1 timing:$_plex100_exag1.csv"), name='t1')
t2 = threading.Thread(target=task2,args=(qmdlvl,qmdlch,qtme,"#perplex: 100 early exag: 10 timing:$_plex100_exag10.cv"), name='t2')
# starting threads
t1.start()
t2.start()
while True:
if qmdlvl.empty():
print("Queue closed. Exiting thread.")
break
try:
item = qmdlvl.get(timeout=.5)
except:
continue
print("Got item:", item)
# wait until all threads finish
t1.join()
t2.join()
Below is the actual output I am getting from the code in the main
while True:
if qmdlvl.empty():
print("Queue closed. Exiting thread.")
break
try:
item = qmdlvl.get(timeout=.5)
except:
continue
print("Got item:", item)
ID of process running main program: 6456
Main thread name: MainThread
Queue closed. Exiting thread.
I want to able to put the data frame into a queue inside the worker thread and access the same data frame in the main thread.
There are parameter mis-matches in my earlier code those have been corrected an a full working code presented below.
I stored the output of t-SNE directly into the queue and retrieved the same in the main thread. The next progression would be convert this to thread pool and sub-classing.
import threading
import queue
from sklearn.manifold import TSNE
import os
import time
def write_tsne_op(opdata,fname,header):
with open(fname, 'w') as outfile:
outfile.write(header)
for data_slice in opdata:
np.savetxt(outfile, data_slice,delimiter=",")
def task1(ip_matrix,qmdlvalues,qmdlparam,plex,exag,qmdltime,qmdlhrfn,hderfname):
string=""
start=0
end=0
mdltiming=0
start = time.time()
tmpmdl1 = TSNE(perplexity=plex,early_exaggeration=exag, n_components=2,random_state=0,verbose=1)
qmdlvalues.put(tmpmdl1.fit_transform(ip_matrix))
string = str(plex)+ "$" + str(exag)
qmdlparam.put(string)
qmdlhrfn.put(hderfname)
end = time.time()
mdltimig = end - start
print(str(mdltiming)+"time")
qmdltime.put(mdltiming)
def task2(ip_matrix,qmdlvalues,qmdlparam,plex,exag,qmdltime,qmdlhrfn,hderfname):
string=""
start=0
end=0
mdltiming=0
start = time.time()
tmpmdl2 = TSNE(perplexity=plex,early_exaggeration=exag, n_components=2,random_state=0,verbose=1)
qmdlvalues.put(tmpmdl2.fit_transform(ip_matrix))
string = str(plex)+ "$" + str(exag)
qmdlparam.put(string)
qmdlhrfn.put(hderfname)
end = time.time()
mdltimig = end - start
qmdltime.put(mdltiming)
def task3(ip_matrix,qmdlvalues,qmdlparam,plex,exag,qmdltime,qmdlhrfn,hderfname):
string=""
start=0
end=0
mdltiming=0
start = time.time()
tmpmdl3 = TSNE(perplexity=plex,early_exaggeration=exag, n_components=2,random_state=0,verbose=1)
qmdlvalues.put(tmpmdl3.fit_transform(ip_matrix))
string = str(plex)+ "$" + str(exag)
qmdlparam.put(string)
qmdlhrfn.put(hderfname)
end = time.time()
mdltimig = end - start
qmdltime.put(mdltiming)
def task4(ip_matrix,qmdlvalues,qmdlparam,plex,exag,qmdltime,qmdlhrfn,hderfname):
string=""
start=0
end=0
mdltiming=0
start = time.time()
tmpmdl4 = TSNE(perplexity=plex,early_exaggeration=exag, n_components=2,random_state=0,verbose=1)
qmdlvalues.put(tmpmdl4.fit_transform(ip_matrix))
string = str(plex)+ "$" + str(exag)
qmdlparam.put(string)
qmdlhrfn.put(hderfname)
end = time.time()
mdltimig = end - start
qmdltime.put(mdltiming)
if __name__ == "__main__":
# print ID of current process
print("ID of process running main program: {}".format(os.getpid()))
# print name of main thread
print("Main thread name: {}".format(threading.main_thread().name))
dense_mx2 = dense_mx
dense_mx3 = dense_mx
dense_mx4 = dense_mx
qmdlvl = queue.Queue()
qmdlch = queue.Queue()
qmdltme = queue.Queue()
qmdlhdrfname = queue.Queue()
perplex = 200
# creating threads
exag=10
t1 = threading.Thread(target=task1,args=(dense_mx,qmdlvl,qmdlch,perplex,exag,qmdltme,qmdlhdrfname,"#perplex: 200 early exag: 10 timing:$_plex200_exag10.csv"), name='t1')
exag=30
t2 = threading.Thread(target=task2,args=(dense_mx2,qmdlvl,qmdlch,perplex,exag,qmdltme,qmdlhdrfname,"#perplex: 200 early exag: 30 timing:$_plex200_exag30.cv"), name='t2')
exag=50
t3 = threading.Thread(target=task3,args=(dense_mx3,qmdlvl,qmdlch,perplex,exag,qmdltme,qmdlhdrfname,"#perplex: 200 early exag: 50 timing:$_plex200_exag50.csv"), name='t3')
exag=100
t4 = threading.Thread(target=task4,args=(dense_mx4,qmdlvl,qmdlch,perplex,exag,qmdltme,qmdlhdrfname,"#perplex: 200 early exag: 100 timing:$_plex200_exag100.cv"), name='t4')
# starting threads
t1.start()
t2.start()
t3.start()
t4.start()
# wait until all threads finish
t1.join()
t2.join()
t3.join()
t4.join()
while True:
if qmdlvl.empty():
print("Queue closed. Exiting thread.")
break
try:
item1 = qmdlvl.get(timeout=.5)
item2 = qmdlch.get(timeout=.5)
item3 = qmdltme.get(timeout=.5)
header,fname = qmdlhdrfname.get(timeout=.5).split('$')
except:
continue
write_tsne_op(item1,fname,header)

How to use multithreading for LCD output on the raspberry pi

Writing to the 16x2 LCD display on the raspberryp pi can take some time to finish, especially with the module I wrote that automatically scrolls text that exceeds the length of the display.
I need to use multithreading, or something similar, to send the output to the display and continue with the rest of the program. I've tried a couple things with multithreading, but haven't quite got it.
This is the working code without any multithreading. The method I want to be multithreaded is "TextToLCD.ProcessFrameBuffer".
piBell.py
#!/usr/bin/env python3
import time
import rekognition
import TextToLCD
import PiPhoto
import json
import logging
import re
import threading
from queue import Queue
logFormatter = logging.Formatter("%(asctime)s [%(name)-8.8s]/[%(funcName)-12.12s] [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger('piBell')
fileHandler = logging.FileHandler("{0}/{1}.log".format("./", "piBell"), 'a')
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
reFace = re.compile('face|head|selfie|portrait|person', re.IGNORECASE)
def main(debugMode='INFO'):
TextToLCD.Clear()
rootLogger.setLevel(debugMode)
imgRotation = 270
imgPath = './'
imgName = 'image.jpg'
TextToLCD.ProcessFrameBuffer(["Scanning:", "................."], debugMode)
PiPhoto.GetPhoto(imgPath + imgName, imgRotation, "INFO")
rootLogger.info("Sending image to rekognition.")
TextToLCD.ProcessFrameBuffer(["Processing","................."], debugMode)
jsonLabels = rekognition.get_labels(imgPath + imgName)
rootLogger.info("Obtained JSON payload from rekognition.")
rootLogger.debug(json.dumps(jsonLabels))
if len(json.dumps(jsonLabels)) > 0:
if IsFace(jsonLabels):
if TestFace(imgPath + imgName):
TextToLCD.ProcessFrameBuffer(['Hello', ' :)'], debugMode)
celeb = IsCelebrity(imgPath + imgName)
if celeb:
TextToLCD.ProcessFrameBuffer(["You look like:", celeb], debugMode)
else:
rootLogger.info("No face detected.")
TextToLCD.ProcessFrameBuffer(['No face detected', ' :('], debugMode)
else:
rootLogger.info("No face detected.")
TextToLCD.ProcessFrameBuffer(['No face detected', ' :('], debugMode)
else:
rootLogger.error("JSON payload from rekognition was empty.")
def IsFace(jsonPayload):
for value in jsonPayload:
rootLogger.info("Label: " + value['Name'] + ", Confidence: " + str(round(value['Confidence'])))
rootLogger.debug(json.dumps(jsonPayload))
if reFace.match(value['Name']) and round(value['Confidence']) > 75:
rootLogger.info("Possible face match.")
return True
return False
def TestFace(img):
jsonFaces = rekognition.get_faces(img)
rootLogger.debug(json.dumps(jsonFaces))
if len(json.dumps(jsonFaces)) > 2:
for item in jsonFaces:
if item['Confidence']:
if item['Confidence'] > 75:
rootLogger.info("Face detected. Confidence: " + str(round(item['Confidence'])))
return True
else:
rootLogger.info("No facial data obtained.")
return False
def IsCelebrity(img):
celebMatchAccuracy = 25
jsonCelbFaces = rekognition.get_celebrities(img)
rootLogger.debug(json.dumps(jsonCelbFaces))
if len(json.dumps(jsonCelbFaces)) > 2:
for item in jsonCelbFaces:
if item['MatchConfidence']:
if item['MatchConfidence'] > celebMatchAccuracy and item['Name']:
rootLogger.info("Celebirity match detected: " + item['Name'] + ", Confidence: " + str(round(item['MatchConfidence'])))
return item['Name']
else:
rootLogger.info("No celebirity match found.")
return False
if __name__ == "__main__":
main('INFO')
First of all, it would be nice to see your LCD functions.
You are using hardware, so hardware is a limited resource.
For this you will need some kind of access control, this could be implemented with a Lock Object or Event Object.
You have two choices when you used the display:
Run the current writing to the end
Interrupt the current writing
import threading
import time
def parallelWithLock(lock:threading.Lock, name:str):
with lock:
for i in range(5):
print(f"{name}: {i}")
time.sleep(0.5)
# doWantYouWant(...)
def parallelWithInterrupt(event:threading.Event,lock:threading.Lock,name:str):
event.set()
i = 0
with lock:
event.clear()
while True:# or writing
if event.isSet():
print(f"{name} Interrupted!")
break
print(f"{name}: {i}")
time.sleep(1)
i += 1
#doWantYouWant(...)
if __name__ == '__main__':
lock = threading.Lock()
t1 = threading.Thread(target=parallelWithLock,args=(lock,"Thread_1"))
t2 = threading.Thread(target=parallelWithLock,args=(lock,"Thread_2"))
t1.start()
t2.start()
t1.join()
t2.join()
event = threading.Event()
lock = threading.Lock()
t3 = threading.Thread(target=parallelWithInterrupt,args=(event,lock,"Thread_3"))
t4 = threading.Thread(target=parallelWithInterrupt,args=(event,lock,"Thread_4"))
t5 = threading.Thread(target=parallelWithInterrupt, args=(event,lock, "Thread_4"))
t3.start()
time.sleep(5)
t4.start()
time.sleep(3)
t5.start()
t3.join()
t4.join()
time.sleep(2)
event.set()
t5.join()

Do locks block the access to an entire array/list/dict in python?

I have 4 threads which call a common function with different arguments each.
But all 4 of these threads make changes to the same list.
They may or may not be accessing the same index/key in the list.
Do I need to implements lock? and how do I proceed?
Here is my code:
import itertools
from threading import Thread
winr = {}
two = {}
winr=[{},{},{},{},{}]
def openfile():
for i in range(0,5):
filename='C:\Users\Rishabh\Desktop\\'+str(i+1)+'.txt'
with open(filename) as f:
lines = f.readlines()
for x in lines:
xt=x.split(" ")
winr[i][str(xt[0])]=[str(xt[1]),str(xt[2])]
def getmatch(start,thread_id,last_match):
match_id=start*4+thread_id
while match_id<last_match:
while True:
try:
import urllib2,json
response = urllib2.urlopen('https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/?match_id='+str(match_id)+'&key=C4B5D001E352AB612DCECABBFAB949B1')
except urllib2.HTTPError, err:
continue
break
r=[]
d=[]
team={}
html = response.read()
dic = json.loads(html)
if 'result' in dic and 'radiant_win' in dic['result'] and dic['result']['human_players']==10:
print match_id
for a in dic['result']['players']:
if a['player_slot']>127:
d.append(str(a['hero_id']))
else:
r.append(str(a['hero_id']))
if dic['result']['radiant_win']=='true':
team[0]=r
team[1]=d
else:
team[0]=d
team[1]=r
updatearrays(team)
else:
print "error",match_id
match_id=match_id+4
# do stuff
def updatearrays(team):
team[0].sort()
team[1].sort()
winner=[]
loser=[]
for i in range(1,6):
w=[]
r=[]
winner=list(itertools.combinations(team[0],i))
loser=list(itertools.combinations(team[1],i))
for d in winner:
w.append('.'.join(str(a) for a in d))
for d in loser:
r.append('.'.join(str(a) for a in d))
for k in w:
if k in winr[i-1]:
winr[i-1][k][0]=int(winr[i-1][k][0])+1
winr[i-1][k][1]=int(winr[i-1][k][1])+1
else:
winr[i-1][k]=[1,1]
for k in r:
if k in winr[i-1]:
winr[i-1][k][0]=int(winr[i-1][k][0])+1
else:
winr[i-1][k]=[1,0]
winner=[]
loser=[]
def updatefile():
# print winr
for i in range(0,5):
filename='C:\Users\Rishabh\Desktop\\'+str(i+1)+'.txt'
with open(filename,"w") as f:
for j in winr[i]:
tstring = str(j)+" "+str(winr[i][j][0])+" "+str(winr[i][j][1])+" \n"
f.write(tstring)
openfile()
t1 = Thread(target=getmatch, args=(0,1,1000))
t2 = Thread(target=getmatch, args=(0,2,1000))
t3 = Thread(target=getmatch, args=(0,3,1000))
t4 = Thread(target=getmatch, args=(0,4,1000))
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join()
updatefile()
The common function I was referring to is updatearray()

Only 1 Thread started in for loop

So Im trying to code a really simple Internet Download Manager Spoof with Python 2.7
It is supposed to query a files HTTP header, get the byte range and spread the download among a no.of threads(I hard-coded 2 for simplicity) according to the byte range and later join the file parts together again.
The problem is my console log tells me that only 1 thread is started.
[EDIT] The problem has been solved. Find the working code below.
Here is my source:
from __future__ import print_function
import threading
import urllib
import urllib2
import time
threads = []
# url to open
url = "http://www.sample-videos.com/video/mp4/720/big_buck_bunny_720p_1mb.mp4"
u = urllib.urlopen(url)
# define file
file_name = "test.mp4"
f = open(file_name, 'wb')
# open url and get header info
def get_file_size(url):
stream_size = u.info()['Content-Length']
end = stream_size
return end
start = 0
#get stream size
end = get_file_size(url)
# specify block size
block_sz = 512
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread1():
full_stream_size = end
first_thread = {'start':0, 'end':(int(full_stream_size)/2)}
print(first_thread)
return first_thread
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread2():
full_stream_size = end
second_thread= {'start':int(full_stream_size)/2,'end': int(full_stream_size)}
print(second_thread)
return second_thread
# download function
def download_thread(url ,id,start,end):
current_size = int(float(start)/1024)
total_size = int(float(end)/1024)
print ("Start at_"+str(current_size) + "Ends at_" + str(total_size))
# specify request range and init stream
req = urllib2.Request(url)
req.headers['Range'] = 'bytes=%s-%s' % (start, end)
data = urllib2.urlopen(req)
while True:
buffer = u.read(block_sz)
if not buffer:
break
start += len(buffer)
f.write(buffer)
thread_id = id
#percentage = (current_size * 100 / total_size)
status = str(thread_id) + "_" + str(current_size) + "_" +str(total_size)
print (status)
#starts 2 threads
def start_threads():
for i in range(2):
#if first loop, start thread 1
if(i==1):
start = calculate_no_of_bytes_for_thread1().get('start')
end = calculate_no_of_bytes_for_thread1().get('end')
print("Thread 1 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
#if second loop, start thread 1
if(i==2):
start = calculate_no_of_bytes_for_thread2().get('start')
end = calculate_no_of_bytes_for_thread2().get('end')
print("Thread 2 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
# Join threads back (order doesn't matter, you just want them all)
for i in threads:
i.join()
#start benchmarking
start_time = time.clock()
start_threads()
print ("Finito!")
end_time = time.clock()
benchmark = str(end_time - start_time)
print ("Download took_" +benchmark)
f.close()
And the output:
{'start': 0, 'end': 527868}
{'start': 0, 'end': 527868}
Thread 1 started
Start at_0Ends at_515
1_0_515
1_0_515
Finito!
Download took_6.97844422658
Working code:
from __future__ import print_function
import threading
import urllib
import urllib2
import time
threads = []
parts = {}
# url to open
url = "http://www.sample-videos.com/audio/mp3/india-national-anthem.mp3"
u = urllib.urlopen(url)
# define file
file_name = "test.mp3"
f = open(file_name, 'wb')
# open url and get header info
def get_file_size(url):
stream_size = u.info()['Content-Length']
file_size = stream_size
return file_size
start = 0
#get stream size
end = get_file_size(url)
# specify block size
block_sz = 512
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread1():
full_stream_size = end
first_thread = {'start':0, 'end':(int(full_stream_size)/2)}
print(first_thread)
return first_thread
#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread2():
full_stream_size = end
second_thread= {'start':int(full_stream_size)/2,'end': int(full_stream_size)}
print(second_thread)
return second_thread
# download function
def download_thread(url ,id,start,end):
current_size = int(float(start)/1024)
total_size = int(float(end)/1024)
print ("Start at_"+str(current_size) + "Ends at_" + str(total_size))
# specify request range and init stream
req = urllib2.Request(url)
req.headers['Range'] = 'bytes=%s-%s' % (start, end)
while True:
buffer = u.read(block_sz)
if not buffer:
break
start += len(buffer)
f.write(buffer)
thread_id = id
status = "Thread ID_" +str(thread_id) + "Downloaded_" + str(int(start/1024)) + "Total_" +str(total_size)
print (status)
#starts 2 threads
def start_threads():
for i in range(2):
#if first loop, start thread 1
if(i==0):
start = calculate_no_of_bytes_for_thread1().get('start')
end = calculate_no_of_bytes_for_thread1().get('end')
print("Thread 1 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
#if second loop, start thread 2
if(i==1):
start = calculate_no_of_bytes_for_thread2().get('start')
end = calculate_no_of_bytes_for_thread2().get('end')
print("Thread 2 started")
t = threading.Thread(target=download_thread, args=(url,i,start,end))
t.start()
threads.append( t)
# Join threads back (order doesn't matter, you just want them all)
for i in threads:
i.join()
# Sort parts and you're done
# result = ''
# for i in range(2):
# result += parts[i*block_sz]
#start benchmarking
start_time = time.clock()
start_threads()
print ("Finito!")
end_time = time.clock()
benchmark = str(end_time - start_time)
print ("Download took_" +benchmark)
f.close()
You have:
for i in range(2):
if(i==1):
...
if(i==2):
...
But range(2) iterates over [0,1] not [1,2].
Save some trouble and just remove those 3 lines. The code to start the two threads can just run serially.

Categories

Resources