I have a Python 3 script which has the following code:
from subprocess import check_output, Popen, CalledProcessError
from os import getenv, mkdir, path
from time import sleep, time
from threading import Thread

def pgrep(proc_name):
    try:
        check_output(["pgrep", proc_name])
        return True
    except CalledProcessError:
        return False

def watch_dog(proc_name, polling_rate, CONFIG_DIR):
    print("Thread " + proc_name + " Started")
    start_time = 0
    end_time = 0
    total_time = 0
    while True:
        if (pgrep(proc_name) and (start_time == 0)):
            start_time = time()
            print("TIMING " + proc_name)
        elif ((not pgrep(proc_name)) and (start_time != 0)):
            end_time = time()
            total_time = str(end_time - start_time)
            print("Done timing " + proc_name)
            print("TIME: " + total_time)
            try:
                with open(CONFIG_DIR + proc_name + "-time.log", "w+") as log_file:
                    log_file.write(total_time)
                    log_file.write("\n")
                    log_file.flush()
            except:
                print("CANNOT WRITE TO FILE")
            start_time = 0
            end_time = 0
            total_time = 0
        sleep(polling_rate)

for each in processes:
    globals()[each] = Thread(target=watch_dog, args=(each, polling_rate, CONFIG_DIR,))
    globals()[each].start()
Everything works perfectly, except that I cannot write to any files from the watch_dog function. I have done some research, and most of it points towards needing a dedicated thread that writes to the file on behalf of the others. However, all of the examples I find deal specifically with multiple threads writing to one file, whereas I want one file per thread.
Is there some way to circumvent this issue, or do I have to have a dedicated writer thread for each separate file?
I discovered what my problem was. On this line:
with open(CONFIG_DIR + proc_name + "-time.log", "w+") as log_file:
CONFIG_DIR does not end with a forward slash ("/"), so the files WERE being created, just in the parent directory and with a name I did not expect. It was a simple fix:
with open(CONFIG_DIR + "/" + proc_name + "-time.log", "w+") as log_file:
Moral of the story: check your file paths.
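As an aside, a path-building sketch (not part of the original script; the CONFIG_DIR and proc_name values below are made up for illustration): letting os.path.join supply the separator avoids this class of bug entirely, because a missing trailing "/" can no longer redirect the file into the parent directory.
from os import path

CONFIG_DIR = "/tmp/watchdog"   # hypothetical value for illustration
proc_name = "firefox"          # hypothetical value for illustration

# os.path.join inserts the separator itself, so the log always lands
# inside CONFIG_DIR no matter how the directory string is written.
log_path = path.join(CONFIG_DIR, proc_name + "-time.log")
with open(log_path, "w+") as log_file:
    log_file.write("12.34\n")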
After repeated IP scans the program crashes. The threads are never killed, and I don't know how to do that. Any ideas?
import time
import datetime
import subprocess, os, threading
from queue import Queue

ipbase = "192.168.101.{0}"
startadd = 20
stoppadd = 100

def ipscan():  # looking for available IP addresses
    lock = threading.Lock()
    _start = time.time()

    def check(n):
        with open(os.devnull, "wb") as limbo:
            ip = ipbase.format(n)
            result = subprocess.Popen(["ping", "-n", "2", "-w", "300", ip], stdout=limbo, stderr=limbo).wait(timeout=10)
            with lock:
                if not result:
                    print(ip, "active")
                else:
                    pass

    def threader():
        while True:
            worker = q.get()
            check(worker)
            q.task_done()

    print("Scan IP...")
    print("Address scan from " + ipbase + str(startadd) + " until " + ipbase + str(stoppadd))
    q = Queue()
    for _ in range(startadd, stoppadd):
        t = threading.Thread(target=threader)
        t.daemon = True
        t.start()
    for worker in range(startadd, stoppadd):
        q.put(worker)
    q.join()

if __name__ == "__main__":
    starttime = datetime.datetime.now()
    print(starttime)
    print("first check of available IP addresses..")
    ipscan()  # looking for available IP addresses
    cyclebegin = datetime.datetime.now()
    acttime = datetime.datetime.now()
    sampletime = 3
    while (1):
        if ((acttime - cyclebegin) > datetime.timedelta(seconds=sampletime)):
            dtime = acttime - cyclebegin
            print("delta-seconds: ", dtime.seconds)
            cyclebegin = datetime.datetime.now()
            ipscan()
        acttime = datetime.datetime.now()
After ipscan finishes with q.join(), the tasks should be done, but the threads are not killed, as I understand it. With each repeated call of ipscan the thread limit is exceeded further. What do I have to modify to prevent this?
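One way to stop the thread count from growing (a sketch only; it reuses check, startadd and stoppadd from the code above, with check assumed to be callable at this level) is to give every worker a sentinel so it exits once the scan is done, instead of blocking forever on q.get():
import threading
from queue import Queue

def worker(q):
    while True:
        n = q.get()
        if n is None:          # sentinel: this worker shuts down
            q.task_done()
            break
        check(n)               # check() as defined in the question's ipscan()
        q.task_done()

def ipscan_once(num_workers=20):
    q = Queue()
    threads = [threading.Thread(target=worker, args=(q,)) for _ in range(num_workers)]
    for t in threads:
        t.start()
    for n in range(startadd, stoppadd):
        q.put(n)
    for _ in threads:
        q.put(None)            # one sentinel per worker
    q.join()
    for t in threads:
        t.join()               # every worker has exited; nothing accumulates
Each call to ipscan_once() then starts and fully tears down its own small pool, so repeated scans never exceed the thread limit.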
I am trying to limit the CPU usage of a Python script using ulimit -t. The script contains a single time.sleep() call and is not killed after the specified time limit. Here is the simplified script, named test.py:
import time

while True:
    time.sleep(0.1)
and I run the command as follows:
ulimit -v 400000; ulimit -t 30; python test.py
The script keeps running forever. Are there any explanations for this? Thanks.
mata's answer is correct. I have updated with my real code, which contained the hidden issue: ulimit -t does not count the running time of spawned subprocesses.
#!/usr/bin/env python
# Run: python smt.py filename.smt2 timeout
# timeout is in seconds
import os
import subprocess
import sys
import stat
import time

current_path = os.path.dirname(os.path.realpath(__file__))

def remove_tmp(filename, version):
    try:
        os.remove(filename + '.' + version + '.tmp')
    except OSError:
        pass
    try:
        os.remove(os.path.splitext(filename)[0] + '.' + version + '.out')
    except OSError:
        pass
    try:
        os.remove(os.path.splitext(filename)[0] + '.' + version + '.in')
    except OSError:
        pass

def run_raSAT(filename, bounds, sbox, timeout):
    startTime = time.time()
    raSATResult = "unknown"

    # remove tmp files:
    remove_tmp(filename, "0.2")
    remove_tmp(filename, "0.3")

    proc2 = subprocess.Popen([os.path.join(current_path, "./raSAT-0.2"), filename, bounds, 'sbox=' + str(sbox), 'tout=' + str(timeout - (time.time() - startTime))])
    proc3 = subprocess.Popen([os.path.join(current_path, "./raSAT-0.3"), filename, bounds])

    while True:
        if proc2.poll():
            # try to read output of 0.2
            try:
                with open(filename + '.0.2.tmp', 'r') as outfile:
                    raSATResult = outfile.read().rstrip()
                    outfile.close()
                if raSATResult == "unknown":
                    sbox /= 10
                    remove_tmp(filename, "0.2")
                    proc2 = subprocess.Popen([os.path.join(current_path, "./raSAT-0.2"), filename, bounds, 'sbox=' + str(sbox), 'tout=' + str(timeout - (time.time() - startTime))])
            except IOError:
                pass
        if proc3.poll():
            # try to read output of 0.3
            try:
                with open(filename + '.0.3.tmp', 'r') as outfile:
                    raSATResult = outfile.read().rstrip()
                    outfile.close()
            except IOError:
                pass
        if raSATResult == "sat" or raSATResult == "unsat":
            if not proc3.poll():
                proc3.kill()
            if not proc2.poll():
                proc2.kill()
            break
        time.sleep(0.01)
    return raSATResult, sbox

def run(filename, initLowerBound, initUpperBound, sbox, timeout):
    lowerBound = initLowerBound
    upperBound = initUpperBound
    raSATResult = "unknown"
    startTime = time.time()
    while (raSATResult == 'unknown'):
        (raSATResult, sbox) = run_raSAT(filename, 'lb=' + str(lowerBound) + ' ' + str(upperBound), sbox, timeout - (time.time() - startTime))
    if raSATResult == 'unsat':
        (raSATResult, sbox) = run_raSAT(filename, 'lb=-inf inf', sbox, timeout - (time.time() - startTime))
    print (raSATResult)

    # remove tmp files:
    remove_tmp(filename, "0.2")
    remove_tmp(filename, "0.3")

# get timeout from environment
timeout = float(os.environ.get('STAREXEC_CPU_LIMIT'))
run(sys.argv[1], -10, 10, 0.1, timeout)
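As a hedged aside (not part of the original script, Unix only): since ulimit -t is a per-process CPU limit and the children's CPU time does not count against the parent's budget, one option is to give each spawned process an explicit (possibly smaller) CPU cap of its own via preexec_fn and RLIMIT_CPU. The command below is just a stand-in for the raSAT invocations above.
import resource
import subprocess

def cap_child_cpu(seconds):
    # Runs in the child just before exec(): apply a per-process CPU-time limit.
    def setter():
        resource.setrlimit(resource.RLIMIT_CPU, (seconds, seconds))
    return setter

proc = subprocess.Popen(["python", "-c", "while True: pass"],
                        preexec_fn=cap_child_cpu(5))
proc.wait()   # the child is killed by SIGXCPU after about 5 seconds of CPU time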
ulimit -t sets the CPU time limit. While your program is sleeping it doesn't use any CPU time, so that time doesn't count. It only takes a few CPU cycles to go back to sleep again, which is why it's not killed.
You can't specify a real time limit using ulimit.
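If a real (wall-clock) limit is what is actually needed, one option, as a sketch only (Unix), is to arm SIGALRM inside the script; its default action terminates the process no matter how little CPU it uses. The coreutils timeout command (timeout 30 python test.py) achieves the same thing from the shell.
import signal
import time

signal.alarm(30)        # deliver SIGALRM after 30 real seconds
while True:
    time.sleep(0.1)     # sleeping no longer helps; the alarm still fires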
I'm trying to implement basic multiprocessing and I've run into an issue. The Python script is attached below.
import time, sys, random, threading
from multiprocessing import Process
from Queue import Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency

append_queue = Queue(10)
database = FrequencyStore()

def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        item = append_queue.get()
        database.append(item)
        print("Appended to database in %.4f seconds" % database.append_time)
        append_queue.task_done()
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()
    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())
    append_queue.join()
    #append_queue_process.join()
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
The AnalyzeFrequency class analyzes the frequencies of words in a file, and get_results() returns a sorted list of those words and frequencies. The list is very large, perhaps 10,000 items.
This list is then passed to the add_to_append_queue method, which adds it to a queue. process_append_queue takes the items one by one and adds the frequencies to a "database". This operation takes a bit longer than the actual analysis in main(), so I am trying to use a separate process for this method. When I try to do this with the threading module, everything works perfectly fine, no errors. When I try to use Process, the script hangs at item = append_queue.get().
Could someone please explain what is happening here, and perhaps direct me toward a fix?
All answers appreciated!
UPDATE
The pickle error was my fault; it was just a typo. Now I am using the Queue class from multiprocessing, but the append_queue.get() call still hangs.
NEW CODE
import time, sys, random
from multiprocessing import Process, Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency

append_queue = Queue()
database = FrequencyStore()

def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        database.append(append_queue.get())
        print("Appended to database in %.4f seconds" % database.append_time)
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()
    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())
    #append_queue.join()
    #append_queue_process.join()
    print str(append_queue.qsize())
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
UPDATE 2
This is the database code:
class FrequencyStore:

    def __init__(self):
        self.sorter = Sorter()
        self.db = {}
        self.load_time = -1
        self.save_time = -1
        self.append_time = -1
        self.sort_time = -1

    def load_db(self):
        start_time = time.time()
        try:
            file = open("results.txt", 'r')
        except:
            raise IOError
        self.db = {}
        for line in file:
            word, count = line.strip("\n").split("=")
            self.db[word] = int(count)
        file.close()
        self.load_time = time.time() - start_time

    def save_db(self):
        start_time = time.time()
        _db = []
        for key in self.db:
            _db.append([key, self.db[key]])
        _db = self.sort(_db)
        try:
            file = open("results.txt", 'w')
        except:
            raise IOError
        file.truncate(0)
        for x in _db:
            file.write(x[0] + "=" + str(x[1]) + "\n")
        file.close()
        self.save_time = time.time() - start_time

    def create_sorted_db(self):
        _temp_db = []
        for key in self.db:
            _temp_db.append([key, self.db[key]])
        _temp_db = self.sort(_temp_db)
        _temp_db.reverse()
        return _temp_db

    def get_db(self):
        return self.db

    def sort(self, _list):
        start_time = time.time()
        _list = self.sorter.mergesort(_list)
        _list.reverse()
        self.sort_time = time.time() - start_time
        return _list

    def append(self, _list):
        start_time = time.time()
        for x in _list:
            if x[0] not in self.db:
                self.db[x[0]] = x[1]
            else:
                self.db[x[0]] += x[1]
        self.append_time = time.time() - start_time
Comments suggest you're trying to run this on Windows. As I said in a comment,
If you're running this on Windows, it can't work - Windows doesn't have fork(), so each process gets its own Queue and they have nothing to do with each other. The entire module is imported "from scratch" by each process on Windows. You'll need to create the Queue in main(), and pass it as an argument to the worker function.
Here's a fleshed-out version of what you need to do to make it portable, although I removed all the database stuff because it's irrelevant to the problems you've described so far. I also removed the daemon fiddling, because that's usually just a lazy way to avoid shutting things down cleanly, and as often as not it will come back to bite you later:
def process_append_queue(append_queue):
    while True:
        x = append_queue.get()
        if x is None:
            break
        print("processed %d" % x)
    print("worker done")

def main():
    import multiprocessing as mp
    append_queue = mp.Queue(10)
    append_queue_process = mp.Process(target=process_append_queue, args=(append_queue,))
    append_queue_process.start()
    for i in range(100):
        append_queue.put(i)
    append_queue.put(None)  # tell worker we're done
    append_queue_process.join()

if __name__=="__main__":
    main()
The output is the "obvious" stuff:
processed 0
processed 1
processed 2
processed 3
processed 4
...
processed 96
processed 97
processed 98
processed 99
worker done
Note: because Windows doesn't (can't) fork(), it's impossible for worker processes to inherit any Python object on Windows. Each process runs the entire program from its start. That's why your original program couldn't work: each process created its own Queue, wholly unrelated to the Queue in the other process. In the approach shown above, only the main process creates a Queue, and the main process passes it (as an argument) to the worker process.
queue.Queue is thread-safe, but doesn't work across processes. This is quite easy to fix, though. Instead of:
from multiprocessing import Process
from Queue import Queue
You want:
from multiprocessing import Process, Queue
So I'm trying to code a really simple Internet Download Manager spoof with Python 2.7.
It is supposed to query a file's HTTP header, get the byte range, spread the download among a number of threads (I hard-coded 2 for simplicity) according to that byte range, and later join the file parts together again.
The problem is my console log tells me that only 1 thread is started.
[EDIT] The problem has been solved. Find the working code below.
Here is my source:
from __future__ import print_function
import threading
import urllib
import urllib2
import time

threads = []

# url to open
url = "http://www.sample-videos.com/video/mp4/720/big_buck_bunny_720p_1mb.mp4"
u = urllib.urlopen(url)

# define file
file_name = "test.mp4"
f = open(file_name, 'wb')

# open url and get header info
def get_file_size(url):
    stream_size = u.info()['Content-Length']
    end = stream_size
    return end

start = 0
#get stream size
end = get_file_size(url)
# specify block size
block_sz = 512

#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread1():
    full_stream_size = end
    first_thread = {'start': 0, 'end': (int(full_stream_size)/2)}
    print(first_thread)
    return first_thread

#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread2():
    full_stream_size = end
    second_thread = {'start': int(full_stream_size)/2, 'end': int(full_stream_size)}
    print(second_thread)
    return second_thread

# download function
def download_thread(url, id, start, end):
    current_size = int(float(start)/1024)
    total_size = int(float(end)/1024)
    print("Start at_" + str(current_size) + "Ends at_" + str(total_size))

    # specify request range and init stream
    req = urllib2.Request(url)
    req.headers['Range'] = 'bytes=%s-%s' % (start, end)
    data = urllib2.urlopen(req)

    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        start += len(buffer)
        f.write(buffer)
        thread_id = id
        #percentage = (current_size * 100 / total_size)
        status = str(thread_id) + "_" + str(current_size) + "_" + str(total_size)
        print(status)

#starts 2 threads
def start_threads():
    for i in range(2):
        #if first loop, start thread 1
        if(i==1):
            start = calculate_no_of_bytes_for_thread1().get('start')
            end = calculate_no_of_bytes_for_thread1().get('end')
            print("Thread 1 started")
            t = threading.Thread(target=download_thread, args=(url, i, start, end))
            t.start()
            threads.append(t)
        #if second loop, start thread 1
        if(i==2):
            start = calculate_no_of_bytes_for_thread2().get('start')
            end = calculate_no_of_bytes_for_thread2().get('end')
            print("Thread 2 started")
            t = threading.Thread(target=download_thread, args=(url, i, start, end))
            t.start()
            threads.append(t)
    # Join threads back (order doesn't matter, you just want them all)
    for i in threads:
        i.join()

#start benchmarking
start_time = time.clock()
start_threads()
print("Finito!")
end_time = time.clock()
benchmark = str(end_time - start_time)
print("Download took_" + benchmark)
f.close()
And the output:
{'start': 0, 'end': 527868}
{'start': 0, 'end': 527868}
Thread 1 started
Start at_0Ends at_515
1_0_515
1_0_515
Finito!
Download took_6.97844422658
Working code:
from __future__ import print_function
import threading
import urllib
import urllib2
import time

threads = []
parts = {}

# url to open
url = "http://www.sample-videos.com/audio/mp3/india-national-anthem.mp3"
u = urllib.urlopen(url)

# define file
file_name = "test.mp3"
f = open(file_name, 'wb')

# open url and get header info
def get_file_size(url):
    stream_size = u.info()['Content-Length']
    file_size = stream_size
    return file_size

start = 0
#get stream size
end = get_file_size(url)
# specify block size
block_sz = 512

#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread1():
    full_stream_size = end
    first_thread = {'start': 0, 'end': (int(full_stream_size)/2)}
    print(first_thread)
    return first_thread

#algo to divide work among 2 threads
def calculate_no_of_bytes_for_thread2():
    full_stream_size = end
    second_thread = {'start': int(full_stream_size)/2, 'end': int(full_stream_size)}
    print(second_thread)
    return second_thread

# download function
def download_thread(url, id, start, end):
    current_size = int(float(start)/1024)
    total_size = int(float(end)/1024)
    print("Start at_" + str(current_size) + "Ends at_" + str(total_size))

    # specify request range and init stream
    req = urllib2.Request(url)
    req.headers['Range'] = 'bytes=%s-%s' % (start, end)

    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        start += len(buffer)
        f.write(buffer)
        thread_id = id
        status = "Thread ID_" + str(thread_id) + "Downloaded_" + str(int(start/1024)) + "Total_" + str(total_size)
        print(status)

#starts 2 threads
def start_threads():
    for i in range(2):
        #if first loop, start thread 1
        if(i==0):
            start = calculate_no_of_bytes_for_thread1().get('start')
            end = calculate_no_of_bytes_for_thread1().get('end')
            print("Thread 1 started")
            t = threading.Thread(target=download_thread, args=(url, i, start, end))
            t.start()
            threads.append(t)
        #if second loop, start thread 2
        if(i==1):
            start = calculate_no_of_bytes_for_thread2().get('start')
            end = calculate_no_of_bytes_for_thread2().get('end')
            print("Thread 2 started")
            t = threading.Thread(target=download_thread, args=(url, i, start, end))
            t.start()
            threads.append(t)
    # Join threads back (order doesn't matter, you just want them all)
    for i in threads:
        i.join()

# Sort parts and you're done
# result = ''
# for i in range(2):
#     result += parts[i*block_sz]

#start benchmarking
start_time = time.clock()
start_threads()
print("Finito!")
end_time = time.clock()
benchmark = str(end_time - start_time)
print("Download took_" + benchmark)
f.close()
You have:
for i in range(2):
    if(i==1):
        ...
    if(i==2):
        ...
But range(2) iterates over [0,1] not [1,2].
Save some trouble and just remove those 3 lines. The code to start the two threads can just run serially.
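For illustration, a minimal sketch of that suggestion; it reuses the names from the code above (calculate_no_of_bytes_for_thread1/2, download_thread, url, threads) and is not a standalone script:
def start_threads():
    ranges = [calculate_no_of_bytes_for_thread1(),
              calculate_no_of_bytes_for_thread2()]
    for i, byte_range in enumerate(ranges):
        t = threading.Thread(target=download_thread,
                             args=(url, i, byte_range['start'], byte_range['end']))
        t.start()
        threads.append(t)
    # Join threads back (order doesn't matter, you just want them all)
    for t in threads:
        t.join()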
Recently I have been working on a gevent demo, trying to compare the efficiency of gevent against threads. Generally speaking, the gevent code should be more efficient than the thread code. But when I use the time command to profile the program, I get an unexpected result (my command is time python FILENAME.py 50 1000; the last two parameters mean the pool size or the thread count, so those are the two numbers I vary in the table below). The result shows that the threads are more efficient than the gevent code, so I want to know why this happens and what is wrong with my program. Thanks.
gevent VS thread
My code is below (the main idea is to use threads or gevent to send multiple HTTP requests):
******This is the thread version code******
# _*_ coding: utf-8 _*_
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import requests
import threading
import time
import urllib2

finished = 0

def GetUrl(pagenum):
    url = 'http://opendata.baidu.com/zhaopin/s?p=mini&wd=%B0%D9%B6%C8&pn=' + \
        str(pagenum*20) + '&rn=20'
    return url

def setUrlSet():
    for i in xrange(requestnum):
        urlnum = i % 38
        urlset.append(GetUrl(urlnum))

def GetResponse(pagenum):
    try:
        r = requests.get(urlset[pagenum])
    except Exception, e:
        print e
        pass

def DigJobByPagenum(pagenum, requestnum):
    init_num = pagenum
    print '%d begin' % init_num
    while pagenum < requestnum:
        GetResponse(pagenum)
        pagenum += threadnum
    print '%d over' % init_num

def NormalThread(threadnum):
    startime = time.time()
    print "%s is running..." % threading.current_thread().name
    threads = []
    global finished, requestnum
    for i in xrange(threadnum):
        thread = threading.Thread(target=DigJobByPagenum, args=(i, requestnum))
        threads.append(thread)
    for t in threads:
        t.daemon = True
        t.start()
    for t in threads:
        t.join()
        finished += 1
    endtime = time.time()
    print "%s is stop.The total time is %0.2f" % \
        (threading.current_thread().name, (endtime - startime))

def GetAvageTime(array):
    alltime = 0.0
    for i in array:
        alltime += i
    avageTime = alltime/len(array)
    return avageTime

if __name__ == '__main__':
    threadnum = int(sys.argv[1])
    requestnum = int(sys.argv[2])
    print 'threadnum : %s,requestnum %s ' % (threadnum, requestnum)
    originStartTime = time.time()
    urlset = []
    setUrlSet()
    NormalThread(threadnum)
******This is the gevent version code******
# _*_ coding: utf-8 _*_
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from gevent import monkey
monkey.patch_all()
import gevent
from gevent import pool
import requests
import time

finished = 0

def GetUrl(pagenum):
    url = 'http://opendata.baidu.com/zhaopin/s?p=mini&wd=%B0%D9%B6%C8&pn=' + \
        str(pagenum*20) + '&rn=20'
    return url

def setUrlSet():
    for i in xrange(requestnum):
        urlnum = i % 38
        urlset.append(GetUrl(urlnum))

def GetResponse(url):
    startime = time.time()
    r = requests.get(url)
    print url
    endtime = time.time()
    spendtime = endtime - startime
    NormalSpendTime.append(spendtime)
    global finished
    finished += 1
    print finished

def GetAvageTime(array):
    alltime = 0.0
    for i in array:
        alltime += i
    avageTime = alltime/len(array)
    return avageTime

def RunAsyncJob():
    jobpool = pool.Pool(concurrent)
    for url in urlset:
        jobpool.spawn(GetResponse, url)
    jobpool.join()
    endtime = time.time()
    allSpendTime = endtime - originStartime
    print 'Total spend time is %0.3f, total request num is %s within %s \
seconds' % (allSpendTime, finished, timeoutNum)
    print 'Each request time is %0.3f' % (GetAvageTime(NormalSpendTime))

if __name__ == '__main__':
    concurrent = int(sys.argv[1])
    requestnum = int(sys.argv[2])
    timeoutNum = 100
    NormalSpendTime = []
    urlset = []
    urlActionList = []
    setUrlSet()
    originStartime = time.time()
    RunAsyncJob()
Try
gevent.monkey.patch_all(httplib=True)
It seems that by default gevent does not patch httplib (have a look at http://www.gevent.org/gevent.monkey.html: httplib=False), so you are actually doing blocking requests and lose all the advantages of the asynchronous framework. I'm not sure, though, whether requests uses httplib.
If that doesn't work, then have a look at this lib:
https://github.com/kennethreitz/grequests
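For reference, a minimal sketch of what that might look like here (it assumes grequests is installed and rebuilds the same URL pattern as the code above; 50 plays the role of the pool size):
import grequests

urls = ['http://opendata.baidu.com/zhaopin/s?p=mini&wd=%B0%D9%B6%C8&pn=' +
        str((i % 38) * 20) + '&rn=20' for i in xrange(1000)]
reqs = (grequests.get(u) for u in urls)
responses = grequests.map(reqs, size=50)   # issue them concurrently, at most 50 at a time
print len([r for r in responses if r is not None and r.ok])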
Re: httplib=False
You are already using the requests library to make web calls. It has a gevent flavour called grequests:
https://github.com/kennethreitz/grequests
Overall I don't immediately see much reason to prefer one style of threading to the other if your pool is so small. Of course real threads are relatively heavy (they start with an 8 MB stack), but you have to weigh that against the size of your job.
My take: try both (done), verify you are doing both right (to do), and let the numbers do the talking.