I am new to Python.
I am trying out the HBase Thrift client. I found some code on the net, which I modified to work with the latest version of Thrift, but when I run it, it just exits and no threads are started.
Here is the code.
import json, traceback, sys, datetime, time, logging, threading, random
import logging.handlers
import thrift
sys.path.append('gen-py')
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport
from thrift.protocol import TBinaryProtocol
from hbase import THBaseService
gWritenItems = 0
gStartT = 0
gEndT = 0
recordsPerBatch = 300 #reports per client per day
columns = 3
#config
concurrent = 10
records = 60000#6000000 #6 million
bytesPerRecord = 1024
mylock = threading.RLock()
class writeThread(threading.Thread):
    def __init__(self, threadname, RecordsThreadwillwrite):
        threading.Thread.__init__(self, name = threadname)
        bytesPerColumn = int(bytesPerRecord/columns) - 11 #suppose 3 columns
        self.columnvalue = "value_" + "x"*bytesPerColumn + "_endv"
        self.tbwBatch = int (RecordsThreadwillwrite / recordsPerBatch)
        self.transport = TBufferedTransport(TSocket('pnq-adongrevm1', 5151), 40960)
        self.transport.open()
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
        self.client = THBaseService.Client(protocol)
        self.table = "example"

    def run(self):
        print "+%s start" % (self.getName())
        global gEndT
        global gWritenItems
        threadWritenItem = 0
        for loopidx in xrange(0, self.tbwBatch):
            self.write_hbase() #write
            threadWritenItem += recordsPerBatch

        mylock.acquire()
        gEndT = time.time()
        gWritenItems += threadWritenItem
        print "%s done, %s seconds past, %d reocrds saved" % (self.getName(), gEndT-gStartT, gWritenItems)
        mylock.release()
        self.transport.close()

    def write_hbase(self): #write 50 rowkyes, and 3 column families in each rowkey
        print self.getName(), "Start write"
        batchmutations = []
        for i in xrange(0, recordsPerBatch): # write to db, 300 items together
            mutations = []
            rowkey = "RK_%s_%s" % (random.random(), time.time())
            for ii in xrange(0, columns):
                mutations.append(THBaseService.TPut(row=rowkey, columnValues=[TColumnValue(family="f1", qualifier="%s"%ii, value=self.columnvalue)]))
            self.client.putMultiple(self.table,mutations)

itemsPerThread = int(records / concurrent)

for threadid in xrange(0, concurrent):
    gStartT = time.time()
    t = writeThread("Thread_%s" % threadid, itemsPerThread)
    t.start();

print "%d thread created, each thread will write %d records" % (concurrent, itemsPerThread)
I just get the message 10 thread created, each thread will write 6000 records and then the program exits.
Yep, this is because you are not waiting for threads to finish their job, so the main thread just exits. Try this:
itemsPerThread = int(records / concurrent)
threads = []

for threadid in xrange(0, concurrent):
    gStartT = time.time()
    t = writeThread("Thread_%s" % threadid, itemsPerThread)
    t.start();
    threads.append(t)

# wait until all finish the job
for t in threads:
    t.join()
EDIT Ha, I don't think I'm right here, because you didn't mark your threads as daemons. It should work even without joining. But have a look at this code:
class CustomThread(threading.Thread):
    def run(self):
        print "test"

for x in xrange(0, 10):
    t = CustomThread()
    t.start()
It will always reach print "test" line no matter what. So in your code it should always reach print "+%s start" % (self.getName()) no matter what. Are you sure it doesn't work? :)
If it doesn't, then there are only two possibilities:
There is a blocking operation and/or an exception in your __init__ method. But then it would not reach the final print;
the concurrent variable is 0 for some reason (which is not consistent with the final print).
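To make the first possibility concrete, here is a throwaway sketch (BrokenInit is my own name, not anything from your code) showing that an exception raised inside __init__ blows up in the main loop before start() is ever called, so neither the per-thread print nor anything after the loop would run:
import threading

class BrokenInit(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        raise RuntimeError("simulated failure, e.g. a connection that cannot be opened")

    def run(self):
        print "+%s start" % self.getName()   # never reached

for i in xrange(10):
    t = BrokenInit()   # raises here, so start() below is never called
    t.start()
print "10 threads created"   # never reached either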
Related
I'm trying to implement basic multiprocessing and I've run into an issue. The python script is attached below.
import time, sys, random, threading
from multiprocessing import Process
from Queue import Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency
append_queue = Queue(10)
database = FrequencyStore()
def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        item = append_queue.get()
        database.append(item)
        print("Appended to database in %.4f seconds" % database.append_time)
        append_queue.task_done()
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()
    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())
    append_queue.join()
    #append_queue_process.join()
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
The AnalyzeFrequency analyzes the frequencies of words in a file and get_results() returns a sorted list of said words and frequencies. The list is very large, perhaps 10000 items.
This list is then passed to the add_to_append_queue method, which adds it to a queue. process_append_queue takes the items one by one and adds the frequencies to a "database". This operation takes a bit longer than the actual analysis in main(), so I am trying to use a separate process for this method. When I do this with the threading module, everything works perfectly fine, no errors. When I use Process, the script hangs at item = append_queue.get().
Could someone please explain what is happening here, and perhaps direct me toward a fix?
All answers appreciated!
UPDATE
The pickle error was my fault, it was just a typo. Now I am using the Queue class within multiprocessing but the append_queue.get() method still hangs.
NEW CODE
import time, sys, random
from multiprocessing import Process, Queue
from FrequencyAnalysis import FrequencyStore, AnalyzeFrequency
append_queue = Queue()
database = FrequencyStore()
def add_to_append_queue(_list):
    append_queue.put(_list)

def process_append_queue():
    while True:
        database.append(append_queue.get())
        print("Appended to database in %.4f seconds" % database.append_time)
    return

def main():
    database.load_db()
    print("Database loaded in %.4f seconds" % database.load_time)
    append_queue_process = Process(target=process_append_queue)
    append_queue_process.daemon = True
    append_queue_process.start()
    #t = threading.Thread(target=process_append_queue)
    #t.daemon = True
    #t.start()
    while True:
        path = raw_input("file: ")
        if path == "exit":
            break
        a = AnalyzeFrequency(path)
        a.analyze()
        print("Analyzed file in %.4f seconds" % a._time)
        add_to_append_queue(a.get_results())
    #append_queue.join()
    #append_queue_process.join()
    print str(append_queue.qsize())
    database.save_db()
    print("Database saved in %.4f seconds" % database.save_time)
    sys.exit(0)

if __name__=="__main__":
    main()
UPDATE 2
This is the database code:
class FrequencyStore:

    def __init__(self):
        self.sorter = Sorter()
        self.db = {}
        self.load_time = -1
        self.save_time = -1
        self.append_time = -1
        self.sort_time = -1

    def load_db(self):
        start_time = time.time()
        try:
            file = open("results.txt", 'r')
        except:
            raise IOError
        self.db = {}
        for line in file:
            word, count = line.strip("\n").split("=")
            self.db[word] = int(count)
        file.close()
        self.load_time = time.time() - start_time

    def save_db(self):
        start_time = time.time()
        _db = []
        for key in self.db:
            _db.append([key, self.db[key]])
        _db = self.sort(_db)
        try:
            file = open("results.txt", 'w')
        except:
            raise IOError
        file.truncate(0)
        for x in _db:
            file.write(x[0] + "=" + str(x[1]) + "\n")
        file.close()
        self.save_time = time.time() - start_time

    def create_sorted_db(self):
        _temp_db = []
        for key in self.db:
            _temp_db.append([key, self.db[key]])
        _temp_db = self.sort(_temp_db)
        _temp_db.reverse()
        return _temp_db

    def get_db(self):
        return self.db

    def sort(self, _list):
        start_time = time.time()
        _list = self.sorter.mergesort(_list)
        _list.reverse()
        self.sort_time = time.time() - start_time
        return _list

    def append(self, _list):
        start_time = time.time()
        for x in _list:
            if x[0] not in self.db:
                self.db[x[0]] = x[1]
            else:
                self.db[x[0]] += x[1]
        self.append_time = time.time() - start_time
Comments suggest you're trying to run this on Windows. As I said in a comment,
If you're running this on Windows, it can't work - Windows doesn't
have fork(), so each process gets its own Queue and they have nothing
to do with each other. The entire module is imported "from scratch" by
each process on Windows. You'll need to create the Queue in main(),
and pass it as an argument to the worker function.
Here's fleshing out what you need to do to make it portable, although I removed all the database stuff because it's irrelevant to the problems you've described so far. I also removed the daemon fiddling, because that's usually just a lazy way to avoid shutting down things cleanly, and often as not will come back to bite you later:
def process_append_queue(append_queue):
    while True:
        x = append_queue.get()
        if x is None:
            break
        print("processed %d" % x)
    print("worker done")

def main():
    import multiprocessing as mp
    append_queue = mp.Queue(10)
    append_queue_process = mp.Process(target=process_append_queue, args=(append_queue,))
    append_queue_process.start()

    for i in range(100):
        append_queue.put(i)
    append_queue.put(None)  # tell worker we're done
    append_queue_process.join()

if __name__=="__main__":
    main()
The output is the "obvious" stuff:
processed 0
processed 1
processed 2
processed 3
processed 4
...
processed 96
processed 97
processed 98
processed 99
worker done
Note: because Windows doesn't (can't) fork(), it's impossible for worker processes to inherit any Python object on Windows. Each process runs the entire program from its start. That's why your original program couldn't work: each process created its own Queue, wholly unrelated to the Queue in the other process. In the approach shown above, only the main process creates a Queue, and the main process passes it (as an argument) to the worker process.
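A tiny way to see that "runs the entire program from its start" behaviour is to put a print at module level; on Windows (spawn) it fires once per process, while on Linux (fork) it fires only once. This is just an illustrative sketch:
import multiprocessing, os

print("module-level code running in pid %d" % os.getpid())  # printed again by the child process on Windows

def child():
    pass

if __name__ == "__main__":
    p = multiprocessing.Process(target=child)
    p.start()
    p.join()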
queue.Queue is thread-safe, but doesn't work across processes. This is quite easy to fix, though. Instead of:
from multiprocessing import Process
from Queue import Queue
You want:
from multiprocessing import Process, Queue
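One related detail, since the original code relied on task_done()/join(): multiprocessing also has a JoinableQueue with that same pair of methods. A rough sketch of how it could be wired up (the worker name and the daemon flag are just for illustration):
from multiprocessing import Process, JoinableQueue

def worker(q):
    while True:
        item = q.get()
        try:
            pass  # process the item here
        finally:
            q.task_done()  # lets q.join() in the parent return once every item is processed

if __name__ == "__main__":
    q = JoinableQueue()
    p = Process(target=worker, args=(q,))
    p.daemon = True
    p.start()
    for i in range(10):
        q.put(i)
    q.join()  # blocks until all queued items have been marked done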
It tries to make two connections per thread now, still fails.
I think I solved the shared access thing because it uses self.x instead of local variables?
#!/usr/bin/python
from xml.etree.ElementTree import fromstring
from socks import socksocket, PROXY_TYPE_SOCKS5
from socket import socket, AF_INET, SOCK_STREAM
from linecache import getline
from threading import Thread, current_thread, Lock, activeCount
from os.path import isfile, getmtime
from urllib import urlopen
from time import time, sleep
from sys import exit
from json import loads
from random import randint, randrange, choice
from urlparse import parse_qs
from pprint import pprint
class myThread (Thread):
    def __init__(self, threadID, name):
        Thread.__init__(self)
        self.threadID = threadID
        self.name = name

    def run(self):
        self.user = parse_qs(getline('./_files/ids.txt', randint(1, idLen)).strip("\n"))
        self.proxy = getline('./_files/proxies.txt', randint(1, proxyLen)).strip("\n").split(":")
        self.user2 = parse_qs(getline('./_files/ids.txt', randint(1, idLen)).strip("\n"))
        self.proxy2 = getline('./_files/proxies.txt', randint(1, proxyLen)).strip("\n").split(":")
        try:
            self.socket = socksocket(AF_INET, SOCK_STREAM)
            self.socket.settimeout(5)
            self.socket.setproxy(PROXY_TYPE_SOCKS5, self.proxy[0], int(self.proxy[1]))
            self.socket2 = socksocket(AF_INET, SOCK_STREAM)
            self.socket2.settimeout(5)
            self.socket2.setproxy(PROXY_TYPE_SOCKS5, self.proxy2[0], int(self.proxy2[1]))
            self.socket.connect((chatConnection[0], int(chatConnection[1])))
            self.socket2.connect((chatConnection[0], int(chatConnection[1])))
            send(self.socket, "<y r=\"%s\" v=\"0\" u=\"%s\" />\0" % (room, self.user["UserId"][0]))
            send(self.socket2, "<y r=\"%s\" v=\"0\" u=\"%s\" />\0" % (room, self.user2["UserId"][0]))
            self.data = read(self.socket)
            self.data2 = read(self.socket2)
            if self.data == "" or not self.data: return
            if self.data2 == "" or not self.data2: return
            self.xml = fromstring(self.data.strip(chr(0))).attrib
            self.xml2 = fromstring(self.data2.strip(chr(0))).attrib
            self.bSock = socket(AF_INET, SOCK_STREAM)
            self.bSock.settimeout(5)
            self.bSock2 = socket(AF_INET, SOCK_STREAM)
            self.bSock2.settimeout(5)
            self.bSock.connect(("127.0.0.1", 1337))
            send(self.bSock, "<bot p=\"%s\" yi=\"%s\" au=\"%s\" />\0" % (self.xml["p"], self.xml["i"], self.xml["au"]))
            self.data = read(self.bSock)
            send(self.bSock, "<bot p=\"%s\" yi=\"%s\" au=\"%s\" />\0" % (self.xml2["p"], self.xml2["i"], self.xml2["au"]))
            self.data2 = read(self.bSock)
            self.data = self.data.replace("_lol", "")
            self.l5 = self.data[self.data.find('l5="') + 4:]
            self.l5 = self.l5[:self.l5.find('"')]
            self.ya = self.data[self.data.find('c="') + 3:]
            self.ya = self.ya[:self.ya.find('"')]
            self.data2 = self.data2.replace("_lol", "")
            self.l52 = self.data2[self.data2.find('l5="') + 4:]
            self.l52 = self.l52[:self.l52.find('"')]
            self.ya2 = self.data2[self.data2.find('c="') + 3:]
            self.ya2 = self.ya2[:self.ya2.find('"')]
            print self.ya2 + " : " + self.l52
            self.bSock.close()
            self.yaSock = socksocket(AF_INET, SOCK_STREAM)
            self.yaSock.settimeout(5)
            self.yaSock.setproxy(PROXY_TYPE_SOCKS5, self.proxy[0], int(self.proxy[1]))
            self.yaSock.connect((chatConnection[0], int(chatConnection[1])))
            self.yaSock2 = socksocket(AF_INET, SOCK_STREAM)
            self.yaSock2.settimeout(5)
            self.yaSock2.setproxy(PROXY_TYPE_SOCKS5, self.proxy2[0], int(self.proxy2[1]))
            self.yaSock2.connect((chatConnection[0], int(chatConnection[1])))
            send(self.yaSock, "<ya r=\"%s\" u=\"%s\" c=\"%s\" k=\"%s\" />\0" % (room, self.user["UserId"][0], self.ya, self.xml["k"]))
            print read(self.yaSock)
            self.yaSock.close()
            send(self.yaSock2, "<ya r=\"%s\" u=\"%s\" c=\"%s\" k=\"%s\" />\0" % (room, self.user2["UserId"][0], self.ya2, self.xml2["k"]))
            print read(self.yaSock2)
            self.yaSock2.close()
            self.j2 = "<j2 Y=\"2\" l5=\"" + self.l5 + "\" l4=\"1200\" l3=\"844\" l2=\"0\" cb=\"0\" q=\"1\" y=\"" + self.xml["i"] + "\" k=\"" + self.user["k1"][0] + "\" k3=\"0\" p=\"0\" c=\"" + room + "\" f=\"2\" u=\"" + self.user["UserId"][0] + "\" d0=\"0\" n=\"Zuhnny\" a=\"1\" h=\"xat sux\" v=\"0\" />\0"
            self.j22 = "<j2 Y=\"2\" l5=\"" + self.l52 + "\" l4=\"1200\" l3=\"844\" l2=\"0\" cb=\"0\" q=\"1\" y=\"" + self.xml2["i"] + "\" k=\"" + self.user2["k1"][0] + "\" k3=\"0\" p=\"0\" c=\"" + room + "\" f=\"2\" u=\"" + self.user2["UserId"][0] + "\" d0=\"0\" n=\"Zuhnny\" a=\"1\" h=\"xat sux\" v=\"0\" />\0"
            send(self.socket, self.j2)
            send(self.socket2, self.j22)
            while True:
                print self.socket.recv(6096)
                print self.socket2.recv(6096)
                sleep(1)
                send(self.socket, "<m t=\" F U C K X A T %s\" u=\"%s\" />\0" % (randint(0,5000), self.user["UserId"][0]))
                send(self.socket2, "<m t=\" F U C K X A T %s\" u=\"%s\" />\0" % (randint(0,5000), self.user2["UserId"][0]))
        except IOError, err: pass
        except Exception, error: pass

def read(socket):
    data = socket.recv(1024)
    return data

def send(socket, data):
    socket.sendall(data)

def getChatConnection(room):
    print '\ntest\n'
    if not isfile('./_files/ips.txt') or time() - getmtime('./_files/ips.txt') > 86400:
        fh = open('./_files/ips.txt', 'w')
        fh.write(urlopen('http://xat.com/web_gear/chat/ip2.htm?' + str(time())).read())
        fh.close()
    try:
        fh = open('./_files/ips.txt', 'r')
        iprules = loads(fh.read())
        Fx = iprules[iprules["order"][0][0]]
        xAddr = Fx[1][randint(0, len(Fx[1]) - 1)].split(':')
        if len(xAddr) == 1: xAddr.append(10000)
        if len(xAddr) == 2: xAddr.append(39)
        xPort = xAddr[1] + randint(0, xAddr[2] - 1)
        return (xAddr[0], 9999 + int(room) if int(room) < 8 else 10007 + (int(room) % 32))
    except Exception, e:
        print e

file = open("./_files/proxies.txt")
proxyLen = len(map(lambda(x): x.split(':'), file))
file2 = open("./_files/ids.txt")
idLen = len(map(lambda(x): x.split('\n'), file2))

threadLock = Lock()
threads = []

room = raw_input("Room ID to raid: ")
chatConnection = getChatConnection(room)

for x in range(1000):
    threads.append(myThread(x, "Thread-" + str(x)).start())

# Wait for all threads to complete
for t in threads:
    t.join()
print "Exiting Main Thread"
I have a guess at your problem. I don't think it actually is race conditions at all. I haven't read all of your code carefully, but I don't see any global or otherwise shared variables being mutated. But I do see a different problem.
You aren't buffering up your reads; you're just expecting that each bSock.recv(1024) is going to receive exactly one message. That isn't how TCP works; you may receive half of a message, or two messages, or the second half of the previous message and the first half of the next.
If you don't stress your computer or the network very hard, and your messages are all pretty small, it may (depending on the platform) work 99.9% of the time, meaning you don't notice any problem. But as soon as you stress things, it'll start to fail more often.
And you've got 400 threads, and from your old-style code (e.g., except Type, value) it looks like you may be on a system old enough that it's stuck on Python 2.5, which means you may be stressing the system very hard.
You need to fix this by receiving in a loop until you have one or more complete messages, then handling those messages, then returning to the loop, instead of handling each recv as if it were guaranteed to be exactly one complete message.
Fortunately, you're dealing with IRC, which (assuming you're not doing any DCC, etc.) has exactly one command per line, and Python has a nice wrapper around sockets that makes them look like line-buffered files. So you can do this:
bfile = bsock.makefile()
for line in bfile:
Now you know that line is guaranteed to be a complete line, even if it had to do three reads, and buffer up most of the third read until your next time through the loop.
You're doing the same thing in at least three places, so obviously you need to fix them all. Also, you need to make sure to close the socket and the file appropriately. And you need to detect when the other side closes the socket. (The recv, or the next line, will return an empty string.)
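Here is a minimal sketch of what such a buffering loop could look like for your protocol, which appears to frame each message with a trailing '\0' (recv_messages and handle are hypothetical names, and the delimiter is my assumption based on the strings in your code):
def recv_messages(sock, buf, delimiter="\0"):
    # keep reading until at least one complete, delimiter-terminated message is buffered
    while delimiter not in buf:
        chunk = sock.recv(4096)
        if not chunk:          # the other side closed the connection
            return [], ""
        buf += chunk
    parts = buf.split(delimiter)
    return parts[:-1], parts[-1]   # the last piece is an incomplete tail; keep it for next time

# usage sketch: keep one buffer per socket
# buf = ""
# while True:
#     msgs, buf = recv_messages(self.socket, buf)
#     if not msgs and not buf:
#         break                    # connection closed
#     for msg in msgs:
#         handle(msg)              # hypothetical handler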
Another possibility:
There is at least one thing all of the threads are sharing: that bsock socket. And they all do this 5 seconds after launch:
bSock.sendall("<bot p=\"%s\" au=\"%s\" yi=\"%s\" />\0" % (xml["p"], xml["au"], xml["i"]))
data = bSock.recv(1024)
What's to stop thread #42 from doing its sendall, then thread #23 doing its sendall, then thread #42 from doing its recv and getting the data intended for thread #42?
This is what's called a "critical section" or "atomic block": a chunk of code that only one thread can run at a time or everyone will get confused. The usual way around it is to share a Lock, and have each thread acquire the Lock before running this code. If thread #42 already has the lock, and thread #23 tries to acquire it, it will be blocked until thread #42 releases the lock, so there's no chance of them conflicting. So:
bSockLock = threading.Lock()

# ...

for x in range(400):
    Thread(target = __init__, args=[chatConnection, bSock, bSockLock]).start()

# ...

def __init__(chatConnection, bSock):
    # ...
    for x in range(3):
        start(chatConnection, proxies[x][0], proxies[x][1], [ids[x]["UserId"][0], ids[x]["k1"][0], ids[x]["k2"][0]], room, bSock, bSockLock)
    # ...

def start(chatConnection, proxyIP, proxyPort, user, room, bSock, bSockLock):
    # ...
    with bSockLock:
        bSock.sendall("<bot p=\"%s\" au=\"%s\" yi=\"%s\" />\0" % (xml["p"], xml["au"], xml["i"]))
        data = bSock.recv(1024)
Recently, I have been working on a gevent demo, and I tried to compare the efficiency of gevent against threads. Generally speaking, the gevent code should be more efficient than the thread code. But when I use the time command to profile the program, I get an unusual result (my command is time python FILENAME.py 50 1000; the last two parameters mean the pool size or thread count, so I change those two numbers in the table below). The results show that the thread version is more efficient than the gevent version, so I want to know why this happens and what's wrong with my program. Thanks.
[results table: gevent VS thread]
My code is below (the main idea is to use threads or gevent to send multiple HTTP requests):
******This is the thread version code******
# _*_ coding: utf-8 _*_
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import requests
import threading
import time
import urllib2
finished = 0
def GetUrl(pagenum):
    url = 'http://opendata.baidu.com/zhaopin/s?p=mini&wd=%B0%D9%B6%C8&pn=' + \
        str(pagenum*20) + '&rn=20'
    return url

def setUrlSet():
    for i in xrange(requestnum):
        urlnum = i % 38
        urlset.append(GetUrl(urlnum))

def GetResponse(pagenum):
    try:
        r = requests.get(urlset[pagenum])
    except Exception, e:
        print e
        pass

def DigJobByPagenum(pagenum, requestnum):
    init_num = pagenum
    print '%d begin' % init_num
    while pagenum < requestnum:
        GetResponse(pagenum)
        pagenum += threadnum
    print '%d over' % init_num

def NormalThread(threadnum):
    startime = time.time()
    print "%s is running..." % threading.current_thread().name
    threads = []
    global finished, requestnum
    for i in xrange(threadnum):
        thread = threading.Thread(target=DigJobByPagenum, args=(i, requestnum))
        threads.append(thread)
    for t in threads:
        t.daemon = True
        t.start()
    for t in threads:
        t.join()
        finished += 1
    endtime = time.time()
    print "%s is stop.The total time is %0.2f" % \
        (threading.current_thread().name, (endtime - startime))

def GetAvageTime(array):
    alltime = 0.0
    for i in array:
        alltime += i
    avageTime = alltime/len(array)
    return avageTime

if __name__ == '__main__':
    threadnum = int(sys.argv[1])
    requestnum = int(sys.argv[2])
    print 'threadnum : %s,requestnum %s ' % (threadnum, requestnum)
    originStartTime = time.time()
    urlset = []
    setUrlSet()
    NormalThread(threadnum)
******This is the gevent verison code******
# _*_ coding: utf-8 _*_
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from gevent import monkey
monkey.patch_all()
import gevent
from gevent import pool
import requests
import time
finished = 0
def GetUrl(pagenum):
    url = 'http://opendata.baidu.com/zhaopin/s?p=mini&wd=%B0%D9%B6%C8&pn=' + \
        str(pagenum*20) + '&rn=20'
    return url

def setUrlSet():
    for i in xrange(requestnum):
        urlnum = i % 38
        urlset.append(GetUrl(urlnum))

def GetResponse(url):
    startime = time.time()
    r = requests.get(url)
    print url
    endtime = time.time()
    spendtime = endtime - startime
    NormalSpendTime.append(spendtime)
    global finished
    finished += 1
    print finished

def GetAvageTime(array):
    alltime = 0.0
    for i in array:
        alltime += i
    avageTime = alltime/len(array)
    return avageTime

def RunAsyncJob():
    jobpool = pool.Pool(concurrent)
    for url in urlset:
        jobpool.spawn(GetResponse, url)
    jobpool.join()
    endtime = time.time()
    allSpendTime = endtime - originStartime
    print 'Total spend time is %0.3f, total request num is %s within %s \
seconds' % (allSpendTime, finished, timeoutNum)
    print 'Each request time is %0.3f' % (GetAvageTime(NormalSpendTime))

if __name__ == '__main__':
    concurrent = int(sys.argv[1])
    requestnum = int(sys.argv[2])
    timeoutNum = 100
    NormalSpendTime = []
    urlset = []
    urlActionList = []
    setUrlSet()
    originStartime = time.time()
    RunAsyncJob()
Try
gevent.monkey.patch_all(httplib=True)
It seems that by default gevent does not patch httplib (have a look at http://www.gevent.org/gevent.monkey.html : httplib=False) so you are actually doing blocking requests and you lose all advantages of the asynchronous framework. Although I'm not sure whether requests uses httplib.
If that doesn't work, then have a look at this lib:
https://github.com/kennethreitz/grequests
Re: httplib=False
You are already using the requests library to make web calls. It has a gevent flavour called grequests:
https://github.com/kennethreitz/grequests
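For completeness, a rough grequests sketch that mirrors the command-line arguments of your scripts (the urlset construction is copied from your code; the size argument to grequests.map is what bounds the concurrency):
import sys
import grequests   # pip install grequests

concurrent = int(sys.argv[1])
requestnum = int(sys.argv[2])

urlset = ['http://opendata.baidu.com/zhaopin/s?p=mini&wd=%B0%D9%B6%C8&pn=' +
          str((i % 38) * 20) + '&rn=20' for i in xrange(requestnum)]

jobs = (grequests.get(url) for url in urlset)
responses = grequests.map(jobs, size=concurrent)   # gevent pool of `concurrent` greenlets
print len([r for r in responses if r is not None and r.ok]), 'requests succeeded'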
Overall I don't immediately see much reason to prefer one style of threading to the other, if your pool is so small. Of course real threads are relatively heavy (start with 8MB stack), but you have to take that into proportion to the size of your job.
My take, try both (done), verify you are doing both right (to do) and let numbers do the talking.
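As a footnote on the 8MB point: if you do end up with a large pool of real threads, the per-thread stack can be shrunk before the threads are created. A minimal sketch (the 512 KB figure is arbitrary):
import threading

threading.stack_size(512 * 1024)   # applies only to threads created after this call

def work():
    pass   # placeholder job

threads = [threading.Thread(target=work) for _ in xrange(50)]
for t in threads:
    t.start()
for t in threads:
    t.join()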
The purpose of my program is to download a file with threads. I define a unit size and use len/unit threads, where len is the length of the file to be downloaded.
Using my program the file can be downloaded, but the threads do not stop. I can't find the reason why.
This is my code...
#! /usr/bin/python
import urllib2
import threading
import os
from time import ctime
class MyThread(threading.Thread):
    def __init__(self,func,args,name=''):
        threading.Thread.__init__(self);
        self.func = func;
        self.args = args;
        self.name = name;
    def run(self):
        apply(self.func,self.args);

url = 'http://ubuntuone.com/1SHQeCAQWgIjUP2945hkZF';
request = urllib2.Request(url);
response = urllib2.urlopen(request);
meta = response.info();
response.close();

unit = 1000000;
flen = int(meta.getheaders('Content-Length')[0]);
print flen;
if flen%unit == 0:
    bs = flen/unit;
else :
    bs = flen/unit+1;
blocks = range(bs);
cnt = {};
for i in blocks:
    cnt[i]=i;

def getStr(i):
    try:
        print 'Thread %d start.'%(i,);
        fout = open('a.zip','wb');
        fout.seek(i*unit,0);
        if (i+1)*unit > flen:
            request.add_header('Range','bytes=%d-%d'%(i*unit,flen-1));
        else :
            request.add_header('Range','bytes=%d-%d'%(i*unit,(i+1)*unit-1));
        #opener = urllib2.build_opener();
        #buf = opener.open(request).read();
        resp = urllib2.urlopen(request);
        buf = resp.read();
        fout.write(buf);
    except BaseException:
        print 'Error';
    finally :
        #opener.close();
        fout.flush();
        fout.close();
        del cnt[i];
        # filelen = os.path.getsize('a.zip');
        print 'Thread %d ended.'%(i),
        print cnt;
        # print 'progress : %4.2f'%(filelen*100.0/flen,),'%';

def main():
    print 'download at:',ctime();
    threads = [];
    for i in blocks:
        t = MyThread(getStr,(blocks[i],),getStr.__name__);
        threads.append(t);
    for i in blocks:
        threads[i].start();
    for i in blocks:
        # print 'this is the %d thread;'%(i,);
        threads[i].join();
    #print 'size:',os.path.getsize('a.zip');
    print 'download done at:',ctime();

if __name__=='__main__':
    main();
Could someone please help me understand why the threads aren't stopping?
I can't really address your code example because it is quite messy and hard to follow, but a potential reason you are seeing the threads not end is that a request will stall out and never finish. urllib2 allows you to specify timeouts for how long you will allow the request to take.
What I would recommend for your own code is that you split your work up into a queue, start a fixed number of thread (instead of a variable number), and let the worker threads pick up work until it is done. Make the http requests have a timeout. If the timeout expires, try again or put the work back into the queue.
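For the timeout part, a small sketch built on the question's request object (the 30-second value and the retry comment are placeholders):
import socket
import urllib2

try:
    resp = urllib2.urlopen(request, timeout=30)   # give up if the server stalls
    buf = resp.read()
except (urllib2.URLError, socket.timeout) as err:
    print 'request failed or timed out:', err
    # retry here, or push this block back onto the work queue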
Here is a generic example of how to use a queue, a fixed number of workers and a sync primitive between them:
import threading
import time
from Queue import Queue
def worker(queue, results, lock):
    local_results = []
    while True:
        val = queue.get()
        if val is None:
            break
        # pretend to do work
        time.sleep(.1)
        local_results.append(val)
    with lock:
        results.extend(local_results)
    print threading.current_thread().name, "Done!"

num_workers = 4
threads = []
queue = Queue()
lock = threading.Lock()
results = []

for i in xrange(100):
    queue.put(i)

for _ in xrange(num_workers):
    # Use None as a sentinel to signal the threads to end
    queue.put(None)
    t = threading.Thread(target=worker, args=(queue,results,lock))
    t.start()
    threads.append(t)

for t in threads:
    t.join()

print sorted(results)
print "All done"
I am using the multiprocessing module in Python to spawn new processes, one for each year from 2000 to 2012. This was running successfully until last week. Now the code runs fine without throwing any errors and seems to spawn new processes, but it does not start them simultaneously. The machine I am running this on uses Ubuntu and has plenty of memory, with 24 processors.
The processes seem to run sequentially instead of in parallel. There have been no code changes in the past 3 months, so I suspect it is an environment issue, but I am clueless about where to start debugging. Any suggestions?
Is it possible for some default kernel setting to prevent simultaneous execution of code? Or some Python setting?
Code:
class ForEachPerson(multiprocessing.Process):
    """This class contains the funcs for the main processing."""

    def __init__(self, year_queue, result_queue, dict_of_files, all, today):
        multiprocessing.Process.__init__(self)
        self.work_queue = year_queue
        self.result_queue = result_queue
        self.kill_received = False
        self.dict = dict_of_files
        self.all = all
        self.today = today

    def run(self):
        while not self.kill_received:
            try:
                year = self.work_queue.get_nowait()
                year_start_date = year[0]
                year_end_date = year[1]
                split = year_end_date.year
            except Queue.Empty:
                self.result_queue.close()
                return
            if self.all:
                try:
                    null_pids = self.dict["null_pids"]
                except KeyError:
                    null_pids = []
            #For each employee calculate the data and write to file.
            today = self.today
            hie = hie_util.Build()
            hie_op = open("output.csv", "wb")
            hierarchy_op.write("....\n")
            /* do function */
            ............
            hierarchy_op.close()
            timestr = ("%s End writing for %s"
                       % (str(datetime.datetime.now()), str(year)))
            self.result_queue.put(timestr)

def Manage(years, dict_of_files, num_processes, all, today):
    """Responsible for creating & assigning tasks to worker processes."""
    #load up year queue
    year_queue = multiprocessing.Queue()
    for year in years:
        year_queue.put(year)
    if num_processes > len(years):
        num_processes = len(years)
    # queue to pass to workers to store the results
    result_queue = multiprocessing.Queue()
    # spawn workers
    workers = []
    for i in range(num_processes):
        worker = ForEachPerson(year_queue, result_queue, dict_of_files, all, today)
        logging.info("Worker spawned for processor " + str(i + 1))
        worker.start()
        workers.append(worker)
    # collect results off the queue
    logging.info("results being collected")
    results = []
    while len(results) < len(years):
        try:
            result = result_queue.get()
            logging.info(str(result[0]))
            results.append(result[1])
        except Queue.Empty:
            pass
    count = 0
    for worker in workers:
        logging.info("Terminating worker: " + str(count))
        worker.terminate()
        count += 1
    return results

def RunHie():
    """Main control flow for building."""
    logging.info("Start ")
    sql_instance = hie_sql.SQLExportImport()
    sql_instance.RunEtl()
    # gather list of dates
    date_full_list = DailyDates()
    dict_of_files = ReadFiles()
    # calculate hierarchy - run
    num_processes = multiprocessing.cpu_count() - 1
    results = Manage(date_full_list, dict_of_files, num_processes, 0, today[1])
    logging.info("End")