Handle multiprocess in Python

My code processes some parallel Perforce tasks while showing a progress bar and letting the user terminate the job whenever they want. The problem is that when the user clicks the close button, the thread function is not killed, but the lock is released and the main UI thread is unblocked.
The p4.run_sync() call does not terminate when the Cancel button is clicked.
def P4SyncLibrary(args, que):
    syncType = args[0]
    view = args[1]
    p4 = P4CreateConnection(disable_tmp_cleanup=True)
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" % e
    p4.disconnect()
    que.put(None)
class CreateJob(QtGui.QDialog):
    def __init__(self, thread, args):
        QtGui.QDialog.__init__(self)
        self.ui = Ui_ProgressBar()
        self.ui.setupUi(self)
        self.ui.cancel.clicked.connect(self.closeEvent)
        self.ui.cancel.setIcon(QtGui.QIcon(QtGui.QPixmap("%s/delete.xpm" % resources)))
        self.threadControl = ThreadControl(thread=thread, args=args)
        self.connect(self.threadControl, QtCore.SIGNAL("__updateProgressBar(int)"), self.__updateProgressBar)
        self.threadControl.finished.connect(self.closeEvent)
        self.threadControl.start()

    @QtCore.pyqtSlot(int)
    def __updateProgressBar(self, val):
        self.ui.progressBar.setValue(val)
        self.setWindowTitle("Processing: {0}%".format(val))

    def closeEvent(self, QCloseEvent=None):
        if self.threadControl.isRunning():
            self.threadControl.stop()
            self.threadControl.wait()
        if QCloseEvent:
            QtGui.QDialog.closeEvent(self, QCloseEvent)
        else:
            self.close()

    def getResults(self):
        return self.threadControl.resultDict
class ThreadControl(QtCore.QThread):
    stopFlag = 0

    def __init__(self, thread=None, args=None):
        super(ThreadControl, self).__init__()
        self.args = args
        self.thread = thread
        self.resultDict = []

    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            process = multiprocessing.Process(target=self.thread, args=(arg, queue))
            process.start()
            threads[process] = 1  ## ACTIVE thread

        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self.stopFlag:
                for t in threads.keys():
                    if threads[t] == 1:
                        t.terminate()
                        t.join()
                        threads[t] = 0
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self.stopFlag:
                        break  ## Process threads termination
                    elif threads[t] == 1 and not t.is_alive():
                        threads[t] = 0
                        completedThreads += 1
                        self.resultDict.append(queue.get())
                        self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads * 100) / total)
            sleep(0.5)  ## Prevent CPU from overloading

    def stop(self):
        self.stopFlag = 1
A job is created using an instance of CreateJob:
CreateJob(thread=P4SyncLibrary, args=P4Libraries).exec_()

The only solution I could come up with is to pass the p4 object to the calling thread as an argument, so that the p4 server connection can be disconnected when the user wants to cancel the job.
def P4SyncLibrary(p4, args, que):
    syncType = args[0]
    view = args[1]
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" % e
    que.put(None)
class ThreadControl(QtCore.QThread):
    ...
    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            connection = P4CreateConnection(disable_tmp_cleanup=True)
            if connection.connected():
                process = multiprocessing.Process(target=self.thread, args=(connection, arg, queue))
                process.start()
                threads[process] = {
                    'isAlive': True,
                    'connection': connection
                }

        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self._stop:
                for t in threads.keys():
                    if threads[t]['isAlive']:
                        threads[t]['connection'].disconnect()
                        t.terminate()
                        t.join()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self._stop:
                        break  ## Process threads termination
                    elif threads[t]['isAlive'] and not t.is_alive():
                        threads[t]['connection'].disconnect()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
                        self.results.append(queue.get())
                        self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads * 100) / total)
            sleep(0.5)  ## Prevent CPU from overloading
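
For readers who want to experiment with the cancellation mechanics in isolation, below is a minimal, self-contained sketch of the same pattern the question relies on: blocking work running in child processes, stopped from the parent with terminate() and join(). It has no Perforce or Qt dependency, and every name in it is illustrative rather than taken from the code above. Note that terminate() ends the child abruptly (SIGTERM on POSIX), so finally blocks and other cleanup in the child do not run, which is why the p4 connection above has to be disconnected from the parent side.

# Minimal, self-contained sketch (no Perforce, no Qt); names are illustrative.
import multiprocessing
import time

def blocking_task(arg, que):
    # Stands in for a long blocking call such as p4.run_sync(view).
    time.sleep(10)
    que.put(arg)

if __name__ == '__main__':
    que = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=blocking_task, args=(i, que))
             for i in range(3)]
    for p in procs:
        p.start()

    time.sleep(1)  # pretend the user clicked Cancel here
    for p in procs:
        if p.is_alive():
            p.terminate()  # abrupt stop: no finally blocks run in the child
        p.join()
    print("all children stopped")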

Related

Python custom signal handling in processes pool

I am dealing with the following problem:
I've implemented a dummy 'Thing' class that sleeps for 10 seconds and logs a message ('foo'). The class is instantiated in a worker function for a process Pool, and its 'foo' method, which implements the logic just described, is called.
What I want to achieve is custom signal handling: as long as the processes haven't terminated, if CTRL+C (SIGINT) is sent, each process logs the signal and terminates immediately.
Half of the logic works: while the processes are sleeping, SIGINT interrupts them and the Pool is closed.
Problem: if ALL the processes end successfully and SIGINT is then sent, the message is logged but the Pool is not closed.
Code:
import logging
import signal
import os
import time
from multiprocessing import Pool, current_process

logger = logging.getLogger('test')

SIGNAL_NAMES = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
                    if v.startswith('SIG') and not v.startswith('SIG_'))


class Thing(object):
    def __init__(self, my_id):
        self.my_id = my_id
        self.logger = logging.getLogger(str(my_id))

    def foo(self):
        time.sleep(10)
        self.logger.info('[%s] Foo after 10 secs!', self.my_id)


class Daemon(object):
    def __init__(self, no_processes, max_count):
        signal.signal(signal.SIGINT, self.stop)
        self.done = False
        self.count = 0
        self.max_count = max_count
        self.pool = Pool(no_processes, initializer=self.pool_initializer)

    def stop(self, signum, _):
        """ Stop function for Daemon """
        sig = SIGNAL_NAMES.get(signum) or signum
        logger.info('[Daemon] Stopping (received signal %s)', sig)
        self.done = True

    def _generate_ids(self):
        """ Generator function of the IDs for the Processes Pool """
        while not self.done:
            if self.count < self.max_count:
                my_id = "ID-{}".format(self.count)
                logger.info('[Daemon] Generated ID %s', my_id)
                time.sleep(3)
                yield my_id
                self.count += 1
            time.sleep(1)

    def run(self):
        """ Main daemon run function """
        pid = os.getpid()
        logger.info('[Daemon] Started running on PID %s', str(pid))
        my_ids = self._generate_ids()
        for res in self.pool.imap_unordered(run_thing, my_ids):
            logger.info("[Daemon] Finished %s", res or '')
        logger.info('[Daemon] Closing & waiting processes to terminate')
        self.pool.close()
        self.pool.join()

    def pool_initializer(self):
        """ Pool initializer function """
        signal.signal(signal.SIGINT, self.worker_signal_handler)

    @staticmethod
    def worker_signal_handler(signum, _):
        """ Signal handler for the Process worker """
        sig = SIGNAL_NAMES.get(signum) or signum
        cp = current_process()
        logger.info("[%s] Received in worker %s signal %s", WORKER_THING_ID or '', str(cp), sig)
        global WORKER_EXITING
        WORKER_EXITING = True


WORKER_EXITING = False
WORKER_THING_ID = None


def run_thing(arg):
    """ Worker function for processes """
    if WORKER_EXITING:
        return
    global WORKER_THING_ID
    WORKER_THING_ID = arg
    run_exception = None
    logger.info('[%s] START Thing foo-ing', arg)
    logging.getLogger('Thing-{}'.format(arg)).setLevel(logging.INFO)
    try:
        thing = Thing(arg)
        thing.foo()
    except Exception as e:
        run_exception = e
    finally:
        WORKER_THING_ID = None
        logger.info('[%s] STOP Thing foo-ing', arg)
        if run_exception:
            logger.error('[%s] EXCEPTION on Thing foo-ing: %s', arg, run_exception)
        return arg


if __name__ == '__main__':
    logging.basicConfig()
    logger.setLevel(logging.INFO)
    daemon = Daemon(4, 3)
    daemon.run()
Your problem is the logic in the function _generate_ids(). The function never ends, so pool.imap_unordered() never finishes by itself; it can only be interrupted by CTRL-C.
Change it for something like this:
def _generate_ids(self):
    """ Generator function of the IDs for the Processes Pool """
    for i in range(self.max_count):
        time.sleep(3)
        my_id = "ID-{}".format(self.count)
        logger.info('[Daemon] Generated ID %s', my_id)
        if self.done:
            break
        self.count += 1
        yield my_id
And the processes end by themselves normally.
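
To see the point in isolation, here is a minimal standalone sketch (names are illustrative, not taken from the question): the for-loop over imap_unordered() only finishes once its input iterable is exhausted, so a bounded generator lets the consuming loop end on its own.

# Minimal illustration: the loop over imap_unordered() ends only when the
# input generator raises StopIteration.  Names are illustrative.
from multiprocessing import Pool

def square(n):
    return n * n

def bounded_ids(max_count):
    for i in range(max_count):  # exhausted after max_count items
        yield i

if __name__ == '__main__':
    with Pool(4) as pool:
        for result in pool.imap_unordered(square, bounded_ids(10)):
            print(result)
    print('generator exhausted, loop finished')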

Terminating all sub-processes after an event

I have a Python multiprocessing scenario which I have simplified for my question here. There are x jobs to be processed in 2 parts. In my code, the 2 job parts are actually HTTP requests, where part 2 depends on the results of part 1. Finally, there is a 3rd part that simply reports how long parts 1 and 2 took and calculates a running average of the time taken across all jobs.
Taking advantage of multiprocessing, I set up 2 process workers for Job Part 1, 2 workers for Job Part 2, and only 1 Reporter worker. To communicate the time taken for each job part, I am using Queues.
The code I have works fine for a complete run through the specified x jobs, BUT I would like to add a timeout/cancel event that stops all the workers and terminates gracefully.
In my code, I use the Reporter worker to check for this event, and when it happens I thought it would simply be a case of consuming the remaining jobs in all the queues and adding a poison pill to signal the workers to terminate.
The sub-processes do terminate, BUT it seems control is NOT passed back to the "main" program, and I can still see the main process hanging until I press Ctrl-C in the command prompt.
Please help see my code and tell me where I am going wrong:
#!/usr/bin/python3
import sys, time, datetime, os, math, multiprocessing, time, random

TYPE_JOB_PART_1 = '1'
TYPE_JOB_PART_2 = '2'


class Worker(multiprocessing.Process):

    def __init__(self, task_queue, result_queue, task_type):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.task_type = task_type

    def run(self):
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                #= Poison pill means shutdown
                print('%s: is exiting: %s ===============' % (self.name, self.task_type))
                self.task_queue.task_done()
                break
            job_response = next_task()
            self.task_queue.task_done()
            if self.task_type == TYPE_JOB_PART_1:
                self.result_queue.put(do_jobPart_2(job_response))
            elif self.task_type == TYPE_JOB_PART_2:
                self.result_queue.put(do_reporting(job_response))
        return


class Reporter(multiprocessing.Process):

    def __init__(self, task_queue, result_queue, num_tasks, num_workers_jobPart1, num_workers_jobPart2, jobPart1_queue, jobPart2_queue):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.jobPart1_queue = jobPart1_queue
        self.jobPart2_queue = jobPart2_queue
        self.num_tasks = num_tasks
        self.num_workers_jobPart1 = num_workers_jobPart1
        self.num_workers_jobPart2 = num_workers_jobPart2
        self.time_start = datetime.datetime.now()  #= Start the timer
        self.time_elapsed = 0
        self.time_wait_to_terminate = 3  #= Define the timeout to terminate all jobs

    def run(self):
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                #= Poison pill means shutdown
                self.task_queue.task_done()
                print('==================================================')
                print('============ END OF PROCESSING ===================')
                print('==================================================')
                break
            job_response = next_task()
            self.task_queue.task_done()
            self.result_queue.put(job_response)
            queueSize = self.result_queue.qsize()

            #= TERMINATION time
            print("================>i:%s" % (queueSize))
            self.time_elapsed = (datetime.datetime.now() - self.time_start).total_seconds()
            if self.time_elapsed > self.time_wait_to_terminate:
                print("TIME IS UP. %s elapsed!" % self.time_wait_to_terminate)

                #= Empty the JobPart_1 queue to relieve the workers
                while not self.jobPart1_queue.empty():
                    self.jobPart1_queue.get()
                    self.jobPart1_queue.task_done()
                #= And add poison pills again
                for i in range(self.num_workers_jobPart1):
                    self.jobPart1_queue.put(None)

                #= Empty the JobPart_2 queue to relieve the workers
                while not self.jobPart2_queue.empty():
                    self.jobPart2_queue.get()
                    self.jobPart2_queue.task_done()
                #= And add poison pills again
                for i in range(self.num_workers_jobPart2):
                    self.jobPart2_queue.put(None)

                #= Empty the report queue to relieve the reporter itself
                while not self.task_queue.empty():
                    self.task_queue.get()
                    self.task_queue.task_done()

                print("TIME IS UP: workers stopped, Reporter shutting itself down....")
                break

            #= Results queue is filled up when count = num_tasks, so give Poison pill to shutdown JobPart_2 workers
            if queueSize == self.num_tasks:
                for i in range(self.num_workers_jobPart2):
                    self.jobPart2_queue.put(None)
                print("JobPart_2 workers will be poisoned")
        return


class do_reporting(object):
    def __init__(self, info):
        self.info = info

    def __call__(self):
        try:
            print("%s:do_reporting - is RUNNING " % (self.info['jobPart1_results']['i']))
            randtime = 0.5 * random.random()
            time.sleep(randtime)
            print('jobPart1_time:%s, jobPart2_time:%s, report_time;%s' % (self.info["jobPart1_results"]["jobPart1_time"], self.info["jobPart2_time"], randtime))
            return {'results': self.info, 'report_time': randtime}
        except:
            print("error:do_reporting")


class do_jobPart_1(object):
    def __init__(self, i, t0):
        self.t0 = t0
        self.i = i

    def __call__(self):
        try:
            print("%s:do_jobPart_1 - is RUNNING " % self.i)
            randtime = 0.5 * random.random()
            time.sleep(randtime)
            time_elapsed = (datetime.datetime.now() - self.t0).total_seconds()
            return {'i': self.i, 't0': self.t0, 'time_elapsed_job1': time_elapsed, 'jobPart1_time': randtime}
        except:
            print("error:do_jobPart_1")


class do_jobPart_2(object):
    def __init__(self, info):
        self.info = info

    def __call__(self):
        try:
            print("%s:do_jobPart_2 - is RUNNING " % (self.info['i']))
            randtime = 0.5 * random.random()
            time.sleep(randtime)
            return {"jobPart1_results": self.info, 'jobPart2_time': randtime}
        except:
            print("error:do_jobPart_2")


if __name__ == '__main__':

    print('==================================================')
    print('============ START PROCESSING ====================')
    print('==================================================')

    #===============================================
    #= Establish communication queues
    q_jobPart_1 = multiprocessing.JoinableQueue()
    q_jobPart_2 = multiprocessing.JoinableQueue()
    q_reportTasks = multiprocessing.JoinableQueue()
    q_results = multiprocessing.Queue()

    #===============================================
    #= Start workers. Reporter !!! Should always be just 1 worker !!!
    numJobs = 90
    numWorkers_jobPart1 = 2
    numWorkers_jobPart2 = 2
    workersJobPart_1 = [Worker(q_jobPart_1, q_jobPart_2, TYPE_JOB_PART_1) for i in range(numWorkers_jobPart1)]
    workersJobPart_2 = [Worker(q_jobPart_2, q_reportTasks, TYPE_JOB_PART_2) for i in range(numWorkers_jobPart2)]
    workerJobReporter = Reporter(q_reportTasks, q_results, numJobs, numWorkers_jobPart1, numWorkers_jobPart2, q_jobPart_1, q_jobPart_2)

    #===============================================
    #= Start the workers
    print("Main PID:%s" % os.getpid())
    for w in workersJobPart_1:
        w.start()
        print("JobPart_1 PID=%s" % w.pid)
    for w in workersJobPart_2:
        w.start()
        print("JobPart_2 PID=%s" % w.pid)
    workerJobReporter.start()
    print("JobReporter PID=%s" % workerJobReporter.pid)

    #= Start the timer and add tasks to the queues
    time_start = datetime.datetime.now()
    for i in range(numJobs):
        q_jobPart_1.put(do_jobPart_1(i, time_start))
    #= Add poison pill for each jobPart_1 worker
    for i in range(numWorkers_jobPart1):
        q_jobPart_1.put(None)

    q_jobPart_1.join()
    print("JobPart_1 workers terminated")

    q_jobPart_2.join()
    print("JobPart_2 workers terminated")

    q_reportTasks.put(None)
    q_reportTasks.join()
    workerJobReporter.terminate()
    print("Reporter terminated")

    print("FINISHED")

Python: How to add new thread after one of threads breaks on error

I'm trying to create a loop of threads, and so far the code is good. But I have a problem when a thread exits because of an exception.
Now I'm trying to figure out how to start an additional thread after one thread exits because of an exception. I did browse around, but I didn't find any example that would work for this complex code. Any help would be great!
If a thread has stopped and the queue is not empty, restart the stopped thread and continue with the rest of the list.
This is my code:
import threading
import Queue
from time import sleep

some_list = [1, 2, 3, 4, 5, 6, 7, 8]
exitFlag = 0


class threads():

    @staticmethod
    def process_data(threadName, q, queueLock):
        workQueue = q
        while not exitFlag:
            queueLock.acquire()
            if not workQueue.empty():
                data = q.get()
                queueLock.release()
                print "%s processing %s" % (threadName, data)
            else:
                queueLock.release()
            sleep(1)

    def run_threads(self):
        threadList = ["Thread-1", "Thread-2", "Thread-3"]
        nameList = some_list
        queueLock = threading.Lock()
        workQueue = Queue.Queue(1000000)
        threads = []
        threadID = 1

        # Create new threads
        for tName in threadList:
            thread = myThread(threadID, tName, workQueue, queueLock)
            thread.start()
            threads.append(thread)
            threadID += 1

        # Fill the queue
        queueLock.acquire()
        for word in nameList:
            workQueue.put(word)
        queueLock.release()

        # Wait for queue to empty
        while not workQueue.empty():
            pass

        # Notify threads it's time to exit
        global exitFlag
        exitFlag = 1

        # Wait for all threads to complete
        for t in threads:
            t.join()
        print "Exiting Main Thread"


class myThread(threading.Thread, threads):
    def __init__(self, threadID, name, q, queueLock):
        self.thread = threading.Thread(target=self.run)
        threading.Thread.__init__(self, target=self.run)
        self.threadID = threadID
        self.queueLock = queueLock
        self.name = name
        self.q = q

    def run(self):
        print "Starting " + self.name
        threads.process_data(self.name, self.q, self.queueLock)
        print "Exiting " + self.name


threads().run_threads()
Something like this should work:
...
# Wait for queue to empty
while not workQueue.empty():
    for (i, t) in enumerate(threads):
        if not t.is_alive():
            print("Recreating thread " + t.name)
            thread = myThread(threadID, threadList[i], workQueue, queueLock)
            thread.start()
            threads[i] = thread
            threadID += 1
...
I would advise putting the thread-starting code into a helper method, as it is now duplicated and hard to maintain.
The problem here is that you might lose the data that was popped from the queue by the failed thread.
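
One possible shape for that helper, sketched under the assumption that it lives in the same scope as threadList, workQueue and queueLock from the question; the start_worker name is made up for illustration.

# Sketch of the suggested refactor: one place that creates and starts a worker,
# used both for the initial pool and for replacements.  Python 2 style to match
# the question; the helper name is illustrative.
def start_worker(threadID, name, workQueue, queueLock):
    thread = myThread(threadID, name, workQueue, queueLock)
    thread.start()
    return thread

# Initial pool
threads = [start_worker(i + 1, name, workQueue, queueLock)
           for i, name in enumerate(threadList)]
threadID = len(threads) + 1

# Supervision loop: replace any worker that died while work remains
while not workQueue.empty():
    for i, t in enumerate(threads):
        if not t.is_alive():
            print "Recreating thread " + t.name
            threads[i] = start_worker(threadID, threadList[i], workQueue, queueLock)
            threadID += 1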

Non-blocking way to determine if thread is finished?

I have the following code:
import threading
import time

class TestWorker(threading.Thread):
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name

    def run(self):
        print "Starting " + self.name
        time.sleep(20)
        print "Exiting " + self.name
        # how do I let the calling thread know it's done?

class TestMain:
    def __init__(self):
        pass

    def do_work(self):
        thread = TestWorker(1, "Thread-1")
        thread.start()

    def do_something_else(self):
        print "Something else"

    def on_work_done(self):
        print "work done"
How can I let the main thread know that the TestWorker has finished (call on_work_done()), without blocking calls to do_something_else() as thread.join() would?
You can give your thread instance an optional callback function to call when it's finished.
Note I added a Lock to prevent concurrent printing (which does block).
print_lock = threading.Lock()  # Prevent threads from printing at same time.

class TestWorker(threading.Thread):
    def __init__(self, threadID, name, callback=lambda: None):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.callback = callback

    def run(self):
        with print_lock:
            print("Starting " + self.name)
        time.sleep(3)
        with print_lock:
            print("Exiting " + self.name)
        self.callback()

class TestMain:
    def __init__(self):
        self.work_done = False

    def do_work(self):
        thread = TestWorker(1, "Thread-1", self.on_work_done)
        thread.start()

    def do_something_else(self):
        with print_lock:
            print("Something else")

    def on_work_done(self):
        with print_lock:
            print("work done")
        self.work_done = True

main = TestMain()
main.do_work()

while not main.work_done:
    main.do_something_else()
    time.sleep(.5)  # do other stuff...

print('Done')
Output:
Starting Thread-1
Something else
Something else
Something else
Something else
Something else
Something else
Exiting Thread-1
work done
Done
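
Note that with the callback approach above, self.callback() runs on the worker thread, not the main thread, so anything the callback touches needs to be thread-safe. If you would rather poll from the main thread, Thread.is_alive() is a non-blocking check; here is a minimal sketch, separate from the answer above, with illustrative names.

# Non-blocking poll of a worker's status with Thread.is_alive().
# Minimal sketch; not taken from the answers above.
import threading
import time

def work():
    time.sleep(3)

worker = threading.Thread(target=work)
worker.start()

while worker.is_alive():        # returns immediately, never blocks
    print("Something else")     # main thread stays free to do other work
    time.sleep(0.5)

worker.join()                   # worker already finished, returns at once
print("work done")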
import queue
import threading


class SThread(threading.Thread, queue.Queue):

    def __init__(self, queue_out: object):
        threading.Thread.__init__(self)
        queue.Queue.__init__(self)
        self.queue_out = queue_out
        self.setDaemon(True)
        self.start()

    def run(self):
        print('Thread start')
        while True:
            cmd = self.get()
            if cmd is None:
                break  # exit thread
            self.queue_out.put(cmd['target'](*cmd.get('args', ())), **cmd.get('kwargs', {}))
            self.task_done()
        print('Thread stop')


def testFn(a):
    print('+ %s' % a)
    return a


if __name__ == '__main__':
    print('main 1')

    # init
    queue_out = queue.Queue()
    thread = SThread(queue_out)

    # in
    for a in range(5):
        thread.put(dict(target=testFn, args=(a,)))
    thread.put(None)

    print('main 2')

    # out
    while True:
        try:
            print('- %s' % queue_out.get(timeout=3))
        except queue.Empty:
            break

    print('main 3')
OUT:
main 1
Thread start
main 2
+ 0
+ 1
+ 2
+ 3
+ 4
Thread stop
- 0
- 1
- 2
- 3
- 4
main 3
import threading

# Trick: let the worker write its result into a shared dict; the main
# thread polls for the key without blocking.
dt = {}
threading.Thread(target=dt.update, kwargs=dict(out=123)).start()

while 'out' not in dt:   # non-blocking check for completion
    print('out' in dt)

print(dt)
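
Another standard-library option, not mentioned in the answers above, is to run the work through concurrent.futures and poll Future.done(), which never blocks. A small sketch with illustrative names:

# Future.done() is a non-blocking way to ask whether the work has finished.
import time
from concurrent.futures import ThreadPoolExecutor

def work():
    time.sleep(3)
    return "work done"

with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(work)
    while not future.done():      # returns immediately
        print("Something else")
        time.sleep(0.5)
    print(future.result())        # already finished, does not block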

Why thread is interrupted before the changes are complete

I'm attempting to create a Python module for getting MAC addresses by IP address.
def getMACs(addressesList):
    def _processArp(pkt):
        spa = _inet_ntoa(pkt.spa)

        if pkt.op == dpkt.arp.ARP_OP_REPLY and spa in _cache.macTable:
            lock.acquire()
            try:
                _cache.macTable[spa] = _packedToMacStr(pkt.sha)
                _cache.notFilledMacs -= 1
            finally:
                lock.release()
            if _cache.notFilledMacs == 0:
                thrd.stop()

    addresses = _parseAddresses(addressesList)

    _cache.registerCacheEntry("macTable", {})
    _cache.registerCacheEntry("notFilledMacs", 0)

    _events.arpPacket += _processArp
    lock = threading.Lock()

    thrd = _CaptureThread(promisc=False, timeout_ms=30, filter="arp")
    thrd.start()

    for addr in addresses:
        if _sendArpQuery(addr):
            _cache.macTable[str(addr)] = None
            _cache.notFilledMacs += 1

    thrd.join(125)
    thrd.stop()
    return _cache.macTable


if __name__ == "__main__":
    macTable = getMACs([IPAddress("192.168.1.1"), IPAddress("192.168.1.3")])
    _pprint.pprint(macTable)
When I run this module I get
{'192.168.1.1': '00:11:95:9E:25:B1', '192.168.1.3': None}
When I debug _processArp step by step I get
{'192.168.1.1': '00:11:95:9E:25:B1', '192.168.1.3': '00:21:63:78:98:8E'}
Class CaptureThread:
class CaptureThread(threading.Thread):
    def __init__(self, name=None, snaplen=65535, promisc=True, timeout_ms=0, immediate=False, filter=None):
        threading.Thread.__init__(self)
        self.__running = True
        self.__name = name
        self.__snaplen = snaplen
        self.__promisc = promisc
        self.__timeout_ms = timeout_ms
        self.__immediate = immediate
        self.__filter = filter

    def stop(self):
        self.__running = False

    def run(self):
        self.__pc = pcap.pcap(self.__name, self.__snaplen, self.__promisc, self.__timeout_ms, self.__immediate)
        if self.__filter:
            self.__pc.setfilter(self.__filter)
        while self.__running:
            self.__pc.dispatch(1, self.__processPacket)

    def __processPacket(self, timestamp, pkt):
        peth = dpkt.ethernet.Ethernet(pkt)
        if isinstance(peth.data, dpkt.arp.ARP):
            _events.arpPacket(peth.data)
Stupid error. As always when working with threads, it came down to synchronization.
One of my conditions for stopping the capture thread is "_cache.notFilledMacs == 0". The main thread had not yet incremented _cache.notFilledMacs to 2 when the CaptureThread decremented it, so the counter hit zero early and the thread was stopped before the second reply could be processed.
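
One way to close that race, sketched here as an assumption rather than the poster's actual fix, is to register every address (and bump the counter) under the lock before any query is sent, so a fast ARP reply cannot drive the counter to zero while the main thread is still in its send loop. _sendArpQuery, _cache, lock and addresses are the question's own names.

# Sketch of a possible fix (not the poster's actual code): populate the counter
# for all addresses first, then send the queries; undo the bookkeeping under
# the same lock the capture thread uses if a send fails.
with lock:
    for addr in addresses:
        _cache.macTable[str(addr)] = None
        _cache.notFilledMacs += 1

for addr in addresses:
    if not _sendArpQuery(addr):
        with lock:
            del _cache.macTable[str(addr)]
            _cache.notFilledMacs -= 1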
