Raising exceptions in a thread - python

I tried to get example 1 (raising exceptions in a Python thread using PyThreadState_SetAsyncExc()) from the GeeksforGeeks article "Different ways to kill a Thread" running.
But for some reason the thread does not terminate.
I use Python 3.6.
Here is the source code:
# Python program raising
# exceptions in a python
# thread
import threading
import ctypes
import time


class thread_with_exception(threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        # target function of the thread class
        try:
            while True:
                print('running ' + self.name)
        finally:
            print('ended')

    def get_id(self):
        # returns id of the respective thread
        if hasattr(self, '_thread_id'):
            return self._thread_id
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def raise_exception(self):
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,
              ctypes.py_object(SystemExit))
        if res > 1:
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            print('Exception raise failure')


t1 = thread_with_exception('Thread 1')
t1.start()
time.sleep(2)
t1.raise_exception()
t1.join()
Does anybody have an idea why the thread is not terminated by the raised exception?

Wrap thread_id in ctypes.c_long, as in:

    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id),
                                                     ctypes.py_object(SystemExit))
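For context (this is my reading of the situation, not part of the answer above): ctypes passes a plain Python int as a C int when no argtypes are declared, so on platforms where thread ids do not fit in 32 bits the call silently targets no thread and returns 0. A minimal sketch of the corrected method, assuming the rest of the class stays exactly as posted:

    def raise_exception(self):
        thread_id = self.get_id()
        # Pass the id explicitly as a C long so ctypes does not truncate it.
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
        if res == 0:
            print('Invalid thread id')
        elif res > 1:
            # Undo the damage if more than one thread was affected.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), 0)
            print('Exception raise failure')

Worth keeping in mind: the asynchronous exception is only delivered while the target thread is executing Python bytecode, so a thread blocked in a long C call will not see it until it returns to the interpreter.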

Related

Python custom signal handling in processes pool

I am dealing with the following problem:
I've implemented a dummy 'Thing' class that sleeps for 10 seconds and logs a message ('foo'). This class is instantiated in a worker function for a process Pool, and the 'foo' method that implements the logic described above is called.
What I want to achieve is custom signal handling: as long as the processes haven't terminated, if CTRL+C (SIGINT) is sent, each process logs the signal and terminates immediately.
Half of the logic is working: while each process is sleeping, on SIGINT they are interrupted and the Pool is closed.
Problem: if ALL the processes end successfully and SIGINT is then sent, the message is logged but the Pool won't be closed.
Code:
import logging
import signal
import os
import time
from multiprocessing import Pool, current_process

logger = logging.getLogger('test')

SIGNAL_NAMES = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
                    if v.startswith('SIG') and not v.startswith('SIG_'))


class Thing(object):
    def __init__(self, my_id):
        self.my_id = my_id
        self.logger = logging.getLogger(str(my_id))

    def foo(self):
        time.sleep(10)
        self.logger.info('[%s] Foo after 10 secs!', self.my_id)


class Daemon(object):
    def __init__(self, no_processes, max_count):
        signal.signal(signal.SIGINT, self.stop)

        self.done = False
        self.count = 0
        self.max_count = max_count
        self.pool = Pool(no_processes, initializer=self.pool_initializer)

    def stop(self, signum, _):
        """ Stop function for Daemon """
        sig = SIGNAL_NAMES.get(signum) or signum
        logger.info('[Daemon] Stopping (received signal %s)', sig)
        self.done = True

    def _generate_ids(self):
        """ Generator function of the IDs for the Processes Pool """
        while not self.done:
            if self.count < self.max_count:
                my_id = "ID-{}".format(self.count)
                logger.info('[Daemon] Generated ID %s', my_id)
                time.sleep(3)
                yield my_id
                self.count += 1
            time.sleep(1)

    def run(self):
        """ Main daemon run function """
        pid = os.getpid()
        logger.info('[Daemon] Started running on PID %s', str(pid))

        my_ids = self._generate_ids()

        for res in self.pool.imap_unordered(run_thing, my_ids):
            logger.info("[Daemon] Finished %s", res or '')

        logger.info('[Daemon] Closing & waiting processes to terminate')
        self.pool.close()
        self.pool.join()

    def pool_initializer(self):
        """ Pool initializer function """
        signal.signal(signal.SIGINT, self.worker_signal_handler)

    @staticmethod
    def worker_signal_handler(signum, _):
        """ Signal handler for the Process worker """
        sig = SIGNAL_NAMES.get(signum) or signum
        cp = current_process()
        logger.info("[%s] Received in worker %s signal %s", WORKER_THING_ID or '', str(cp), sig)

        global WORKER_EXITING
        WORKER_EXITING = True


WORKER_EXITING = False
WORKER_THING_ID = None


def run_thing(arg):
    """ Worker function for processes """
    if WORKER_EXITING:
        return

    global WORKER_THING_ID
    WORKER_THING_ID = arg
    run_exception = None

    logger.info('[%s] START Thing foo-ing', arg)
    logging.getLogger('Thing-{}'.format(arg)).setLevel(logging.INFO)
    try:
        thing = Thing(arg)
        thing.foo()
    except Exception as e:
        run_exception = e
    finally:
        WORKER_THING_ID = None
        logger.info('[%s] STOP Thing foo-ing', arg)

    if run_exception:
        logger.error('[%s] EXCEPTION on Thing foo-ing: %s', arg, run_exception)

    return arg


if __name__ == '__main__':
    logging.basicConfig()
    logger.setLevel(logging.INFO)

    daemon = Daemon(4, 3)
    daemon.run()
Your problem is the logic in _generate_ids(): the generator never ends, so pool.imap_unordered() never finishes by itself and can only be interrupted by CTRL-C.
Change it to something like this:
def _generate_ids(self):
    """ Generator function of the IDs for the Processes Pool """
    for i in range(self.max_count):
        time.sleep(3)
        my_id = "ID-{}".format(self.count)
        logger.info('[Daemon] Generated ID %s', my_id)
        if self.done:
            break
        self.count += 1
        yield my_id
And the processes end by themselves normally.
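To illustrate the point with a minimal sketch (independent of the code above, names of my own choosing): imap_unordered() only completes once its input iterable is exhausted, so a finite generator lets the result loop, close() and join() run, while an endless one blocks forever.

import multiprocessing


def square(x):
    return x * x


def finite_ids(n):
    # A generator that ends lets imap_unordered() finish on its own.
    for i in range(n):
        yield i


if __name__ == '__main__':
    pool = multiprocessing.Pool(2)
    for result in pool.imap_unordered(square, finite_ids(5)):
        print(result)
    # We only get here because finite_ids() is exhausted;
    # with an endless generator the loop above would never return.
    pool.close()
    pool.join()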

Python multiprocessing: Kill producer and consumer processes with KeyboardInterrupt

I want the consumer and producer processes in the following Python script to stop when CTRL+C is pressed. But the processes do not stop - the keyboard interrupt is not passed to them. Also, the except block of the main process is never entered.
import time
import multiprocessing as mp
from multiprocessing.managers import SyncManager
import signal


class Consumer(mp.Process):
    def __init__(self, **kwargs):
        mp.Process.__init__(self, **kwargs)

    def run(self):
        proc_name = self.name
        try:
            while True:
                print("{}".format(proc_name))
                time.sleep(3)
        except KeyboardInterrupt:
            print("{} stopped".format(proc_name))  # never printed
            return


class Producer(mp.Process):
    def __init__(self, **kwargs):
        mp.Process.__init__(self, **kwargs)

    def run(self):
        try:
            while True:
                time.sleep(3)
                print("Producer here.")
        except KeyboardInterrupt:
            print("Producer stopped.")  # never printed
            return


def main():
    def __init_worker():
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        print('init')  # not printed!!??

    # manager = SyncManager()  # does not change anything
    # manager.start(__init_worker)

    consumers = [Consumer(target=__init_worker) for i in xrange(3)]
    producer = Producer(target=__init_worker)

    producer.daemon = True  # does not change anything
    producer.start()
    for c in consumers:
        c.daemon = True
        c.start()

    try:
        producer.join()
        for c in consumers:
            c.join()
    except Exception as e:
        print('STOP')  # never printed
        raise e


if __name__ == '__main__':
    main()
There might also be a solution for my task using multiprocessing.Pool for the consumers and letting the main process act as the producer, but I would like to know why my implementation is not working as intended and what I need to adjust.
I realised that __init_worker does not seem to be executed at all (it makes no difference whether it is located inside or outside of main). Maybe that is the reason the KeyboardInterrupt is not passed to the consumer and producer processes?
Based on eryksun's comments I improved my code and now use a multiprocessing.Event. The script now works as expected. I also removed some lines which I think are no longer necessary. Since I did not find any similar solution when searching the web, here is my code:
import time
import multiprocessing as mp


class Consumer(mp.Process):
    def __init__(self, quit_event, **kwargs):
        mp.Process.__init__(self, **kwargs)
        self.quit_event = quit_event

    def run(self):
        proc_name = self.name
        while not self.quit_event.is_set():
            print("{}".format(proc_name))
            time.sleep(3)
        print("{} stopped".format(proc_name))
        return


class Producer(mp.Process):
    def __init__(self, quit_event, **kwargs):
        mp.Process.__init__(self, **kwargs)
        self.quit_event = quit_event

    def run(self):
        while not self.quit_event.is_set():
            print("Producer here.")
            time.sleep(3)
        print("Producer stopped")
        return


def main():
    quit_event = mp.Event()

    consumers = [Consumer(quit_event) for i in xrange(3)]
    producer = Producer(quit_event)

    producer.start()
    for c in consumers:
        c.start()

    try:
        producer.join()
        for c in consumers:
            c.join()
    except KeyboardInterrupt as e:
        print('\nSTOP')
        quit_event.set()
    except Exception as e:
        quit_event.set()
        raise e
    finally:
        producer.terminate()
        producer.join()
        for c in consumers:
            c.terminate()
            c.join()


if __name__ == '__main__':
    main()
Hoping that it helps somebody.
Edit: Swapped the terminate and join statements.

Non-blocking way to determine if thread is finished?

I have the following code:
import threading
import time


class TestWorker(threading.Thread):
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name

    def run(self):
        print "Starting " + self.name
        time.sleep(20)
        print "Exiting " + self.name
        # how do I let the calling thread know it's done?


class TestMain:
    def __init__(self):
        pass

    def do_work(self):
        thread = TestWorker(1, "Thread-1")
        thread.start()

    def do_something_else(self):
        print "Something else"

    def on_work_done(self):
        print "work done"
How can I let the main thread know that the TestWorker has finished (call on_work_done()), without blocking calls to do_something_else() as thread.join() would?
You can give your thread instance an optional callback function to call when it's finished.
Note I added a Lock to prevent concurrent printing (which does block).
print_lock = threading.Lock()  # Prevent threads from printing at same time.


class TestWorker(threading.Thread):
    def __init__(self, threadID, name, callback=lambda: None):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.callback = callback

    def run(self):
        with print_lock:
            print("Starting " + self.name)
        time.sleep(3)
        with print_lock:
            print("Exiting " + self.name)
        self.callback()


class TestMain:
    def __init__(self):
        self.work_done = False

    def do_work(self):
        thread = TestWorker(1, "Thread-1", self.on_work_done)
        thread.start()

    def do_something_else(self):
        with print_lock:
            print("Something else")

    def on_work_done(self):
        with print_lock:
            print("work done")
        self.work_done = True


main = TestMain()
main.do_work()

while not main.work_done:
    main.do_something_else()
    time.sleep(.5)  # do other stuff...

print('Done')
Output:
Starting Thread-1
Something else
Something else
Something else
Something else
Something else
Something else
Exiting Thread-1
work done
Done
import queue
import threading


class SThread(threading.Thread, queue.Queue):
    def __init__(self, queue_out: object):
        threading.Thread.__init__(self)
        queue.Queue.__init__(self)
        self.queue_out = queue_out
        self.setDaemon(True)
        self.start()

    def run(self):
        print('Thread start')
        while True:
            cmd = self.get()
            if cmd is None:
                break  # exit thread
            self.queue_out.put(cmd['target'](*cmd.get('args', ()),
                                             **cmd.get('kwargs', {})))
            self.task_done()
        print('Thread stop')


def testFn(a):
    print('+ %s' % a)
    return a


if __name__ == '__main__':
    print('main 1')

    # init
    queue_out = queue.Queue()
    thread = SThread(queue_out)

    # in
    for a in range(5):
        thread.put(dict(target=testFn, args=(a,)))
    thread.put(None)
    print('main 2')

    # out
    while True:
        try:
            print('- %s' % queue_out.get(timeout=3))
        except queue.Empty:
            break
    print('main 3')
OUT:
main 1
Thread start
main 2
+ 0
+ 1
+ 2
+ 3
+ 4
Thread stop
- 0
- 1
- 2
- 3
- 4
main 3
import threading

dt = {}
# The thread's only job is to add the key 'out' to the shared dict;
# the main thread polls for that key without ever blocking.
threading.Thread(target=dt.update, kwargs=dict(out=123)).start()
while 'out' not in dt:
    print('out' in dt)
print(dt)
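For completeness, and not taken from the answers above: the simplest non-blocking check is usually to poll Thread.is_alive() from the main loop. A minimal sketch:

import threading
import time


def work():
    time.sleep(3)  # stand-in for the real job


worker = threading.Thread(target=work)
worker.start()

while worker.is_alive():
    print('Something else')  # keep doing other work instead of blocking on join()
    time.sleep(0.5)

worker.join()  # returns immediately, the thread has already finished
print('work done')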

Python sys.excepthook working only on main process but not on subprocesses

I have an app with some subprocesses running, and I have successfully set a sys.excepthook exception handler for the main process. Now I want to set the same hook on the subprocesses. I would expect it to be as simple as copying the exact lines of code I used in the main process, but it didn't work.
Here is my code:
class Consumer(multiprocessing.Process):
    def __init__(self, codec_status_queue, logger_queue):
        multiprocessing.Process.__init__(self)
        self.codec_status_queue = codec_status_queue
        self.logger_queue = logger_queue

    def run(self):
        # Set default unhandled exceptions handler
        uncaughtErrorHandler = UncaughtErrorHandler(self.logger_queue)
        sys.excepthook = uncaughtErrorHandler.error_handler
        1/0


class UncaughtErrorHandler(object):
    def __init__(self, logger_queue, child_processes=None):
        self.logger_queue = logger_queue
        self.child_processes = child_processes

    def error_handler(self, type, value, trace_back):
        trace_formatted = "".join(traceback.format_tb(trace_back))
        exeption_message = "Unhandled Exception:\n Type: %s\n Value: %s\n Line: %s\n Traceback:\n %s" % (type, value.message, trace_back.tb_lineno, trace_formatted)
        logger_queue.put((LoggerThread.CRITICAL, exeption_message))
        if self.child_processes:
            self.stop_children()
        # Stopping this process
        sys.exit()

    def stop_children(self):
        num_children = len(self.child_processes)
        logger_queue.put((LoggerThread.DEBUG, "Terminating child processes (%s)" % num_children))
        for process in self.child_processes:
            log_message = "Terminating %s with PID %s" % (process.name, process.pid)
            logger_queue.put((LoggerThread.DEBUG, log_message))
            process.terminate()


if __name__ == '__main__':
    ...
    # Create processes and communication queues
    codec_status_queue = multiprocessing.Queue()
    num_consumers = multiprocessing.cpu_count() * 2
    print 'Creating %d consumers' % num_consumers
    consumers = [ Consumer(codec_status_queue, logger_queue)
                  for i in xrange(num_consumers) ]

    # Set default unhandled exceptions handler
    uncaughtErrorHandler = UncaughtErrorHandler(logger_queue, consumers)
    sys.excepthook = uncaughtErrorHandler.error_handler

    # Start processes
    for consumer in consumers:
        consumer.daemon = True
        consumer.start()
If I put the 1/0 in the __main__ part, the UncaughtErrorHandler catches the exception, but when the 1/0 is placed as shown above, it doesn't.
Maybe someone can tell me what I am doing wrong?
As far as I can tell, the reason your hook never fires in the child is that multiprocessing runs Process.run() inside its own bootstrap code, which catches any exception raised by run() and prints the traceback itself, so sys.excepthook is never consulted in the child process. The following code was written for Python 3.x but can be adapted to work with Python 2.x instead. It provides an alternative to overriding sys.excepthook in child processes: catch all exceptions in run() and hand the data from sys.exc_info() over to the exception handler. The main process can use a similar pattern for exceptions while retaining the original design of your program. The example below should be a full working demonstration you can play around with and adapt to your needs.
#! /usr/bin/env python3
import logging
import multiprocessing
import queue
import sys
import threading
import time
import traceback


def main():
    """Demonstrate exception handling and logging in several processes."""
    logger_queue = multiprocessing.Queue()
    logger_thread = LoggerThread(logger_queue)
    logger_thread.start()
    try:
        # Create processes and communication queues
        codec_status_queue = multiprocessing.Queue()
        num_consumers = multiprocessing.cpu_count() * 2
        print('Creating {} consumers'.format(num_consumers))
        consumers = [Consumer(codec_status_queue, logger_queue)
                     for _ in range(num_consumers)]
        # Set default unhandled exceptions handler
        uncaught_error_handler = UncaughtErrorHandler(logger_queue, consumers)
        sys.excepthook = uncaught_error_handler.error_handler
        # Start processes
        for consumer in consumers:
            consumer.start()
        time.sleep(2)
    finally:
        logger_thread.shutdown()


def get_message(value):
    """Retrieve an exception's error message and return it."""
    if hasattr(value, 'message'):
        return value.message
    if hasattr(value, 'args') and value.args:
        return value.args[0]


class LoggerThread(threading.Thread):
    """Handle logging messages coming from various sources via a queue."""

    CRITICAL = logging.CRITICAL
    DEBUG = logging.DEBUG

    def __init__(self, logger_queue):
        """Initialize an instance of the LoggerThread class."""
        super().__init__()
        self.logger_queue = logger_queue
        self.mutex = threading.Lock()
        self.running = False

    def run(self):
        """Process messages coming through the queue until shutdown."""
        self.running = True
        while self.running:
            try:
                while True:
                    self.handle_message(*self.logger_queue.get(True, 0.1))
            except queue.Empty:
                pass

    def handle_message(self, level, message):
        """Show the message while ensuring a guaranteed order on screen."""
        with self.mutex:
            print('Level:', level)
            print('Message:', message)
            print('=' * 80, flush=True)

    def shutdown(self):
        """Signal the thread to exit once it runs out of messages."""
        self.running = False


class Consumer(multiprocessing.Process):
    """Simulate a consumer process that handles data from a queue."""

    def __init__(self, codec_status_queue, logger_queue):
        """Initialize an instance of the Consumer class."""
        super().__init__()
        self.codec_status_queue = codec_status_queue
        self.logger_queue = logger_queue
        self.daemon = True

    def run(self):
        """Begin working as a consumer while handling any exceptions."""
        # Set default unhandled exceptions handler
        uncaught_error_handler = UncaughtErrorHandler(self.logger_queue)
        try:
            self.do_consumer_work()
        except:
            uncaught_error_handler.error_handler(*sys.exc_info())

    def do_consumer_work(self):
        """Pretend to be doing the work of a consumer."""
        junk = 1 / 0
        print('Process', self.ident, 'calculated', junk)


class UncaughtErrorHandler:
    """Organize error handling to automatically terminate child processes."""

    def __init__(self, logger_queue, child_processes=None):
        """Initialize an instance of the UncaughtErrorHandler class."""
        self.logger_queue = logger_queue
        self.child_processes = child_processes

    def error_handler(self, kind, value, trace_back):
        """Record errors as they happen and terminate the process tree."""
        trace_formatted = ''.join(traceback.format_tb(trace_back))
        exception_message = ('Unhandled Exception:\n'
                             ' Type: {}\n'
                             ' Value: {}\n'
                             ' Line: {}\n'
                             ' Traceback:\n{}').format(
            kind, get_message(value), trace_back.tb_lineno, trace_formatted)
        self.logger_queue.put((LoggerThread.CRITICAL, exception_message))
        if self.child_processes:
            self.stop_children()
        # Stopping this process
        sys.exit()

    def stop_children(self):
        """Terminate all children associated with this error handler."""
        num_children = len(self.child_processes)
        log_message = 'Terminating child processes({})'.format(num_children)
        self.logger_queue.put((LoggerThread.DEBUG, log_message))
        for process in self.child_processes:
            log_message = 'Terminating {} with PID {}'.format(
                process.name, process.pid)
            self.logger_queue.put((LoggerThread.DEBUG, log_message))
            process.terminate()


if __name__ == '__main__':
    main()
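The core of the idea, stripped down to a minimal sketch of my own (the names here are placeholders, not taken from the answer above): wrap the work done in run() in a try/except and hand sys.exc_info() to the handler manually, since the child's excepthook is never invoked.

import multiprocessing
import sys
import traceback


def child_error_handler(exc_type, exc_value, exc_traceback):
    # Stand-in for the real handler: log the failure and exit the child.
    print('Unhandled in child:', exc_type.__name__, exc_value)
    traceback.print_tb(exc_traceback)
    sys.exit(1)


class Worker(multiprocessing.Process):
    def run(self):
        try:
            self.do_work()  # whatever the process is meant to do
        except Exception:
            # sys.excepthook is never called here, so forward manually.
            child_error_handler(*sys.exc_info())

    def do_work(self):
        1 / 0  # deliberately fail to show the handler firing


if __name__ == '__main__':
    w = Worker()
    w.start()
    w.join()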

Multithread proxy change but once

Let's say I have code like this:
def func1(a, b, c):
    try:
        p = pycurl.Curl()
        p.setopt(pycurl.PROXY, "127.0.0.1")
        p.setopt(pycurl.PROXYPORT, 9050)
        p.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
        p.perform()
        p.close()
    except pycurl.error as error:
        if error[0] == 28:  # timeout - change proxy
            print "Tor timeout, need to change"
            queue.put((a, b, c))
            new_tor()
            return


def new_tor():
    # send_signal_for_new_ident_is_here
    pass
I start this code in 7 threads.
When a thread receives error 28, it changes the identity.
But what happens is that ALL 7 THREADS send the signal to change the identity.
How can I do the following:
if one thread receives error 28, it calls new_tor(), while the other 6 threads don't, but instead wait for the result and only then proceed with their work. How do I synchronize this?
Just put an error "id" into the queue, and if you encounter it, put the value back into the queue and then handle it as needed.
You probably don't want to end the thread, which is what I did here.
You could also give each thread a unique identifier, so that when a thread encounters the error it records that it has seen it; once all threads have encountered the error, the error is removed from the queue.
Code:
import threading
import Queue

y = 0


def f1():
    global y
    y += 1
    if y > 100:
        raise ValueError('trial')


def f2():
    return


class Test(threading.Thread):
    def __init__(self, func, name):
        threading.Thread.__init__(self)
        self.func = func
        self.name = name

    def run(self):
        while True:
            x = ''
            if not queue.empty():
                x = queue.get()
                if x == 'error':
                    queue.put(x)
                    print 'Stopping %s' % (self.name,)
                    return
            try:
                self.func()
            except Exception as e:
                queue.put('error')


queue = Queue.Queue()
thread1 = Test(f1, '1')
thread2 = Test(f2, '2')
thread1.start()
thread2.start()
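Another way to get the behaviour asked for in the question (only one thread triggers the identity change while the others wait) is a shared lock. This is just a sketch under my own assumptions; new_tor() and handle_timeout() are placeholders, not taken from the code above:

import threading
import time

renew_lock = threading.Lock()  # only one thread may renew the identity at a time


def new_tor():
    time.sleep(1)  # placeholder for actually requesting a new Tor identity


def handle_timeout(name):
    if renew_lock.acquire(blocking=False):
        # This thread won the race: it performs the renewal exactly once.
        try:
            print('%s renews the Tor identity' % name)
            new_tor()
        finally:
            renew_lock.release()
    else:
        # Another thread is already renewing: block until it has finished,
        # then release the lock immediately without renewing again.
        with renew_lock:
            pass
        print('%s resumes with the new identity' % name)

Each thread that hits error 28 calls handle_timeout(); the first caller does the renewal, the rest simply wait on the lock and continue once it is released.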
