My problem is as follows:
I have a class that inherits from threading.Thread that I want to be able to stop gracefully. This class also has a Queue it gets its work from.
Since quite a few classes in my project should have this behaviour, I've created some superclasses to reduce duplicated code, like this:
Thread-related behaviour:
from threading import Thread, Event

class StoppableThread(Thread):
    def __init__(self):
        Thread.__init__(self)
        self._stop = Event()

    def stop(self):
        self._stop.set()

    def stopped(self):
        return self._stop.is_set()
Queue-related behaviour:
from queue import Queue

class Queueable:
    def __init__(self):
        self._queue = Queue()

    def append_to_job_queue(self, job):
        self._queue.put(job)
Combining the two above, and adding queue.join() to the stop() call:
class StoppableQueueThread(StoppableThread, Queueable):
    def __init__(self):
        StoppableThread.__init__(self)
        Queueable.__init__(self)

    def stop(self):
        super(StoppableQueueThread, self).stop()
        self._queue.join()
A base class for a datasource:
from abc import ABC, abstractmethod

class DataSource(StoppableThread, ABC):
    def __init__(self, data_parser):
        StoppableThread.__init__(self)
        self.setName("DataSource")
        ABC.__init__(self)
        self._data_parser = data_parser

    def run(self):
        while not self.stopped():
            record = self._fetch_data()
            self._data_parser.append_to_job_queue(record)

    @abstractmethod
    def _fetch_data(self):
        """Implement logic here for obtaining a data piece;
        should return the fetched data."""
An implementation for a datasource:
from csv import reader

class CSVDataSource(DataSource):
    def __init__(self, data_parser, file_path):
        DataSource.__init__(self, data_parser)
        self.file_path = file_path
        self.csv_data = Queue()
        print('loading csv')
        self.load_csv()
        print('done loading csv')

    def load_csv(self):
        """Loops through the csv and adds each row to a queue"""
        with open(self.file_path, 'r') as f:
            self.reader = reader(f)
            next(self.reader, None)  # skip header
            for row in self.reader:
                self.csv_data.put(row)

    def _fetch_data(self):
        """Returns the next item of the queue"""
        item = self.csv_data.get()
        self.csv_data.task_done()
        print(self.csv_data.qsize())
        return item
Suppose there is a CSVDataSource instance called ds. If I want to stop the thread, I call:
ds.stop()
ds.join()
The ds.join() call, however, never returns. I'm not sure why this is, because the run() method does check whether the stop event is set.
Any ideas?
Update
A little more clarity as requested: the application is built up out of several threads. The RealStrategy thread (below) is the owner of all the other threads and is responsible for starting and terminating them. I haven't set the daemon flag for any of the threads, so they should be non-daemonic by default.
The main thread looks like this:
if __name__ == '__main__':
    def exit_handler(signal, frame):
        rs.stop_engine()
        rs.join()
        sys.exit(0)

    signal.signal(signal.SIGINT, exit_handler)

    rs = RealStrategy()
    rs.run_engine()
And here are the rs.run_engine() and rs.stop_engine() methods that are called in main:
class RealStrategy(Thread):
    .....
    .....

    def run_engine(self):
        self.on_start()
        self._order_handler.start()
        self._data_parser.start()
        self._data_source.start()
        self.start()

    def stop_engine(self):
        self._data_source.stop()
        self._data_parser.stop()
        self._order_handler.stop()
        self._data_source.join()
        self._data_parser.join()
        self._order_handler.join()
        self.stop()
If you want to use queue.Queue.join, then you must also use queue.Queue.task_done. You can read the linked documentation, or see the following, copied from the official queue documentation:
Queue.task_done()
Indicate that a formerly enqueued task is complete.
Used by queue consumer threads. For each get() used to fetch a task, a
subsequent call to task_done() tells the queue that the processing on
the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises a ValueError if called more times than there were items placed
in the queue.
Queue.join()
Blocks until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls
task_done() to indicate that the item was retrieved and all work on it
is complete. When the count of unfinished tasks drops to zero, join()
unblocks.
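In short: join() blocks until every item that was put() has been matched by a task_done() call. A minimal sketch of that handshake, independent of your classes (names are illustrative):
import queue
import threading

def worker(q):
    while True:
        item = q.get()            # each get() ...
        if item is None:          # sentinel value tells the worker to exit
            q.task_done()
            break
        print('processing', item)
        q.task_done()             # ... must be paired with one task_done()

q = queue.Queue()
threading.Thread(target=worker, args=(q,), daemon=True).start()
for item in range(5):
    q.put(item)
q.put(None)
q.join()  # returns only after every put() item has been marked done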
To test your problem, an example implementation was created to find out what was going on. It is slightly different from how your program works, but demonstrates a method of solving your problem:
#! /usr/bin/env python3
import abc
import csv
import pathlib
import queue
import sys
import threading
import time


def main():
    source_path = pathlib.Path(r'C:\path\to\file.csv')
    data_source = CSVDataSource(source_path)
    data_source.start()
    processor = StoppableThread(target=consumer, args=[data_source])
    processor.start()
    time.sleep(0.1)
    data_source.stop()


def consumer(data_source):
    while data_source.empty:
        time.sleep(0.001)
    while not data_source.empty:
        task = data_source.get_from_queue(True, 0.1)
        print(*task.data, sep=', ', flush=True)
        task.done()


class StopThread(StopIteration):
    pass

# Shadow the SystemExit name looked up inside the threading module so that
# StopThread is swallowed silently by Thread._bootstrap_inner as well.
threading.SystemExit = SystemExit, StopThread


class StoppableThread(threading.Thread):
    def _bootstrap(self, stop=False):
        # noinspection PyProtectedMember
        if threading._trace_hook:
            raise RuntimeError('cannot run thread with tracing')

        def terminate():
            nonlocal stop
            stop = True

        self.__terminate = terminate

        # noinspection PyUnusedLocal
        def trace(frame, event, arg):
            if stop:
                raise StopThread

        sys.settrace(trace)
        super()._bootstrap()

    def terminate(self):
        try:
            self.__terminate()
        except AttributeError:
            raise RuntimeError('cannot terminate thread '
                               'before it is started') from None


class Queryable:
    def __init__(self, maxsize=1 << 10):
        self.__queue = queue.Queue(maxsize)

    def add_to_queue(self, item):
        self.__queue.put(item)

    def get_from_queue(self, block=True, timeout=None):
        return self.__queue.get(block, timeout)

    @property
    def empty(self):
        return self.__queue.empty()

    @property
    def full(self):
        return self.__queue.full()

    def task_done(self):
        self.__queue.task_done()

    def join_queue(self):
        self.__queue.join()


class StoppableQueryThread(StoppableThread, Queryable):
    def __init__(self, target=None, name=None, args=(), kwargs=None,
                 *, daemon=None, maxsize=1 << 10):
        super().__init__(None, target, name, args, kwargs, daemon=daemon)
        Queryable.__init__(self, maxsize)

    def stop(self):
        self.terminate()
        self.join_queue()


class DataSource(StoppableQueryThread, abc.ABC):
    @abc.abstractmethod
    def __init__(self, maxsize=1 << 10):
        super().__init__(None, 'DataSource', maxsize=maxsize)

    def run(self):
        while True:
            record = self._fetch_data()
            self.add_to_queue(record)

    @abc.abstractmethod
    def _fetch_data(self):
        pass


class CSVDataSource(DataSource):
    def __init__(self, source_path):
        super().__init__()
        self.__data_parser = self.__build_data_parser(source_path)

    @staticmethod
    def __build_data_parser(source_path):
        with source_path.open(newline='') as source:
            parser = csv.reader(source)
            next(parser, None)  # skip the header row
            yield from parser

    def _fetch_data(self):
        try:
            return Task(next(self.__data_parser), self.task_done)
        except StopIteration:
            raise StopThread from None


class Task:
    def __init__(self, data, callback):
        self.__data = data
        self.__callback = callback

    @property
    def data(self):
        return self.__data

    def done(self):
        self.__callback()


if __name__ == '__main__':
    main()
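The important differences from your version: the worker thread is stopped by raising StopThread from a trace function instead of polling an Event around a blocking get(), and every record is wrapped in a Task carrying a done() callback, so each get_from_queue() is eventually paired with a task_done(). That pairing is what allows join_queue() inside stop() to return.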
In order not to freeze the PyQt GUI, I am using QRunnable (functions to be executed in the background) and QThreadPool. I have three functions, one of which requires the results of the previous two to run, and I am struggling to ensure that the execution of the third function starts only after the previous two have returned their respective results.
The worker class for the QThreadPool is defined in the following way:
class Worker(QtCore.QRunnable):
    '''
    Worker thread

    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.

    :param callback: The function callback to run on this worker thread. Supplied args and
                     kwargs will be passed through to the runner.
    :type callback: function
    :param args: Arguments to pass to the callback function
    :param kwargs: Keywords to pass to the callback function
    '''

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        # Store constructor arguments (re-used for processing)
        self.running = None
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()
        # Add the callback to our kwargs
        self.kwargs['callback_progress'] = self.signals.progress
        self.kwargs['callback_data'] = self.signals.data

    # @Slot()
    def run(self):
        '''
        Initialise the runner function with passed args, kwargs.
        '''
        # Retrieve args/kwargs here; and fire processing using them
        try:
            self.signals.started.emit()
            self.result = self.fn(*self.args, **self.kwargs)
        except:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            self.signals.result.emit(self.result)  # Return the result of the processing
        finally:
            self.signals.finished.emit()  # Done
and signals
class WorkerSignals(QtCore.QObject):
    '''
    Defines the signals available from a running worker thread.

    Supported signals are:

    finished
        No data
    error
        `tuple` (exctype, value, traceback.format_exc())
    result
        `object` data returned from processing, anything
    progress
        `int` indicating % progress
    '''
    error = QtCore.Signal(tuple)
    started = QtCore.Signal()
    finished = QtCore.Signal()
    progress = QtCore.Signal(int)
    result = QtCore.Signal(object)
    data = QtCore.Signal(dict)
The function to be executed is invoked by the following method:
def exe_worker_run(self, WorkerPool, function, arguments):
    Worker = thread.Worker(function, arguments)
    Worker.signals.started.connect(self.sig_thread_start)
    Worker.signals.error.connect(self.sig_thread_error)
    Worker.signals.result.connect(self.sig_thread_result)
    Worker.signals.finished.connect(self.sig_thread_finish)
    WorkerPool.start(Worker)
The signal that emits the result is connected to this function:
def sig_thread_result(self, result):
    for key in result.keys():
        try:
            dfrm = getattr(self, key)
            print('{} {} loaded!!!'.format(time.time(), key))
        except:
            pass
The main problem is that the result of each function is emitted only after all functions have finished executing. So what I need is a way to hold the execution of a QRunnable until the results from the previous QRunnable are available.
You can organize tasks into a queue and execute them by passing them to a worker one by one over the signal-slot mechanism. This way you can use the results of one computation to schedule the next computation.
from PySide2 import QtCore, QtWidgets, QtGui


class Task:
    def __init__(self, taskId):
        self._taskId = taskId

    def taskId(self):
        return self._taskId

    def execute(self):
        pass


class TaskPlusOne(Task):
    def __init__(self, taskId, value):
        super().__init__(taskId)
        self._value = value

    def execute(self):
        QtCore.QThread.currentThread().sleep(3)
        return self._value + 1


class Worker(QtCore.QObject):
    complete = QtCore.Signal(int, object)

    def append(self, task):
        print("execute", task.taskId())
        res = task.execute()
        self.complete.emit(task.taskId(), res)


class Window(QtWidgets.QWidget):
    task = QtCore.Signal(object)

    def __init__(self, parent=None):
        super().__init__(parent)
        worker = Worker()
        self.task.connect(worker.append)
        worker.complete.connect(self.onComplete)
        thread = QtCore.QThread()
        worker.moveToThread(thread)
        thread.start()
        self._thread = thread
        self._worker = worker
        self._queue = [TaskPlusOne(0, 0)]
        self.executeOne()

    def executeOne(self):
        queue = self._queue
        if len(queue) == 0:
            print("task queue is empty")
            return
        self.task.emit(queue.pop(0))

    def executeAll(self):
        while len(self._queue) > 0:
            self.executeOne()

    def onComplete(self, taskId, res):
        print("onComplete", taskId)
        if res < 2:
            print("append task to queue")
            self._queue.append(TaskPlusOne(taskId + 1, res))
        self.executeOne()

    def closeEvent(self, event):
        thread = self._thread
        thread.quit()
        thread.wait()
        super().closeEvent(event)


if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    widget = Window()
    widget.show()
    app.exec_()
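Because worker has been moved to the QThread, the cross-thread task signal is delivered through a queued connection: each emitted task is processed in the worker thread's event loop, one at a time, and onComplete runs back on the GUI thread. Scheduling the next task from onComplete is therefore what enforces the "previous result before next computation" ordering.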
I have been searching for some explanations about thread synchronization. I found a challenge to use as a case study, which I will summarize here with my solution. Basically there is a class with a numeric value, and you can add to or subtract from it. If there are many threads accessing this instance, it should wait for all threads to finish before returning the final value. My implementation is the following:
from threading import Lock, Thread
from time import sleep
import sys


class ClassA(object):
    def with_lock():
        def wrapper(func):
            def wrapped(self, *args):
                with self.lock:
                    return func(self, *args)
            return wrapped
        return wrapper

    def __init__(self, balance=0):
        self.balance = balance
        self.lock = Lock()

    def get_balance(self):
        return self.balance

    @with_lock()
    def add(self):
        self.balance += 1

    @with_lock()
    def sub(self):
        self.balance -= 1


if __name__ == "__main__":
    sys.setswitchinterval(1e-12)
    value = 10

    def foo():
        a.add()
        sleep(0.01)
        a.sub()

    a = ClassA(value)
    threads = [Thread(target=foo) for _ in range(1000)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    print(a.get_balance())  # should return "value"
The implementation of the "with_lock" decorator was taken from another implementation I found on the internet, but I did not understand it.
About this part:
with self.lock:
    return func(self, *args)
I have searched the Lock documentation, and it shows that self.lock is a Lock object with acquire and release methods. Can I understand that the 'with' statement waits until the lock is released? Or is there some other behavior?
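For context, entering a with block on a Lock calls acquire(), blocking until the lock is free, and guarantees release() on exit, even if an exception is raised. A minimal sketch of the equivalence, assuming a plain threading.Lock:
from threading import Lock

lock = Lock()

# these two forms behave identically
with lock:
    pass  # critical section

lock.acquire()  # blocks until no other thread holds the lock
try:
    pass  # critical section
finally:
    lock.release()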
The print at the end waits for all the threads to finish, but get_balance does not have the "with_lock" decorator. Why does it wait until the threads finish?
I'm trying to write a program that crawls through a website and downloads all the videos it has. I'm facing a problem in that the number of threads continuously increases even after the downloading of individual videos is done.
Here is the code for the individual Worker object, which is queued and then joined later. This is the only part of the code in which I generate a Thread. What I don't understand is how there can be remaining threads, given that in the object I implement the self.stop() function and the while loop breaks.
class Worker(Thread):
    def __init__(self, thread_pool):
        Thread.__init__(self)
        self.tasks = thread_pool.tasks
        self.tasks_info = thread_pool.tasks_info
        self.daemon = True
        self._is_running = True
        self.start()

    def stop(self):
        self._is_running = False

    def run(self):
        while self._is_running:
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception:
                print("\nError: Threadpool error.")
                sys.exit(1)
            self.tasks_info['num_tasks_complete'] += 1
            self.tasks.task_done()
            self.stop()
I've used the threading functions to check which threads are alive, and it turns out that it is indeed mostly the Worker threads, as well as other objects called Thread(SockThread) and _MainThread, which I do not know how to close.
Please advise on 1. why the Worker thread is not ending and 2. how to get rid of the Thread(SockThread) as well as the _MainThread.
Thank you!
edit 1
class ThreadPool:
    def __init__(self, name, num_threads, num_tasks):
        self.tasks = Queue(num_threads)
        self.num_threads = num_threads
        self.tasks_info = {
            'name': name,
            'num_tasks': num_tasks,
            'num_tasks_complete': 0
        }
        for _ in range(num_threads):
            Worker(self)
        print(threading.active_count())

    def add_task(self, func, *args, **kwargs):
        self.tasks.put((func, args, kwargs))

    def wait_completion(self):
        print("at the beginning of wait_completion:")
        print(threading.active_count())
By looking at your code, it looks like you initialize the thread, which calls the run() method for processing, and then you even call the start() method from inside __init__, which is not the proper way. Your code should be as follows:
import sys
from threading import Thread, Event

class Worker(Thread):
    def __init__(self, thread_pool):
        self.tasks = thread_pool.tasks
        self.tasks_info = thread_pool.tasks_info
        self.exit = Event()
        super(Worker, self).__init__()

    def shutdown(self):
        self.exit.set()

    def run(self):
        while not self.exit.is_set():
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception:
                print("\nError: Threadpool error.")
                # use shutdown method for error
                self.shutdown()
                sys.exit(1)
            self.tasks_info['num_tasks_complete'] += 1
            self.tasks.task_done()
            self.shutdown()
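One caveat: tasks.get() blocks indefinitely, so a worker parked inside get() never re-checks the exit event. A common remedy is a timeout (or a sentinel item); a minimal sketch of the timeout variant, not part of the code above:
import queue
import threading

class TimeoutWorker(threading.Thread):
    """Illustrative variant that re-checks the exit flag between tasks."""
    def __init__(self, tasks):
        super(TimeoutWorker, self).__init__()
        self.tasks = tasks
        self.exit = threading.Event()

    def shutdown(self):
        self.exit.set()

    def run(self):
        while not self.exit.is_set():
            try:
                func, args, kwargs = self.tasks.get(timeout=0.1)
            except queue.Empty:
                continue  # nothing queued; loop back and re-check the flag
            try:
                func(*args, **kwargs)
            finally:
                self.tasks.task_done()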
Please explain how we send/receive data from a Thread managed by a Queue....
First I subclass QThread, defining its run() method, which is started when QThread's start() is called:
class SimpleThread(QtCore.QThread):
    def __init__(self, queue, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.queue = queue

    def run(self):
        while True:
            arg = self.queue.get()
            self.fun(arg)
            self.queue.task_done()

    def fun(self, arg):
        for i in range(3):
            print 'fun: %s' % i
            self.sleep(1)
        return arg + 1
Then I declare two thread instances (so only two CPU cores are taken), passing the self.queue instance as an argument:
self.queue = queue.Queue()

for i in range(2):
    thread = SimpleThread(self.queue)
    thread.start()
Now, if I understand it correctly, thread.start() is not starting anything. The real "start" happens only when I call queue.put():
for arg in [1,2,3]: self.queue.put(arg)
This last line is what makes a "real" call. Aside from creating and starting a Queue item, put() allows saving any arbitrary value to each Queue item. .put() does several things at once: it creates, it starts, it moves the processing through the Queue, and it allows a variable to be placed "inside" the queue item (which can later be retrieved from inside the function-processor using the Queue item's .get() method).
But how do I return the value from the fun() function? A "regular" return resultValue from fun() doesn't work. And I can't use the self.queue.put() method, since this method, aside from storing data, "creates" a new queue item...
EDITED LATER:
Here is slightly tweaked code (copy/pasted from another post) showing an approach to returning a value from a completed Thread. I am not sure if the approach used here would work with QThread... please correct me if I am wrong:
import os, sys
import threading
import Queue


def callMe(incomingFun, daemon=False):
    def execute(_queue, *args, **kwargs):
        result = incomingFun(*args, **kwargs)
        _queue.put(result)

    def wrap(*args, **kwargs):
        _queue = Queue.Queue()
        _thread = threading.Thread(target=execute, args=(_queue,) + args, kwargs=kwargs)
        _thread.daemon = daemon
        _thread.start()
        _thread.result_queue = _queue
        return _thread

    return wrap


@callMe
def localFunc(x):
    import time
    x = x + 5
    time.sleep(5)
    return x


thread = localFunc(10)

# this blocks, waiting for the result
result = thread.result_queue.get()
print result
In normal circumstances you'd use a result queue to send results back, and then have some other thread running that waits for the results:
class SimpleThread(QtCore.QThread):
    def __init__(self, queue, result_queue, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.queue = queue
        self.result_queue = result_queue

    def run(self):
        while True:
            arg = self.queue.get()
            self.fun(arg)
            self.queue.task_done()

    def fun(self, arg):
        for i in range(3):
            print 'fun: %s' % i
            self.sleep(1)
        self.result_queue.put(arg + 1)


def handle_results(result_queue):
    while True:
        result = result_queue.get()
        print("Got result {}".format(result))
Main thread:
self.queue = queue.Queue()
self.result_queue = queue.Queue()
result_handler = threading.Thread(target=handle_results,
                                  args=(self.result_queue,))
result_handler.start()
for i in range(2):
    thread = SimpleThread(self.queue, self.result_queue)
    thread.start()
Doing it this way will keep you from blocking the GUI's event loop while you wait for the results. Here's what the equivalent would look like with multiprocessing.pool.ThreadPool:
from multiprocessing.pool import ThreadPool
import time


def fun(arg):
    for i in range(3):
        print 'fun: %s' % i
        time.sleep(1)
    return arg + 1


def handle_result(result):
    print("got result {}".format(result))


pool = ThreadPool(2)
pool.map_async(fun, [1, 2, 3], callback=handle_result)
Which is a lot simpler. It internally creates a result handling thread, which will automatically call handle_result for you when fun completes.
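Note that in a short standalone script (as opposed to a long-lived GUI process) you would also close the pool and wait for the work and the callback to finish before exiting:
pool.close()  # no further tasks will be submitted
pool.join()   # blocks until the workers and the result handler are done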
That said, you're using QThread, and you want the results to update GUI widgets, so you really want your results to be sent back to the main thread, not to a result handling thread. In that case, it makes sense to use Qt's signaling system, so that you can safely update the GUI when you receive the result:
from PyQt4 import QtCore, QtGui
import sys
import Queue as queue


class ResultObj(QtCore.QObject):
    def __init__(self, val):
        super(ResultObj, self).__init__()
        self.val = val


class SimpleThread(QtCore.QThread):
    finished = QtCore.pyqtSignal(object)

    def __init__(self, queue, callback, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.queue = queue
        self.finished.connect(callback)

    def run(self):
        while True:
            arg = self.queue.get()
            if arg is None:  # None means exit
                print("Shutting down")
                return
            self.fun(arg)

    def fun(self, arg):
        for i in range(3):
            print 'fun: %s' % i
            self.sleep(1)
        self.finished.emit(ResultObj(arg + 1))


class AppWindow(QtGui.QMainWindow):
    def __init__(self):
        super(AppWindow, self).__init__()
        mainWidget = QtGui.QWidget()
        self.setCentralWidget(mainWidget)
        mainLayout = QtGui.QVBoxLayout()
        mainWidget.setLayout(mainLayout)
        button = QtGui.QPushButton('Process')
        button.clicked.connect(self.process)
        mainLayout.addWidget(button)

    def handle_result(self, result):
        val = result.val
        print("got val {}".format(val))
        # You can update the UI from here.

    def process(self):
        MAX_CORES = 2
        self.queue = queue.Queue()
        self.threads = []
        for i in range(MAX_CORES):
            thread = SimpleThread(self.queue, self.handle_result)
            self.threads.append(thread)
            thread.start()
        for arg in [1, 2, 3]:
            self.queue.put(arg)
        for _ in range(MAX_CORES):  # Tell the workers to shut down
            self.queue.put(None)


app = QtGui.QApplication([])
window = AppWindow()
window.show()
sys.exit(app.exec_())
Output when the button is pushed:
fun: 0
fun: 0
fun: 1
fun: 1
fun: 2
fun: 2
fun: 0
got val 2
got val 3
Shutting down
fun: 1
fun: 2
Shutting down
got val 4
I am trying to combine the answers I got from two different Python questions.
Here is the first question and answer. Basically I just wanted to spawn two threads, one to powerDown() and the other to powerUp(), where powerUp() waits on powerDown():
How to spawn a thread inside another thread in the same object in python?
import threading


class Server(threading.Thread):
    # some code

    def run(self):
        self.reboot()

    # This is the top level function called by other objects
    def reboot(self):
        # perhaps add a lock
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
        up = threading.Thread(target=self.__powerUp)
        up.start()

    def __powerDown(self):
        # do something
        pass

    def __powerUp(self):
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down
Here is the second question and answer. Basically I wanted to start a thread, and then call a function of the object:
How to call a function on a running Python thread
import queue
import threading


class SomeClass(threading.Thread):
    def __init__(self, q, loop_time=1.0/60):
        self.q = q
        self.timeout = loop_time
        super(SomeClass, self).__init__()

    def onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def run(self):
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except queue.Empty:
                self.idle()

    def idle(self):
        # put the code you would have put in the `run` loop here
        pass

    def doSomething(self):
        pass

    def doSomethingElse(self):
        pass
Here is the combined idea code. Basically I wanted to spawn a thread, then queue up functions to execute, which in this case is reboot(). reboot() in turn creates two threads, the powerDown() and powerUp() threads, where powerUp() waits on powerDown().
import threading
import Queue


class Server(threading.Thread):
    def __init__(self, q, loop_time=1.0/60):
        self.q = q
        self.timeout = loop_time
        super(Server, self).__init__()

    def run(self):
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except queue.Empty:
                self.idle()

    def idle(self):
        # put the code you would have put in the `run` loop here
        pass

    # This is the top level function called by other objects
    def reboot(self):
        self.__onThread(self.__reboot)

    def __reboot(self):
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
        up = threading.Thread(target=self.__powerUp)
        up.start()

    def __onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def __powerDown(self):
        # do something
        pass

    def __powerUp(self):
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down
All of this works, except when I create two Server subclasses.
class ServerA(Server):
    pass


class ServerB(Server):
    pass
Here is the code that instantiates both subclasses and calls the start() and reboot() functions:
serverA = ServerA(None)
serverB = ServerB(None)
serverA.start()
serverB.start()
serverA.reboot()
serverB.reboot()
I expect serverA.reboot() and serverB.reboot() to happen concurrently, which is what I want, but they DO NOT! serverB.reboot() gets executed after serverA.reboot() is done. That is, if I put in print statements, I get:
serverA started
serverB started
serverA.reboot() called
serverA.__powerDown called
serverA.__powerUp called
serverB.reboot() called
serverB.__powerDown called
serverB.__powerUp called
I know for a fact that it takes longer for ServerA to reboot, so I expect something like this:
serverA started
serverB started
serverA.reboot() called
serverB.reboot() called
serverA.__powerDown called
serverB.__powerDown called
serverB.__powerUp called
serverA.__powerUp called
I hope that makes sense. If it does, why aren't my reboot() functions happening simultaneously?
Why are you sending None when a queue object is expected in the first place? This causes an exception complaining that a NoneType object doesn't have a get method. Besides that, the exception you want handled in the run method is Queue.Empty, not queue.Empty.
Here is the revised code and its output on my machine:
import threading
import Queue


class Server(threading.Thread):
    def __init__(self, title, q, loop_time=1.0/60):
        self.title = title
        self.q = q
        self.timeout = loop_time
        super(Server, self).__init__()

    def run(self):
        print "%s started" % self.title
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except Queue.Empty:
                # print "empty"
                self.idle()

    def idle(self):
        pass
        # put the code you would have put in the `run` loop here

    # This is the top level function called by other objects
    def reboot(self):
        self.__onThread(self.__reboot)

    def __reboot(self):
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
        up = threading.Thread(target=self.__powerUp)
        up.start()

    def __onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def __powerDown(self):
        # do something
        print "%s power down" % self.title

    def __powerUp(self):
        print "%s power up" % self.title
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down


class ServerA(Server):
    pass


class ServerB(Server):
    pass


def main():
    serverA = ServerA("A", Queue.Queue())
    serverB = ServerB("B", Queue.Queue())
    serverA.start()
    serverB.start()
    serverA.reboot()
    serverB.reboot()


if __name__ == '__main__':
    main()
Output:
A started
B started
B power down
A power down
B power up
A power up