Python multiprocessing Queue child class losing attributes in process

I am trying to implement a child class of the multiprocessing Queue in Python. The child class contains a simple Boolean flag, "ready". When I send the queue to a new process, the ready attribute disappears. The following code demonstrates the problem:
import multiprocessing
import multiprocessing.queues

class ReadyQueue(multiprocessing.queues.Queue):
    def __init__(self, ctx, *args, **kwargs):
        super(ReadyQueue, self).__init__(ctx=ctx, *args, **kwargs)
        self.ready = False

def ready_queue(*args, **kwargs):
    return ReadyQueue(ctx=multiprocessing.get_context(), *args, **kwargs)

def foo(q):
    print(q.ready)

if __name__ == "__main__":
    my_queue = ready_queue()
    print(my_queue.ready)
    p = multiprocessing.Process(target=foo, args=(my_queue,))
    p.start()
    p.join()
With the output:
False
Process Process-1:
Traceback (most recent call last):
File "C:\Users\acre018\Anaconda3\envs\EIT_Qt\lib\multiprocessing\process.py", line 315, in _bootstrap
self.run()
File "C:\Users\acre018\Anaconda3\envs\EIT_Qt\lib\multiprocessing\process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\acre018\github\EIT_Qt\Experiments\ready_queue_test.py", line 16, in foo
print(q.ready)
AttributeError: 'ReadyQueue' object has no attribute 'ready'

I implemented this workaround:
import multiprocessing
from queue import Empty
import time
import ctypes

class ReadyQueue:
    def __init__(self, *args, **kwargs):
        self.queue = multiprocessing.Queue(*args, **kwargs)
        self._ready = multiprocessing.Value(ctypes.c_bool, False)

    def set_ready(self):
        self._ready.value = True

    def set_not_ready(self):
        self._ready.value = False
        self.clear()

    def is_ready(self):
        return self._ready.value

    def clear(self):
        try:
            while True:
                self.queue.get(block=False)
        except Empty:
            pass

    def get(self, block=True, timeout=None):
        return self.queue.get(block, timeout)

    def put(self, obj, block=True, timeout=None):
        return self.queue.put(obj, block, timeout)

    def full(self):
        return self.queue.full()

    def empty(self):
        return self.queue.empty()

    def qsize(self):
        return self.queue.qsize()

def foo(q):
    while q.is_ready():
        time.sleep(1)
        q.put("hello from foo")
    print("q no longer ready, foo loop finished")

if __name__ == "__main__":
    my_queue = ReadyQueue()
    my_queue.set_ready()
    p = multiprocessing.Process(target=foo, args=(my_queue,))
    p.start()
    for i in range(2):
        print(my_queue.get())
    time.sleep(2)
    print("my_queue._ready = %s, qsize: %d. Setting not ready.." % (str(my_queue.is_ready()), my_queue.qsize()))
    my_queue.set_not_ready()
    print("my_queue._ready = %s, qsize: %d" % (str(my_queue.is_ready()), my_queue.qsize()))
With the output:
C:\Users\acre018\Anaconda3\envs\test_pyqt\python.exe C:/Users/acre018/github/EIT_Qt/Experiments/ready_queue_test2.py
hello from foo
hello from foo
my_queue._ready = True, qsize: 2. Setting not ready..
my_queue._ready = False, qsize: 0
q no longer ready, foo loop finished
Process finished with exit code 0
The workaround is to have my ReadyQueue class hold a multiprocessing.Queue as an attribute instead of inheriting from multiprocessing.queues.Queue. For convenience I implemented the queue methods that I need as thin pass-throughs to that attribute, and I also implemented a clear method. The underlying cause of the original problem is that multiprocessing.queues.Queue defines __getstate__/__setstate__ to pickle only its own internal state when the queue is sent to a child process, so attributes added by a subclass are silently dropped.
Note that in my first example I neglected to make self.ready a multiprocessing.Value, so I wouldn't have been able to change it across processes anyway, but I tested after fixing that and it was not the source of the issue.
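For reference, here is a sketch (my untested assumption, not something from the original post) that keeps the inheritance approach instead: since multiprocessing.queues.Queue pickles itself through __getstate__/__setstate__ when it is sent to the child process, overriding both lets an extra attribute travel along with the queue's own state:

import multiprocessing
import multiprocessing.queues
import ctypes

class ReadyQueue(multiprocessing.queues.Queue):
    def __init__(self, ctx, *args, **kwargs):
        super().__init__(ctx=ctx, *args, **kwargs)
        # a shared Value, so flag changes are visible across processes
        self._ready = multiprocessing.Value(ctypes.c_bool, False)

    def __getstate__(self):
        # bundle the flag with the queue's own picklable state
        return (super().__getstate__(), self._ready)

    def __setstate__(self, state):
        queue_state, self._ready = state
        super().__setstate__(queue_state)

    def set_ready(self):
        self._ready.value = True

    def is_ready(self):
        return self._ready.value

def foo(q):
    print(q.is_ready())  # the flag now survives the trip to the child process

if __name__ == "__main__":
    my_queue = ReadyQueue(ctx=multiprocessing.get_context())
    my_queue.set_ready()
    p = multiprocessing.Process(target=foo, args=(my_queue,))
    p.start()
    p.join()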

Related

Python multiprocessing callback

Using this post as inspiration, I am trying to add a callback. I am using GLib.timeout_add to poll for the result, as I want to use it in a Gtk app. However, main_quit() is not called properly, and thus the following code hangs after finishing:
import multiprocessing
import queue
import collections

import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GLib, Gtk

Msg = collections.namedtuple("Msg", ["event", "args"])

class BaseProcess(multiprocessing.Process):
    "A process backed by internal queues for simple messaging"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.requests = multiprocessing.Queue()
        self.responses = multiprocessing.Queue()

    def send(self, event, *args, finished_callback=None):
        "Puts the event and args as a `Msg` on the requests queue"
        msg = Msg(event, args)
        self.requests.put(msg)
        GLib.timeout_add(100, self._monitor_process, finished_callback)

    def run(self):
        while True:
            event, args = self.requests.get()
            if event == "quit":
                break
            handler = getattr(self, "do_%s" % event, None)
            if not handler:
                raise NotImplementedError("Process has no handler for [%s]" % event)
            msg = handler(*args)
            self.responses.put(msg)

    def _monitor_process(self, finished_callback):
        print(f"in _monitor_process {finished_callback}", flush=True)
        try:
            result = self.responses.get(False)
            if finished_callback is not None:
                finished_callback(result)
        except queue.Empty:
            return GLib.SOURCE_CONTINUE
        return GLib.SOURCE_REMOVE

class MyProcess(BaseProcess):
    "test process class"

    def do_sum(self, arg1, arg2):
        "test method"
        print(f"do_sum {arg1 + arg2}", flush=True)
        return arg1 + arg2

def finished_callback(result):
    print(f"result {result}", flush=True)
    Gtk.main_quit()

if __name__ == "__main__":
    process = MyProcess()
    process.start()
    process.send('sum', 1, 2, finished_callback=finished_callback)
    Gtk.main()
How can I prevent the code from hanging?
Edit: I see from this page that others have noted problems. How can I build a Gtk-based app to control long-running processes like scanners without blocking the main thread?
The threading docs for PyGObject led me to the answer, which is to set process.daemon = True before starting the process. Without this, the interpreter blocks at exit waiting to join the still-running non-daemonic child (its run() loop never receives a "quit" event), whereas a daemonic process is terminated automatically when the main process exits:
if __name__ == "__main__":
process = MyProcess()
process.daemon = True
process.start()
process.send('sum', 1, 2, finished_callback=finished_callback)
Gtk.main()
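Alternatively, a sketch based on the code above (my assumption, not part of the original answer): instead of daemonizing, the worker can be shut down explicitly by sending the "quit" event that BaseProcess.run() already handles, so that join() returns before quitting Gtk:

def finished_callback(result):
    print(f"result {result}", flush=True)
    process.requests.put(Msg("quit", ()))  # run() breaks out of its loop on "quit"
    process.join()                         # returns once run() has exited
    Gtk.main_quit()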

How to postpone execution of the QRunnable until signal from previous is emitted

In order not to freeze the PyQt GUI, I am using QRunnable (functions to be executed in the background) and QThreadPool. I have three functions, one of which requires the results of the previous two, and I am struggling to ensure that the third function starts only after the first two have returned their respective results.
The worker class submitted to the QThreadPool is defined in the following way:
import sys
import traceback

from PySide2 import QtCore

class Worker(QtCore.QRunnable):
    '''
    Worker thread

    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.

    :param callback: The function callback to run on this worker thread. Supplied args and
                     kwargs will be passed through to the runner.
    :type callback: function
    :param args: Arguments to pass to the callback function
    :param kwargs: Keywords to pass to the callback function
    '''

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        # Store constructor arguments (re-used for processing)
        self.running = None
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()
        # Add the callbacks to our kwargs
        self.kwargs['callback_progress'] = self.signals.progress
        self.kwargs['callback_data'] = self.signals.data

    @QtCore.Slot()
    def run(self):
        '''
        Initialise the runner function with passed args, kwargs.
        '''
        # Retrieve args/kwargs here; and fire processing using them
        try:
            self.signals.started.emit()
            self.result = self.fn(*self.args, **self.kwargs)
        except:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            self.signals.result.emit(self.result)  # Return the result of the processing
        finally:
            self.signals.finished.emit()  # Done
and signals
class WorkerSignals(QtCore.QObject):
    '''
    Defines the signals available from a running worker thread.

    Supported signals are:

    finished
        No data
    error
        `tuple` (exctype, value, traceback.format_exc())
    result
        `object` data returned from processing, anything
    progress
        `int` indicating % progress
    '''
    error = QtCore.Signal(tuple)
    started = QtCore.Signal()
    finished = QtCore.Signal()
    progress = QtCore.Signal(int)
    result = QtCore.Signal(object)
    data = QtCore.Signal(dict)
the function to be executed is invoked by the following function
def exe_worker_run(self, WorkerPool, function, arguments):
    Worker = thread.Worker(function, arguments)
    Worker.signals.started.connect(self.sig_thread_start)
    Worker.signals.error.connect(self.sig_thread_error)
    Worker.signals.result.connect(self.sig_thread_result)
    Worker.signals.finished.connect(self.sig_thread_finish)
    WorkerPool.start(Worker)
the signal that emits the result is connected to this function:
def sig_thread_result(self, result):
    for key in result.keys():
        try:
            dfrm = getattr(self, key)
            print('{} {} loaded!!!'.format(time.time(), key))
        except:
            pass
The main problem is that the result of each function is emitted only after all functions have finished executing. So what I need is a solution that holds the execution of a QRunnable until the results from the previous QRunnable are available.
You can organize tasks into a queue and execute them by passing them to the worker one by one over the signal-slot mechanism. This way you can use the results from one computation to schedule the next computation.
from PySide2 import QtCore, QtWidgets, QtGui

class Task:
    def __init__(self, taskId):
        self._taskId = taskId

    def taskId(self):
        return self._taskId

    def execute(self):
        pass

class TaskPlusOne(Task):
    def __init__(self, taskId, value):
        super().__init__(taskId)
        self._value = value

    def execute(self):
        QtCore.QThread.currentThread().sleep(3)
        return self._value + 1

class Worker(QtCore.QObject):
    complete = QtCore.Signal(int, object)

    def append(self, task):
        print("execute", task.taskId())
        res = task.execute()
        self.complete.emit(task.taskId(), res)

class Window(QtWidgets.QWidget):
    task = QtCore.Signal(object)

    def __init__(self, parent=None):
        super().__init__(parent)
        worker = Worker()
        self.task.connect(worker.append)
        worker.complete.connect(self.onComplete)
        thread = QtCore.QThread()
        worker.moveToThread(thread)
        thread.start()
        self._thread = thread
        self._worker = worker
        self._queue = [TaskPlusOne(0, 0)]
        self.executeOne()

    def executeOne(self):
        queue = self._queue
        if len(queue) == 0:
            print("task queue is empty")
            return
        self.task.emit(queue.pop(0))

    def executeAll(self):
        while len(self._queue) > 0:
            self.executeOne()

    def onComplete(self, taskId, res):
        print("onComplete", taskId)
        if res < 2:
            print("append task to queue")
            self._queue.append(TaskPlusOne(taskId + 1, res))
        self.executeOne()

    def closeEvent(self, event):
        thread = self._thread
        thread.quit()
        thread.wait()
        super().closeEvent(event)

if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    widget = Window()
    widget.show()
    app.exec_()
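As a follow-up sketch for the original three-function case (my adaptation, not part of the answer above): onComplete can collect prerequisite results in a dict and only enqueue the dependent task once both prerequisites have arrived. This assumes self._results = {} is added in Window.__init__, that the queue was seeded with the two prerequisite tasks (ids 0 and 1), and that the third task is a TaskPlusOne with id 2:

def onComplete(self, taskId, res):
    # replaces Window.onComplete above; store each finished task's result
    self._results[taskId] = res
    if taskId in (0, 1) and 0 in self._results and 1 in self._results:
        # both prerequisites are done: schedule the task that depends on them
        self._queue.append(TaskPlusOne(2, self._results[0] + self._results[1]))
    self.executeOne()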

Python threading.join() hangs

My problem is as follows:
I have a class that inherits from threading.Thread that I want to be able to stop gracefully. This class also has a Queue it gets its work from.
Since there are quite a few classes in my project that should have this behaviour, I've created some superclasses to reduce duplicate code, like this:
Thread related behaviour:
from threading import Thread, Event

class StoppableThread(Thread):
    def __init__(self):
        Thread.__init__(self)
        self._stop = Event()

    def stop(self):
        self._stop.set()

    def stopped(self):
        return self._stop.isSet()
Queue related behaviour:
from queue import Queue

class Queueable():
    def __init__(self):
        self._queue = Queue()

    def append_to_job_queue(self, job):
        self._queue.put(job)
Combining the two above and adding queue.join() to the stop() call
class StoppableQueueThread(StoppableThread, Queueable):
    def __init__(self):
        StoppableThread.__init__(self)
        Queueable.__init__(self)

    def stop(self):
        super(StoppableQueueThread, self).stop()
        self._queue.join()
A base class for a datasource:
from abc import ABC, abstractmethod

class DataSource(StoppableThread, ABC):
    def __init__(self, data_parser):
        StoppableThread.__init__(self)
        self.setName("DataSource")
        ABC.__init__(self)
        self._data_parser = data_parser

    def run(self):
        while not self.stopped():
            record = self._fetch_data()
            self._data_parser.append_to_job_queue(record)

    @abstractmethod
    def _fetch_data(self):
        """implement logic here for obtaining a data piece;
        should return the fetched data"""
An implementation for a datasource:
from csv import reader

class CSVDataSource(DataSource):
    def __init__(self, data_parser, file_path):
        DataSource.__init__(self, data_parser)
        self.file_path = file_path
        self.csv_data = Queue()
        print('loading csv')
        self.load_csv()
        print('done loading csv')

    def load_csv(self):
        """Loops through csv and adds data to a queue"""
        with open(self.file_path, 'r') as f:
            self.reader = reader(f)
            next(self.reader, None)  # skip header
            for row in self.reader:
                self.csv_data.put(row)

    def _fetch_data(self):
        """Returns next item of the queue"""
        item = self.csv_data.get()
        self.csv_data.task_done()
        print(self.csv_data.qsize())
        return item
Suppose there is a CSVDataSource instance called ds; if I want to stop the thread, I call:
ds.stop()
ds.join()
The ds.join() call, however, never returns. I'm not sure why this is, because the run() method does check whether the stop event is set.
Any ideas?
Update
A little more clarity as requested: the application is built up out of several threads. The RealStrategy thread (below) is the owner of all the other threads and is responsible for starting and terminating them. I haven't set the daemon flag for any of the threads, so they should be non-daemonic by default.
The main thread looks like this:
import signal
import sys

if __name__ == '__main__':
    def exit_handler(signal, frame):
        rs.stop_engine()
        rs.join()
        sys.exit(0)

    signal.signal(signal.SIGINT, exit_handler)
    rs = RealStrategy()
    rs.run_engine()
And here are the rs.run_engine() and rs.stop_engine() methods that are called in main:
class RealStrategy(Thread):
    .....
    .....

    def run_engine(self):
        self.on_start()
        self._order_handler.start()
        self._data_parser.start()
        self._data_source.start()
        self.start()

    def stop_engine(self):
        self._data_source.stop()
        self._data_parser.stop()
        self._order_handler.stop()
        self._data_source.join()
        self._data_parser.join()
        self._order_handler.join()
        self.stop()
If you want to use queue.Queue.join, then you must also use queue.Queue.task_done. You can read the linked documentation or see the following copied from information available online:
Queue.task_done()
Indicate that a formerly enqueued task is complete.
Used by queue consumer threads. For each get() used to fetch a task, a
subsequent call to task_done() tells the queue that the processing on
the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises a ValueError if called more times than there were items placed
in the queue.
Queue.join()
Blocks until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls
task_done() to indicate that the item was retrieved and all work on it
is complete. When the count of unfinished tasks drops to zero, join()
unblocks.
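As a minimal illustration of that contract (a sketch, not part of the original answer): the consumer calls task_done() exactly once per item it fetches, and a None sentinel is one common way to stop it:

import queue
import threading

q = queue.Queue()

def consumer():
    while True:
        item = q.get()
        if item is None:   # sentinel: stop consuming
            q.task_done()
            break
        print("processing", item)
        q.task_done()      # one task_done() per get()

t = threading.Thread(target=consumer)
t.start()
for i in range(3):
    q.put(i)
q.put(None)                # tell the consumer to stop
q.join()                   # unblocks once every item has been task_done()'d
t.join()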
To test your problem, an example implementation was created to find out what was going on. It is slightly different from how your program works but demonstrates a method of solving your problem: terminate() installs a trace function via sys.settrace that raises StopThread inside the thread, and each Task carries a task_done callback so the queue's bookkeeping stays correct:
#! /usr/bin/env python3
import abc
import csv
import pathlib
import queue
import sys
import threading
import time

def main():
    source_path = pathlib.Path(r'C:\path\to\file.csv')
    data_source = CSVDataSource(source_path)
    data_source.start()
    processor = StoppableThread(target=consumer, args=[data_source])
    processor.start()
    time.sleep(0.1)
    data_source.stop()

def consumer(data_source):
    while data_source.empty:
        time.sleep(0.001)
    while not data_source.empty:
        task = data_source.get_from_queue(True, 0.1)
        print(*task.data, sep=', ', flush=True)
        task.done()

class StopThread(StopIteration):
    pass

threading.SystemExit = SystemExit, StopThread

class StoppableThread(threading.Thread):
    def _bootstrap(self, stop=False):
        # noinspection PyProtectedMember
        if threading._trace_hook:
            raise RuntimeError('cannot run thread with tracing')

        def terminate():
            nonlocal stop
            stop = True

        self.__terminate = terminate

        # noinspection PyUnusedLocal
        def trace(frame, event, arg):
            if stop:
                raise StopThread

        sys.settrace(trace)
        super()._bootstrap()

    def terminate(self):
        try:
            self.__terminate()
        except AttributeError:
            raise RuntimeError('cannot terminate thread '
                               'before it is started') from None

class Queryable:
    def __init__(self, maxsize=1 << 10):
        self.__queue = queue.Queue(maxsize)

    def add_to_queue(self, item):
        self.__queue.put(item)

    def get_from_queue(self, block=True, timeout=None):
        return self.__queue.get(block, timeout)

    @property
    def empty(self):
        return self.__queue.empty()

    @property
    def full(self):
        return self.__queue.full()

    def task_done(self):
        self.__queue.task_done()

    def join_queue(self):
        self.__queue.join()

class StoppableQueryThread(StoppableThread, Queryable):
    def __init__(self, target=None, name=None, args=(), kwargs=None,
                 *, daemon=None, maxsize=1 << 10):
        super().__init__(None, target, name, args, kwargs, daemon=daemon)
        Queryable.__init__(self, maxsize)

    def stop(self):
        self.terminate()
        self.join_queue()

class DataSource(StoppableQueryThread, abc.ABC):
    @abc.abstractmethod
    def __init__(self, maxsize=1 << 10):
        super().__init__(None, 'DataSource', maxsize=maxsize)

    def run(self):
        while True:
            record = self._fetch_data()
            self.add_to_queue(record)

    @abc.abstractmethod
    def _fetch_data(self):
        pass

class CSVDataSource(DataSource):
    def __init__(self, source_path):
        super().__init__()
        self.__data_parser = self.__build_data_parser(source_path)

    @staticmethod
    def __build_data_parser(source_path):
        with source_path.open(newline='') as source:
            parser = csv.reader(source)
            next(parser, None)
            yield from parser

    def _fetch_data(self):
        try:
            return Task(next(self.__data_parser), self.task_done)
        except StopIteration:
            raise StopThread from None

class Task:
    def __init__(self, data, callback):
        self.__data = data
        self.__callback = callback

    @property
    def data(self):
        return self.__data

    def done(self):
        self.__callback()

if __name__ == '__main__':
    main()

Exiting a Thread in Python

I'm trying to write a program that crawls through a website and downloads all the videos it has. I'm facing a problem where the number of threads continuously increases, even after the downloading of individual videos is done.
Here is the code for the individual Worker object, which is queued and then joined later. This is the only part of the code in which I generate a Thread. What I don't understand is how there can be remaining threads, given that I call the object's self.stop() method and the while loop breaks.
class Worker(Thread):
    def __init__(self, thread_pool):
        Thread.__init__(self)
        self.tasks = thread_pool.tasks
        self.tasks_info = thread_pool.tasks_info
        self.daemon = True
        self._is_running = True
        self.start()

    def stop(self):
        self._is_running = False

    def run(self):
        while self._is_running:
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception:
                print("\nError: Threadpool error.")
                sys.exit(1)
            self.tasks_info['num_tasks_complete'] += 1
            self.tasks.task_done()
            self.stop()
I've used the thread functions to check which threads are alive, and it turns out that it is indeed mostly the worker functions as well as other objects called Thread(SockThread) and _MainThread, which I do not know how to close.
Please advise on 1. why the Worker thread is not ending and 2. how to get rid of the Thread(SockThread) as well as the _MainThread.
Thank you!
edit 1
class ThreadPool:
    def __init__(self, name, num_threads, num_tasks):
        self.tasks = Queue(num_threads)
        self.num_threads = num_threads
        self.tasks_info = {
            'name': name,
            'num_tasks': num_tasks,
            'num_tasks_complete': 0
        }
        for _ in range(num_threads):
            Worker(self)
        print(threading.active_count)

    def add_task(self, func, *args, **kwargs):
        self.tasks.put((func, args, kwargs))

    def wait_completion(self):
        print("at the beginning of wait_completion:")
        print(threading.active_count())
Looking at your code, you initialize the thread and then call start() on it from inside __init__ itself, which is not the proper way; construction and starting should be kept separate. Also, a threading.Event is the idiomatic way to signal shutdown rather than a plain boolean flag. Your code should be as follows:
import sys
from threading import Thread, Event

class Worker(Thread):
    def __init__(self, thread_pool):
        super(Worker, self).__init__()
        self.tasks = thread_pool.tasks
        self.tasks_info = thread_pool.tasks_info
        self.exit = Event()

    def shutdown(self):
        self.exit.set()

    def run(self):
        while not self.exit.is_set():
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception:
                print("\nError: Threadpool error.")
                # use shutdown method for error
                self.shutdown()
                sys.exit(1)
            self.tasks_info['num_tasks_complete'] += 1
            self.tasks.task_done()
        self.shutdown()
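One caveat with this version (my observation, not part of the original answer): tasks.get() blocks indefinitely, so the Event is only checked between tasks and shutdown() has no effect while the queue is idle; also, with start() removed from __init__, the pool must now call start() on each worker explicitly. A sketch of run() using a timeout so the loop can notice shutdown, assuming the Python 3 queue module:

from queue import Empty

class Worker(Thread):
    # ... __init__ and shutdown as above ...

    def run(self):
        while not self.exit.is_set():
            try:
                func, args, kargs = self.tasks.get(timeout=0.1)
            except Empty:
                continue  # no work yet; loop back and re-check the exit flag
            try:
                func(*args, **kargs)
            except Exception:
                print("\nError: Threadpool error.")
                self.shutdown()
            self.tasks_info['num_tasks_complete'] += 1
            self.tasks.task_done()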

How do I Spawn threads from two different objects and coordinate them in python v2.7?

I am trying to combine the answers I got from two different python questions.
Here is the first question and answer. Basically I just wanted to spawn two threads, one to powerDown() and the other to powerUp(), where powerUp() pends on powerDown().
How to spawn a thread inside another thread in the same object in python?
import threading

class Server(threading.Thread):
    # some code
    def run(self):
        self.reboot()

    # This is the top level function called by other objects
    def reboot(self):
        # perhaps add a lock
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
            up = threading.Thread(target=self.__powerUp)
            up.start()

    def __powerDown(self):
        # do something
        pass

    def __powerUp(self):
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down
Here is the second question and answer. Basically I wanted to start a thread, and then call a function of the object.
How to call a function on a running Python thread
import queue
import threading

class SomeClass(threading.Thread):
    def __init__(self, q, loop_time=1.0/60):
        self.q = q
        self.timeout = loop_time
        super(SomeClass, self).__init__()

    def onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def run(self):
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except queue.Empty:
                self.idle()

    def idle(self):
        # put the code you would have put in the `run` loop here
        pass

    def doSomething(self):
        pass

    def doSomethingElse(self):
        pass
Here is the combined idea code. Basically I wanted to spawn a thread, then queue up functions to execute, which in this case is reboot(). reboot() in turn creates two threads, the powerDown() and powerUp() threads, where powerUp() pends on powerDown().
import threading
import Queue

class Server(threading.Thread):
    def __init__(self, q, loop_time=1.0/60):
        self.q = q
        self.timeout = loop_time
        super(Server, self).__init__()

    def run(self):
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except queue.Empty:
                self.idle()

    def idle(self):
        # put the code you would have put in the `run` loop here
        pass

    # This is the top level function called by other objects
    def reboot(self):
        self.__onThread(self.__reboot)

    def __reboot(self):
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
            up = threading.Thread(target=self.__powerUp)
            up.start()

    def __onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def __powerDown(self):
        # do something
        pass

    def __powerUp(self):
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down
Everything works, except when I create two Server subclasses.
class ServerA(Server):
pass
class ServerB(Server):
pass
Here is the code that instantiates both subclasses and calls the start() and reboot() functions:
serverA = ServerA(None)
serverB = ServerB(None)
serverA.start()
serverB.start()
serverA.reboot()
serverB.reboot()
I expect serverA.reboot() and serverB.reboot() to happen concurrently, which is what I want, but they DO NOT! serverB.reboot() gets executed after serverA.reboot() is done. That is, if I put print statements, I get
serverA started
serverB started
serverA.reboot() called
serverA.__powerDown called
serverA.__powerUp called
serverB.reboot() called
serverB.__powerDown called
serverB.__powerUp called
I know for a fact that it takes longer for ServerA to reboot, so I expect something like this
serverA started
serverB started
serverA.reboot() called
serverB.reboot() called
serverA.__powerDown called
serverB.__powerDown called
serverB.__powerUp called
serverA.__powerUp called
I hope that makes sense. If it does, why aren't my reboot() functions happening simultaneously?
Why are you passing None when a queue object is expected in the first place? This causes an exception complaining that a NoneType object has no get method. Besides that, the exception you want to handle in the run method is Queue.Empty, not queue.Empty.
Here is the revised code and its output on my machine:
import threading
import Queue

class Server(threading.Thread):
    def __init__(self, title, q, loop_time=1.0/60):
        self.title = title
        self.q = q
        self.timeout = loop_time
        super(Server, self).__init__()

    def run(self):
        print "%s started" % self.title
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except Queue.Empty:
                # print "empty"
                self.idle()

    def idle(self):
        pass
        # put the code you would have put in the `run` loop here

    # This is the top level function called by other objects
    def reboot(self):
        self.__onThread(self.__reboot)

    def __reboot(self):
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
            up = threading.Thread(target=self.__powerUp)
            up.start()

    def __onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def __powerDown(self):
        # do something
        print "%s power down" % self.title

    def __powerUp(self):
        print "%s power up" % self.title
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down

class ServerA(Server):
    pass

class ServerB(Server):
    pass

def main():
    serverA = ServerA("A", Queue.Queue())
    serverB = ServerB("B", Queue.Queue())
    serverA.start()
    serverB.start()
    serverA.reboot()
    serverB.reboot()

if __name__ == '__main__':
    main()
Output:
A started
B started
B power down
A power down
B power up
A power up
