I have a working script that invokes multiple blocking read methods in different threads; whenever a blocking method receives data, it puts the data into a queue that is read by a task.
import asyncio
import atexit
import concurrent.futures
import threading
import time

def blocking_read(x, loop):
    while True:
        print(f"blocking_read {x}")
        time.sleep(1)
        # implementation: wait for data and put it into the queue

class Recorder():
    def __init__(self):
        self.count = 0

    def run_reader_threads(self, thread_pool, event):
        loop = asyncio.get_running_loop()
        threads = []
        for rx in range(2):
            threads.append(loop.run_in_executor(thread_pool, blocking_read, rx, loop))
        return threads

def wiatforever():
    while True:
        time.sleep(1)

reader_futures = []
executor = concurrent.futures.ThreadPoolExecutor()
event = threading.Event()

def main():
    async def doit():
        recorder_app = Recorder()
        global reader_futures
        reader_futures = recorder_app.run_reader_threads(executor, event)
        #reader_handlers = recorder_app.create_handlers()
        await wiatforever()
    try:
        print("RUN DO IT")
        asyncio.run(doit())
    except KeyboardInterrupt:
        # cancel all futures, but getting an exception on cancelling the 1st future
        reader_futures[0].cancel()
        pass
#--------------------------------------------------------------------------------------------
if __name__ == '__main__':
    main()
The problem is that whenever I try to stop the script gracefully by cancelling all the futures, I get the exception RuntimeError: Event loop is closed.
OUTPUT
RUN DO IT
blocking_read 0
blocking_read 1
blocking_read 1
blocking_read 0
blocking_read 0
blocking_read 1
^CTraceback (most recent call last):
File "/home/devuser/Desktop/rnr_crash/2trial.py", line 45, in main
asyncio.run(doit())
File "/usr/local/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 629, in run_until_complete
self.run_forever()
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 596, in run_forever
self._run_once()
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 1890, in _run_once
handle._run()
File "/usr/local/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/home/devuser/Desktop/rnr_crash/2trial.py", line 41, in doit
await wiatforever()
File "/home/devuser/Desktop/rnr_crash/2trial.py", line 27, in wiatforever
time.sleep(1)
KeyboardInterrupt
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/devuser/Desktop/rnr_crash/2trial.py", line 53, in <module>
main()
File "/home/devuser/Desktop/rnr_crash/2trial.py", line 48, in main
reader_futures[0].cancel()
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 746, in call_soon
self._check_closed()
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 510, in _check_closed
raise RuntimeError('Event loop is closed')
RuntimeError: Event loop is closed
blocking_read 0
blocking_read 1
blocking_read 0
blocking_read 1
blocking_read 0
blocking_read 1
^CException ignored in: <module 'threading' from '/usr/local/lib/python3.9/threading.py'>
Traceback (most recent call last):
File "/usr/local/lib/python3.9/threading.py", line 1411, in _shutdown
atexit_call()
File "/usr/local/lib/python3.9/concurrent/futures/thread.py", line 31, in _python_exit
t.join()
File "/usr/local/lib/python3.9/threading.py", line 1029, in join
self._wait_for_tstate_lock()
File "/usr/local/lib/python3.9/threading.py", line 1045, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt:
Can anyone help me find the issue here and exit the script cleanly, without any exception?
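A minimal sketch of one way to shut down cleanly, assuming the real blocking reads can periodically check a stop flag (this is a simplified illustration, not the original implementation): do the shutdown while the loop is still running, instead of touching the futures after asyncio.run() has already closed the loop.

import asyncio
import concurrent.futures
import threading
import time

stop = threading.Event()  # checked by the blocking readers

def blocking_read(x):
    # stand-in for the real blocking read; exits once shutdown is requested
    while not stop.is_set():
        print(f"blocking_read {x}")
        time.sleep(1)

async def doit():
    loop = asyncio.get_running_loop()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures = [loop.run_in_executor(pool, blocking_read, rx) for rx in range(2)]
        try:
            await asyncio.gather(*futures)  # runs "forever" until cancelled
        finally:
            # runs while the loop is still alive, during asyncio.run() cleanup,
            # so the reader threads are released and the pool can shut down
            stop.set()

if __name__ == '__main__':
    try:
        asyncio.run(doit())
    except KeyboardInterrupt:
        # by the time we get here the loop is already closed, but the threads
        # were already told to stop in the finally block above
        pass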
my code:
import asyncio
from bleak import BleakClient

address = "C4:DE:E2:19:2B:B2"
MODEL_NBR_UUID = "6e400003-b5a3-f393-e0a9-e50e24dcca9e"

async def main(address):
    async with BleakClient(address) as client:
        model_number = await client.read_gatt_char(MODEL_NBR_UUID)
        print("Model Number: {0}".format("".join(map(chr, model_number))))

asyncio.run(main(address))
CMD:
Traceback (most recent call last):
File "C:\Users\ferna\OneDrive\Documentos\python\codigos\kivymd\Buzzer\conectar.py", line 12, in <module>
asyncio.run(main(address))
File "C:\Users\ferna\AppData\Local\Programs\Python\Python310\lib\asyncio\runners.py", line 44, in run
return loop.run_until_complete(main)
File "C:\Users\ferna\AppData\Local\Programs\Python\Python310\lib\asyncio\base_events.py", line 646, in run_until_complete
return future.result()
File "C:\Users\ferna\OneDrive\Documentos\python\codigos\kivymd\Buzzer\conectar.py", line 9, in main
model_number = await client.read_gatt_char(MODEL_NBR_UUID)
File "C:\Users\ferna\OneDrive\Documentos\python\codigos\kivymd\Buzzer\buzzer\lib\site-packages\bleak\__init__.py", line 482, in read_gatt_char
return await self._backend.read_gatt_char(char_specifier, **kwargs)
File "C:\Users\ferna\OneDrive\Documentos\python\codigos\kivymd\Buzzer\buzzer\lib\site-packages\bleak\backends\winrt\client.py", line 612, in read_gatt_char
_ensure_success(
File "C:\Users\ferna\OneDrive\Documentos\python\codigos\kivymd\Buzzer\buzzer\lib\site-packages\bleak\backends\winrt\client.py", line 107, in _ensure_success
raise BleakError(
bleak.exc.BleakError: Could not read characteristic handle 20: Protocol Error 0x02: Read Not Permitted
#import threading
import multiprocessing
import time

condition = True
finish_state = False
x = 0
all_processes = []

def operation_process(x):
    # many operations with the x value; if they take too long they will be stopped,
    # because the other thread will set the boolean to True and close all open threads
    time.sleep(20)  # for example, these operations with x take 20 seconds in this case

def finish_process(finish_state):
    finish_state = True
    time.sleep(5)  # time until the boolean that terminates all started threads is sent
    return finish_state

def main_process(condition, finish_state, x):
    while condition == True:
        # Create a new thread and fork the code()
        # THIS IS THE LINE WHERE I AM SUPPOSED TO BE EXPECTING TO RECEIVE THE RETURN VALUE,
        # AT THE CYCLE NUMBER IN WHICH THE FUNCTION finish_process() TIME HAS FINISHED...
        #thr1 = threading.Thread(target=finish_process, args=(finish_state, ))
        thr1 = multiprocessing.Process(target=finish_process, args=(finish_state, ))
        #thr2 = threading.Thread(target=operation_process, args=(x, ))
        thr2 = multiprocessing.Process(target=operation_process, args=(x, ))
        x = x + 1
        print("cycle number: " + str(x))
        # Only in the first cycle
        if(x == 1):
            thr1.start()
            thr2.start()
            all_processes.append(thr1)
            all_processes.append(thr2)
        #print(threading.activeCount())  # to list open threads
        print(multiprocessing.active_children())  # to list open subprocesses
        if(finish_state == True):
            print("loop ends!")
            for process in all_processes:
                process.terminate()
            condition = False

main_process(condition, finish_state, x)
print("continue the code with other things after all threads/processes except the main one were closed with the loop that started them... ")
How do I receive the value finish_state = True returned by the function finish_process() that runs inside the thread?
The objective is for the main thread to call main_process(), which contains a loop that simulates a do-while loop; within that loop a thread is created only during the first cycle (that is, when x == 1). The idea is that the do-while loop repeats until the thread returns finish_state = True, at which point the main thread is closed, because finish_state is no longer False.
But the problem is that I need `` to run only on the first iteration, because otherwise a new thread would be created on every iteration, while at the same time I still have to wait for that thread to eventually return finish_state = True.
I decided to change threads to subprocesses because they are easier to kill: even though threads share variables, I didn't find a way to kill them selectively, so I use the terminate() method to kill processes instead.
I'm not really sure this is the best way to do it, since either way I need the finish_state signal to reach the main process or thread somehow, so that it can decide whether all the other threads or processes (except itself) should be closed.
Edit: When I try Frank Yellin's code, I get this error in the console:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\spawn.py", line 125, in _main
prepare(preparation_data)
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "C:\Users\Maty0\Anaconda3\lib\runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\Users\Maty0\Anaconda3\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\Users\Maty0\Anaconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\Maty0\Desktop\P\Python\Threads\thread_way.py", line 36, in <module>
main_process(finish_state)
File "C:\Users\Maty0\Desktop\P\Python\Threads\thread_way.py", line 26, in main_process
thr1.start()
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\popen_spawn_win32.py", line 45, in __init__
prep_data = spawn.get_preparation_data(process_obj._name)
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\spawn.py", line 154, in get_preparation_data
_check_not_importing_main()
File "C:\Users\Maty0\Anaconda3\lib\multiprocessing\spawn.py", line 134, in _check_not_importing_main
raise RuntimeError('''
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
I think you're going about this completely wrong. I'm also not quite sure why you have global variables that then get passed as arguments to functions, instead of just having local variables.
In any case you're looking for something like this. You need to make finish_state into an event, which can be set by one thread and observed by other threads.
import multiprocessing
import time

finish_state = multiprocessing.Event()

def operation_process(x):
    # many operations with the x value; if they take too long they will be stopped,
    # because the other process will set the event and close all open processes
    time.sleep(20)  # for example, these operations with x take 20 seconds in this case

def finish_process(finish_state):
    time.sleep(5)
    finish_state.set()

def main_process(finish_state):
    # Create a new process and fork the code()
    # THIS IS THE LINE WHERE I AM SUPPOSED TO BE EXPECTING TO RECEIVE THE RETURN VALUE...
    # thr1 = threading.Thread(target=finish_process, args=(finish_state, ))
    thr1 = multiprocessing.Process(target=finish_process, args=(finish_state,))
    thr1.start()
    thr2 = multiprocessing.Process(target=operation_process, args=(0,))
    thr2.start()

    finish_state.wait()
    for process in [thr1, thr2]:
        process.terminate()

if __name__ == '__main__':
    main_process(finish_state)
If you want a status report every second until finished:
while not finish_state.wait(timeout=1):
    # .wait() returns False if this times out
    ... print status report ....
I have a simple Flask app set up which runs with the command flask run. I'd like to be able to run this app in a Docker container using a gunicorn server. However, when I try to run it with gunicorn I see error messages. I'm running gunicorn --worker-class eventlet -w 1 app:app, which I got from their documentation.
I have a simple flask-socketio app:
.
├── app.py
└── templates
└── index.html
Here's the contents of app.py:
import os
import sys
from eventlet import patcher, support
import six
select = patcher.original('select')
time = patcher.original('time')

from eventlet.hubs.hub import BaseHub, READ, WRITE, noop

if getattr(select, 'kqueue', None) is None:
    raise ImportError('No kqueue implementation found in select module')

FILTERS = {READ: select.KQ_FILTER_READ,
           WRITE: select.KQ_FILTER_WRITE}

class Hub(BaseHub):
    MAX_EVENTS = 100

    def __init__(self, clock=None):
        super(Hub, self).__init__(clock)
        self._events = {}
        self._init_kqueue()

    def _init_kqueue(self):
        self.kqueue = select.kqueue()
        self._pid = os.getpid()

    def _reinit_kqueue(self):
        self.kqueue.close()
        self._init_kqueue()
        kqueue = self.kqueue
        events = [e for i in six.itervalues(self._events)
                  for e in six.itervalues(i)]
        kqueue.control(events, 0, 0)

    def _control(self, events, max_events, timeout):
        try:
            return self.kqueue.control(events, max_events, timeout)
        except (OSError, IOError):
            # have we forked?
            if os.getpid() != self._pid:
                self._reinit_kqueue()
                return self.kqueue.control(events, max_events, timeout)
            raise

    def add(self, evtype, fileno, cb, tb, mac):
        listener = super(Hub, self).add(evtype, fileno, cb, tb, mac)
        events = self._events.setdefault(fileno, {})
        if evtype not in events:
            try:
                event = select.kevent(fileno, FILTERS.get(evtype), select.KQ_EV_ADD)
                self._control([event], 0, 0)
                events[evtype] = event
            except ValueError:
                super(Hub, self).remove(listener)
                raise
        return listener

    def _delete_events(self, events):
        del_events = [
            select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
            for e in events
        ]
        self._control(del_events, 0, 0)

    def remove(self, listener):
        super(Hub, self).remove(listener)
        evtype = listener.evtype
        fileno = listener.fileno
        if not self.listeners[evtype].get(fileno):
            event = self._events[fileno].pop(evtype, None)
            if event is None:
                return
            try:
                self._delete_events((event,))
            except OSError:
                pass

    def remove_descriptor(self, fileno):
        super(Hub, self).remove_descriptor(fileno)
        try:
            events = self._events.pop(fileno).values()
            self._delete_events(events)
        except KeyError:
            pass
        except OSError:
            pass

    def wait(self, seconds=None):
        readers = self.listeners[READ]
        writers = self.listeners[WRITE]

        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        result = self._control([], self.MAX_EVENTS, seconds)
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        for event in result:
            fileno = event.ident
            evfilt = event.filter
            try:
                if evfilt == FILTERS[READ]:
                    readers.get(fileno, noop).cb(fileno)
                if evfilt == FILTERS[WRITE]:
                    writers.get(fileno, noop).cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())
                support.clear_sys_exc_info()
Here are the errors I'm seeing when running the gunicorn command:
Exception ignored in: <function _after_fork at 0x1121fc1f0>
Traceback (most recent call last):
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 1510, in _after_fork
thread._reset_internal_locks(True)
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 845, in _reset_internal_locks
self._started._at_fork_reinit()
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 527, in _at_fork_reinit
self._cond._at_fork_reinit()
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 253, in _at_fork_reinit
self._lock._at_fork_reinit()
AttributeError: 'Semaphore' object has no attribute '_at_fork_reinit'
[2022-01-31 20:57:29 +0000] [51963] [INFO] Booting worker with pid: 51963
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 460, in fire_timers
timer()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/timer.py", line 59, in __call__
cb(*args, **kw)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenthread.py", line 219, in main
result = function(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 78, in _eventlet_serve
conn, addr = sock.accept()
File "/usr/local/lib/python3.9/site-packages/eventlet/greenio/base.py", line 228, in accept
self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenio/base.py", line 206, in _trampoline
return trampoline(fd, read=read, write=write, timeout=timeout,
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/__init__.py", line 160, in trampoline
listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 55, in add
self._control([event], 0, 0)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 41, in _control
return self.kqueue.control(events, max_events, timeout)
OSError: [Errno 9] Bad file descriptor
[2022-01-31 20:57:29 +0000] [51963] [ERROR] Exception in worker process
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/gunicorn/arbiter.py", line 589, in spawn_worker
worker.init_process()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 134, in init_process
super().init_process()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/base.py", line 142, in init_process
self.run()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 166, in run
eventlet.sleep(1.0)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenthread.py", line 36, in sleep
hub.switch()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 297, in switch
return self.greenlet.switch()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 349, in run
self.wait(sleep_time)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 100, in wait
result = self._control([], self.MAX_EVENTS, seconds)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 41, in _control
return self.kqueue.control(events, max_events, timeout)
I'm not sure if I'm using the wrong command or if there is some other issue preventing me from using gunicorn?
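For reference, a minimal flask-socketio app.py that the gunicorn command above is meant to serve typically looks like the generic sketch below. This is an illustrative example only, not the poster's actual application (the app.py shown above contains eventlet's kqueue hub source rather than a Flask app).

# Minimal sketch of a flask-socketio app served by
# `gunicorn --worker-class eventlet -w 1 app:app` (generic example).
from flask import Flask, render_template
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app)

@app.route("/")
def index():
    # templates/index.html from the directory layout shown above
    return render_template("index.html")

@socketio.on("message")
def handle_message(data):
    print("received:", data)

if __name__ == "__main__":
    # used for local development; gunicorn serves `app` directly in production
    socketio.run(app)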
I have an issue in Python 3.7.3 where my multiprocessing operation (using Queue, Pool, and apply_async) deadlocks when handling large computational tasks.
For small computations, this multiprocessing task works just fine. However, when dealing with larger processes, the multiprocessing task stops, or deadlocks, altogether without exiting the process! I read that this will happen if you "grow your queue without bounds, and you are joining up to a subprocess that is waiting for room in the queue [...] your main process is stalled waiting for that one to complete, and it never will." (Process.join() and queue don't work with large numbers)
I am having trouble converting this concept into code. I would greatly appreciate guidance on refactoring the code I have written below:
import multiprocessing as mp

def listener(q, d):  # task to queue information into a manager dictionary
    while True:
        item_to_write = q.get()
        if item_to_write == 'kill':
            break
        foo = d['region']
        foo.add(item_to_write)
        d['region'] = foo  # add items and set to manager dictionary

def main():
    manager = mp.Manager()
    q = manager.Queue()
    d = manager.dict()
    d['region'] = set()
    pool = mp.Pool(mp.cpu_count() + 2)
    watcher = pool.apply_async(listener, (q, d))
    jobs = []
    for i in range(24):
        job = pool.apply_async(execute_search, (q, d))  # task for multiprocessing
        jobs.append(job)
    for job in jobs:
        job.get()  # begin multiprocessing task
    q.put('kill')  # kill multiprocessing task (view listener function)
    pool.close()
    pool.join()
    print('process complete')

if __name__ == '__main__':
    main()
Ultimately, I would like to prevent deadlocking altogether to facilitate a multiprocessing task that could operate indefinitely until completion.
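As an aside, the pitfall quoted above can be reproduced in isolation. The standalone sketch below (separate from the code in this question) shows why joining a process before draining its queue deadlocks: a child that puts a lot of data on a Queue cannot exit until that data has been flushed through the underlying pipe, and the pipe only drains when the parent reads from it, so a parent blocked in join() waits forever.

import multiprocessing as mp

def producer(q):
    for i in range(100_000):
        q.put(i)

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=producer, args=(q,))
    p.start()
    # p.join()  # joining here, before draining the queue, can deadlock
    items = [q.get() for _ in range(100_000)]  # drain the queue first ...
    p.join()                                   # ... then join the producer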
BELOW IS THE TRACEBACK WHEN EXITING DEADLOCK IN BASH
^CTraceback (most recent call last):
File "multithread_search_cl_gamma.py", line 260, in <module>
main(GEOTAG)
File "multithread_search_cl_gamma.py", line 248, in main
job.get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 651, in get
Process ForkPoolWorker-28:
Process ForkPoolWorker-31:
Process ForkPoolWorker-30:
Process ForkPoolWorker-27:
Process ForkPoolWorker-29:
Process ForkPoolWorker-26:
self.wait(timeout)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 648, in wait
Traceback (most recent call last):
Traceback (most recent call last):
Traceback (most recent call last):
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
self._event.wait(timeout)
File "/Users/Ira/anaconda3/lib/python3.7/threading.py", line 552, in wait
Traceback (most recent call last):
Traceback (most recent call last):
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 352, in get
res = self._reader.recv_bytes()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/connection.py", line 216, in recv_bytes
buf = self._recv_bytes(maxlength)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/connection.py", line 407, in _recv_bytes
buf = self._recv(4)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/connection.py", line 379, in _recv
chunk = read(handle, remaining)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
KeyboardInterrupt
signaled = self._cond.wait(timeout)
File "/Users/Ira/anaconda3/lib/python3.7/threading.py", line 296, in wait
waiter.acquire()
KeyboardInterrupt
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
Traceback (most recent call last):
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/pool.py", line 110, in worker
task = get()
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/queues.py", line 351, in get
with self._rlock:
File "/Users/Ira/anaconda3/lib/python3.7/multiprocessing/synchronize.py", line 95, in __enter__
return self._semlock.__enter__()
KeyboardInterrupt
Below is the updated script:
import multiprocessing as mp
import queue
import time

def listener(q, d, stop_event):
    while not stop_event.is_set():
        try:
            while True:
                item_to_write = q.get(False)
                if item_to_write == 'kill':
                    break
                foo = d['region']
                foo.add(item_to_write)
                d['region'] = foo
        except queue.Empty:
            pass
        time.sleep(0.5)
        if not q.empty():
            continue

def main():
    manager = mp.Manager()
    stop_event = manager.Event()
    q = manager.Queue()
    d = manager.dict()
    d['region'] = set()
    pool = mp.get_context("spawn").Pool(mp.cpu_count() + 2)
    watcher = pool.apply_async(listener, (q, d, stop_event))
    stop_event.set()
    jobs = []
    for i in range(24):
        job = pool.apply_async(execute_search, (q, d))
        jobs.append(job)
    for job in jobs:
        job.get()
    q.put('kill')
    pool.close()
    pool.join()
    print('process complete')

if __name__ == '__main__':
    main()
UPDATE:
execute_command executes several processes necessary for the search, so I've included the code showing where q.put() is called.
On its own, the script takes > 72 hours to finish. No single process completes the entire task; instead they work individually and reference a manager.dict() to avoid repeating work. These tasks run until every tuple in the manager.dict() has been processed.
def area(self, tup, housing_dict, q):
    state, reg, sub_reg = tup[0], tup[1], tup[2]
    for cat in housing_dict:
        """
        computationally expensive, takes > 72 hours
        for a list of 512 tup(s)
        """
        result = self.search_geotag(
            state, reg, cat, area=sub_reg
        )
    q.put(tup)
The tup passed to q.put(tup) is ultimately picked up by the listener function, which adds it to the manager.dict().
Since listener and execute_search share the same queue object, there could be a race
where execute_search gets 'kill' from the queue before listener does; listener would then be stuck in a blocking get() forever, since no new items ever arrive.
For that case you can use an Event object to signal all processes to stop:
import multiprocessing as mp
import queue

def listener(q, d, stop_event):
    while not stop_event.is_set():
        try:
            item_to_write = q.get(timeout=0.1)
            foo = d['region']
            foo.add(item_to_write)
            d['region'] = foo
        except queue.Empty:
            pass
    print("Listener process stopped")

def main():
    manager = mp.Manager()
    stop_event = manager.Event()
    q = manager.Queue()
    d = manager.dict()
    d['region'] = set()
    pool = mp.get_context("spawn").Pool(mp.cpu_count() + 2)
    watcher = pool.apply_async(listener, (q, d, stop_event))
    jobs = []
    for i in range(24):
        job = pool.apply_async(execute_search, (q, d))
        jobs.append(job)
    try:
        for job in jobs:
            job.get(300)  # get the result or raise a timeout exception after 300 seconds
    except mp.TimeoutError:
        pool.terminate()  # kill the pool workers if we hit the timeout
    stop_event.set()  # stop the listener process
    print('process complete')

if __name__ == '__main__':
    main()
I want to start a new process after the main tkinter window is loaded.
I do it like this:
if __name__ == "__main__":
app = MainFrame()
print(("App loaded"))
canvasWindow = app.getCurrentTopFrame().start()
app.mainloop()
print("Window change")
After the tkinter init I call a function to start the new process:
def start(self):
    print("Before logic thread init")
    lock = mp.Lock()
    pro = mp.Process(target=self.manager.startTraining, args=(deley, lock))
    pro.start()
This is the startTraining function:
def startTraining(self, lock):
    """Starts training sequence"""
    print("start tra")
    for x in range(0, self.numOfGenerations):
        print("x")
        self.population.nextGeneration()
        print("Iteration num ", x, "Fitness of best one is ", self.population.bestSalesman)
        lock.acquire()
        self.canvas.updateFrame(self.population.bestSalesman.dna.getAsListOfTuple())
        lock.release()
This is the updateFrame function:
def updateFrame(self, listOfPoints):
    self.canvas.delete("all")
    for y in listOfPoints:
        self.canvas.create_oval(y[0], y[1], y[0]+5, y[1]+5, fill="Black")
    li = cycle(listOfPoints)
    p2 = next(li)
    for x in listOfPoints:
        p1, p2 = p2, next(li)
        self.canvas.create_line(p1[0], p1[1], p2[0]+2, p2[1]+2)
        if p2 == listOfPoints[-1]:
            self.canvas.create_line(p2[0], p2[1], listOfPoints[0]+2, listOfPoints[0][1]+2)
    self.canvas.pack()
I don't get why, but the behavior is that the main window does load and then the
error occurs:
After init
After tkrasie
after start
App loaded
Before logic thread init
Traceback (most recent call last):
File "C:\Users\CrazyUrusai\git\WarsawSchoolOfAI\GeneticsAlgorithms\MainFrame.py", line 129, in <module>
canvasWindow = app.getCurrentTopFrame().start()
File "C:\Users\CrazyUrusai\git\WarsawSchoolOfAI\GeneticsAlgorithms\MainFrame.py", line 111, in start
pro.start()
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _tkinter.tkapp objects
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
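The immediate cause is that on Windows multiprocessing spawns a fresh interpreter and pickles the Process target and its arguments; self.manager.startTraining is a bound method whose object graph reaches the tkinter app (_tkinter.tkapp), which cannot be pickled. A common workaround, shown as a generic sketch below (the class and function names are illustrative, not the original ones), is to keep all tkinter objects in the main process, give the worker process only picklable data, and let the GUI poll a multiprocessing.Queue with after():

# Generic sketch: worker never touches tkinter; GUI polls a Queue for results.
import multiprocessing as mp
import tkinter as tk

def training_worker(num_generations, out_q):
    # stand-in for the real training loop; only picklable data crosses the boundary
    for gen in range(num_generations):
        points = [(10 * gen, 10 * gen), (50, 20), (30, 80)]  # dummy result
        out_q.put(points)

class App(tk.Tk):
    def __init__(self, out_q):
        super().__init__()
        self.canvas = tk.Canvas(self, width=200, height=200)
        self.canvas.pack()
        self.out_q = out_q
        self.after(100, self.poll_queue)

    def poll_queue(self):
        # runs in the GUI process, so updating the canvas here is safe
        while not self.out_q.empty():
            points = self.out_q.get_nowait()
            self.canvas.delete("all")
            for x, y in points:
                self.canvas.create_oval(x, y, x + 5, y + 5, fill="black")
        self.after(100, self.poll_queue)

if __name__ == "__main__":
    q = mp.Queue()
    worker = mp.Process(target=training_worker, args=(5, q))
    worker.start()
    App(q).mainloop()
    worker.join()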