I'm running my function with a multiprocessing implementation:
def assign_task(self, module, command):
    logging.debug("Assigning task for {0}".format(command._get_module_id()))
    if self.queue is None:
        self.queue = JoinableQueue()
    if self.processes is None:
        self.processes = [Process(target=self.do_execute) for i in range(self.max_process)]
        for process in self.processes:
            process.daemon = True
            process.start()
    logging.debug("Queuing message {0}".format(command._get_module_id()))
    self.queue.put((module, command))

def do_execute(self):
    # the code
but I'm getting this error:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\muhammad.aqshol\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 107, in spawn_main
new_handle = reduction.duplicate(pipe_handle,
File "C:\Users\muhammad.aqshol\AppData\Local\Programs\Python\Python38\lib\multiprocessing\reduction.py", line 79, in duplicate
return _winapi.DuplicateHandle(
OSError: [WinError 6] The handle is invalid
Am I wrong in the implementation, or is something missing?
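One commonly suggested restructuring (a sketch under the assumption that the failure comes from Windows' spawn start method pickling self, and with it the queue, when the target is a bound method): make the worker a module-level function, hand it the queue explicitly, and start everything under a __main__ guard. The names below are illustrative, not the original class:

from multiprocessing import Process, JoinableQueue

def do_execute(queue):
    # Module-level worker: receives the queue as an argument instead of
    # reaching it through self, so no instance state has to be pickled.
    while True:
        module, command = queue.get()
        try:
            ...  # execute the command here
        finally:
            queue.task_done()

if __name__ == "__main__":
    queue = JoinableQueue()
    processes = [Process(target=do_execute, args=(queue,), daemon=True)
                 for _ in range(4)]  # 4 stands in for self.max_process
    for process in processes:
        process.start()
    queue.put(("module", "command"))
    queue.join()  # block until every queued task is marked done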
I want to run parallel processing using a class, but the code gives this error:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\MonPc\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Users\MonPc\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
This is similar to the original code that I want to get working.
My code:
from multiprocessing import Process

class class1:
    def __init__(self):
        super().__init__()
        txt = "Rachid"
        p1 = class2(txt)
        p1.start()
        p1.join()

class class2(Process):
    def __init__(self, txt):
        Process.__init__(self)
        self.txt = txt

    def run(self):
        print("*" * 10)
        print(self.txt)
        print("*" * 10)

class1()
How can I avoid getting this error?
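The usual fix for this EOFError on Windows (a sketch, not a guaranteed diagnosis for every case): multiprocessing's spawn start method re-imports the script in each child process, so the top-level class1() call runs again in the child while the parent is still trying to send it the pickled Process object. Guarding the entry point prevents that:

from multiprocessing import Process

class class1:
    def __init__(self):
        super().__init__()
        txt = "Rachid"
        p1 = class2(txt)
        p1.start()
        p1.join()

class class2(Process):
    def __init__(self, txt):
        Process.__init__(self)
        self.txt = txt

    def run(self):
        print("*" * 10)
        print(self.txt)
        print("*" * 10)

if __name__ == '__main__':
    # Guard the entry point so the spawned child, which re-imports this
    # module, does not start another class1() of its own.
    class1()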
I have a simple Flask app set up which runs with the command flask run. I'd like to be able to run this app in a Docker container, which I'm trying to do using a gunicorn server. However, when I try to run it with gunicorn I see error messages. I'm running gunicorn --worker-class eventlet -w 1 app:app, which I got from their documentation.
I have a simple flask-socketio app:
.
├── app.py
└── templates
└── index.html
Here's the contents of app.py:
import os
import sys

from eventlet import patcher, support
import six
select = patcher.original('select')
time = patcher.original('time')

from eventlet.hubs.hub import BaseHub, READ, WRITE, noop

if getattr(select, 'kqueue', None) is None:
    raise ImportError('No kqueue implementation found in select module')

FILTERS = {READ: select.KQ_FILTER_READ,
           WRITE: select.KQ_FILTER_WRITE}

class Hub(BaseHub):
    MAX_EVENTS = 100

    def __init__(self, clock=None):
        super(Hub, self).__init__(clock)
        self._events = {}
        self._init_kqueue()

    def _init_kqueue(self):
        self.kqueue = select.kqueue()
        self._pid = os.getpid()

    def _reinit_kqueue(self):
        self.kqueue.close()
        self._init_kqueue()
        kqueue = self.kqueue
        events = [e for i in six.itervalues(self._events)
                  for e in six.itervalues(i)]
        kqueue.control(events, 0, 0)

    def _control(self, events, max_events, timeout):
        try:
            return self.kqueue.control(events, max_events, timeout)
        except (OSError, IOError):
            # have we forked?
            if os.getpid() != self._pid:
                self._reinit_kqueue()
                return self.kqueue.control(events, max_events, timeout)
            raise

    def add(self, evtype, fileno, cb, tb, mac):
        listener = super(Hub, self).add(evtype, fileno, cb, tb, mac)
        events = self._events.setdefault(fileno, {})
        if evtype not in events:
            try:
                event = select.kevent(fileno, FILTERS.get(evtype), select.KQ_EV_ADD)
                self._control([event], 0, 0)
                events[evtype] = event
            except ValueError:
                super(Hub, self).remove(listener)
                raise
        return listener

    def _delete_events(self, events):
        del_events = [
            select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
            for e in events
        ]
        self._control(del_events, 0, 0)

    def remove(self, listener):
        super(Hub, self).remove(listener)
        evtype = listener.evtype
        fileno = listener.fileno
        if not self.listeners[evtype].get(fileno):
            event = self._events[fileno].pop(evtype, None)
            if event is None:
                return
            try:
                self._delete_events((event,))
            except OSError:
                pass

    def remove_descriptor(self, fileno):
        super(Hub, self).remove_descriptor(fileno)
        try:
            events = self._events.pop(fileno).values()
            self._delete_events(events)
        except KeyError:
            pass
        except OSError:
            pass

    def wait(self, seconds=None):
        readers = self.listeners[READ]
        writers = self.listeners[WRITE]

        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        result = self._control([], self.MAX_EVENTS, seconds)
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        for event in result:
            fileno = event.ident
            evfilt = event.filter
            try:
                if evfilt == FILTERS[READ]:
                    readers.get(fileno, noop).cb(fileno)
                if evfilt == FILTERS[WRITE]:
                    writers.get(fileno, noop).cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())
                support.clear_sys_exc_info()
Here are the errors I'm seeing when running the gunicorn command:
Exception ignored in: <function _after_fork at 0x1121fc1f0>
Traceback (most recent call last):
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 1510, in _after_fork
thread._reset_internal_locks(True)
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 845, in _reset_internal_locks
self._started._at_fork_reinit()
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 527, in _at_fork_reinit
self._cond._at_fork_reinit()
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 253, in _at_fork_reinit
self._lock._at_fork_reinit()
AttributeError: 'Semaphore' object has no attribute '_at_fork_reinit'
[2022-01-31 20:57:29 +0000] [51963] [INFO] Booting worker with pid: 51963
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 460, in fire_timers
timer()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/timer.py", line 59, in __call__
cb(*args, **kw)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenthread.py", line 219, in main
result = function(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 78, in _eventlet_serve
conn, addr = sock.accept()
File "/usr/local/lib/python3.9/site-packages/eventlet/greenio/base.py", line 228, in accept
self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenio/base.py", line 206, in _trampoline
return trampoline(fd, read=read, write=write, timeout=timeout,
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/__init__.py", line 160, in trampoline
listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 55, in add
self._control([event], 0, 0)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 41, in _control
return self.kqueue.control(events, max_events, timeout)
OSError: [Errno 9] Bad file descriptor
[2022-01-31 20:57:29 +0000] [51963] [ERROR] Exception in worker process
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/gunicorn/arbiter.py", line 589, in spawn_worker
worker.init_process()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 134, in init_process
super().init_process()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/base.py", line 142, in init_process
self.run()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 166, in run
eventlet.sleep(1.0)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenthread.py", line 36, in sleep
hub.switch()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 297, in switch
return self.greenlet.switch()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 349, in run
self.wait(sleep_time)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 100, in wait
result = self._control([], self.MAX_EVENTS, seconds)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 41, in _control
return self.kqueue.control(events, max_events, timeout)
Am I using the wrong command, or is there some other issue preventing me from using gunicorn?
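For reference, the app.py that gunicorn --worker-class eventlet -w 1 app:app expects is typically shaped like this (an illustrative sketch, not the poster's actual application; it assumes flask and flask_socketio are installed and that templates/index.html exists):

from flask import Flask, render_template
from flask_socketio import SocketIO

app = Flask(__name__)
# async_mode='eventlet' matches the eventlet worker class used above.
socketio = SocketIO(app, async_mode='eventlet')

@app.route('/')
def index():
    return render_template('index.html')

if __name__ == '__main__':
    # Only used for local development; gunicorn imports app:app directly.
    socketio.run(app)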
I want to configure a multiple-GPU environment using torch.multiprocessing and torch.distributed. However, I received the following error message:
Traceback (most recent call last):
File "train_custom.py", line 398, in <module>
mp.spawn(init_process, args=(world_size, backend), nprocs=world_size, join=True)
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 230, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 188, in start_processes
while not context.join():
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 150, in join
raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException:
-- Process 0 terminated with the following error:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
fn(i, *args)
File "/root/USRGAN_step2/train_custom.py", line 390, in init_process
fn(rank, size2)
TypeError: 'str' object is not callable
My code is as follows:
def run(rank, size2):
    ...

def init_process(rank, size2, fn, backend='gloo'):
    """ Initialize the distributed environment. """
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=torch.cuda.device_count())
    fn(rank, size2)

###################################

if __name__ == "__main__":
    world_size = torch.cuda.device_count()
    backend = 'gloo'
    mp.spawn(init_process, args=(world_size, backend), nprocs=world_size, join=True)

    #mp.set_start_method('spawn', force=True)
    processes = []
    size2 = 4
    for rank in range(size2):
        p = Process(target=init_process, args=(rank, size2, run))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
I followed the tutorial (https://pytorch.org/tutorials/intermediate/dist_tuto.html) as it was, but this error occurred, and I can't find the cause.
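A minimal sketch of the likely cause and fix, reusing the run and init_process definitions from the code above: mp.spawn prepends the rank to args, so the remaining tuple has to match init_process's parameters (size2, fn, backend) in order. The original call passed (world_size, backend), which binds fn to the string 'gloo'; fn(rank, size2) then raises "'str' object is not callable". Passing the run function explicitly lines the arguments up:

import torch
import torch.multiprocessing as mp

if __name__ == "__main__":
    world_size = torch.cuda.device_count()
    # After the automatically supplied rank, args must match
    # init_process(rank, size2, fn, backend):
    # here size2=world_size, fn=run, backend='gloo'.
    mp.spawn(init_process,
             args=(world_size, run, 'gloo'),
             nprocs=world_size,
             join=True)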
I'm writing a program that spawns a process and restarts it under certain conditions. For example, if a child process stops sending data to the mother process for a certain period of time, I want the mother process to terminate the child and restart it. I thought I could use a thread to receive data from the child process and restart it, but it doesn't work the way I thought.
import numpy as np
import multiprocessing as mp
import threading
import time
from apscheduler.schedulers.background import BackgroundScheduler

pipe_in, pipe_out = mp.Pipe()

class Mother():
    def __init__(self):
        self.pipe_out = pipe_out
        self.proc = mp.Process(target=self.test_func, args=(pipe_in,))
        self.proc.start()
        self.thread = threading.Thread(target=self.thread_reciever, args=(self.pipe_out,))
        self.thread.start()

    def thread_reciever(self, pipe_out):
        while True:
            value = pipe_out.recv()
            print(value)
            if value == 5:
                self.proc.terminate()
                time.sleep(2)
                self.proc = mp.Process(target=self.test_func)
                self.proc.start()

    def test_func(self, pipe_in):
        for i in range(10):
            pipe_in.send(i)
            time.sleep(1)

if __name__ == '__main__':
    r = Mother()
It prints out this error:
D:\>d:\python36-32\python.exe temp06.py
0
1
2
3
4
5
Exception in thread Thread-1:
Traceback (most recent call last):
File "d:\python36-32\lib\threading.py", line 916, in _bootstrap_inner
self.run()
File "d:\python36-32\lib\threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "temp06.py", line 28, in thread_reciever
self.proc.start()
File "d:\python36-32\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "d:\python36-32\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "d:\python36-32\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "d:\python36-32\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "d:\python36-32\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
D:\>Traceback (most recent call last):
File "<string>", line 1, in <module>
File "d:\python36-32\lib\multiprocessing\spawn.py", line 99, in spawn_main
new_handle = reduction.steal_handle(parent_pid, pipe_handle)
File "d:\python36-32\lib\multiprocessing\reduction.py", line 82, in steal_handle
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
OSError: [WinError 87]
How could I start and terminate a process inside a thread? (I'm using a thread because it can synchronously receive data from a different process.) Or is there another way to do this job?
With test_func as a global function:
import numpy as np
import multiprocessing as mp
import threading
import time
from apscheduler.schedulers.background import BackgroundScheduler

pipe_in, pipe_out = mp.Pipe()

def test_func(pipe_in):
    for i in range(10):
        pipe_in.send(i)
        time.sleep(1)

class Mother():
    def __init__(self):
        self.pipe_out = pipe_out
        mp.freeze_support()
        self.proc = mp.Process(target=test_func, args=(pipe_in,))
        self.proc.start()
        self.thread = threading.Thread(target=self.thread_reciever, args=(self.pipe_out,))
        self.thread.start()

    def thread_reciever(self, pipe_out):
        while True:
            value = pipe_out.recv()
            print(value)
            if value == 5:
                self.proc.terminate()
                time.sleep(2)
                mp.freeze_support()
                self.proc = mp.Process(target=test_func, args=(pipe_in,))
                self.proc.start()

if __name__ == '__main__':
    r = Mother()
OUTPUT
D:\> d:\python36-32\python.exe temp06.py
0
1
2
3
4
5
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "d:\python36-32\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "d:\python36-32\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
AttributeError: Can't get attribute 'test_func' on <module '__main__' (built-in)>
Under Windows there is no fork syscall, so Python starts a new interpreter instance and uses pickle/unpickle to reconstruct the execution context in the child, but thread.Lock is not picklable. Pickling self.test_func drags the whole Mother instance along with it, and its self.thread attribute references a thread.Lock object, which makes the pickle fail.
You could simply change test_func to a plain global function, with no reference to the thread object:
self.proc = mp.Process(target=test_func, args=(pipe_in,))
...

def test_func(pipe_in):
    for i in range(10):
        pipe_in.send(i)
        time.sleep(1)
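Putting the pieces together, a minimal sketch of the full pattern (an illustration under the assumptions above, not the original program): the worker lives at module level, and the pipe ends are created under the __main__ guard and passed in explicitly, so nothing pickled for the child references a thread or lock:

import multiprocessing as mp
import threading
import time

def test_func(pipe_in):
    # Module-level worker: picklable because it carries no instance state.
    for i in range(10):
        pipe_in.send(i)
        time.sleep(1)

class Mother:
    def __init__(self, pipe_in, pipe_out):
        self.pipe_in = pipe_in
        self.proc = mp.Process(target=test_func, args=(pipe_in,))
        self.proc.start()
        self.thread = threading.Thread(target=self.thread_receiver, args=(pipe_out,))
        self.thread.start()

    def thread_receiver(self, pipe_out):
        while True:
            value = pipe_out.recv()
            print(value)
            if value == 5:
                # Restart the child; only the picklable pipe end is passed.
                self.proc.terminate()
                time.sleep(2)
                self.proc = mp.Process(target=test_func, args=(self.pipe_in,))
                self.proc.start()

if __name__ == '__main__':
    pipe_in, pipe_out = mp.Pipe()
    r = Mother(pipe_in, pipe_out)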
I can't find an explanation for this behavior in Python 3:
from multiprocessing import Process, cpu_count, freeze_support, Manager

class A:
    def __init__(self):
        # self._manager = Manager()
        # self._list = self._manager.list()
        manager = Manager()
        self._list = manager.list()

    def producer(self):
        processes = []
        cores = cpu_count()
        for i in range(cores):
            process = Process(target=self.worker)
            process.start()
            processes.append(process)
        for process in processes:
            process.join()

    def worker(self):
        print('I was called')

if __name__ == '__main__':
    freeze_support()
    a = A()
    a.producer()
With this in __init__:
self._manager = Manager()
self._list = self._manager.list()
I get OSError: handle is closed at the call to process.start().
With this in __init__:
manager = Manager()
self._list = manager.list()
Everything seems to work.
I read https://docs.python.org/3.6/library/multiprocessing.html#sharing-state-between-processes but I can't find an explanation of why an instance of Manager() can't be stored as an instance attribute in the example above. My best guess is that Manager() is itself a process, and that with a target like target=self.worker I'm breaking some logic in how processes are handled.
Question: Am I right, or am I missing something?
Full Traceback:
Traceback (most recent call last):
File "G:/files-from-server/apps/test_module/test_export.py", line 27, in <module>
a.producer()
File "G:/files-from-server/apps/test_module/test_export.py", line 15, in producer
process.start()
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\connection.py", line 939, in reduce_pipe_connection
dh = reduction.DupHandle(conn.fileno(), access)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\connection.py", line 170, in fileno
self._check_closed()
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
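As far as I can tell, you're essentially right: target=self.worker means the whole A instance is pickled for the child, and a SyncManager holds a live connection to the manager process whose handle can't survive that (hence the closed-handle error in reduce_pipe_connection), while the list proxy pickles fine. A sketch of a pattern that keeps the manager off the instance entirely (illustrative names, not the original code):

from multiprocessing import Process, cpu_count, freeze_support, Manager

def worker(shared_list, i):
    # Module-level worker: only the picklable list proxy crosses the
    # process boundary; the Manager object itself is never pickled.
    shared_list.append(i)

class A:
    def __init__(self, manager):
        # Store only the proxy; the SyncManager stays outside the instance.
        self._list = manager.list()

    def producer(self):
        processes = [Process(target=worker, args=(self._list, i))
                     for i in range(cpu_count())]
        for process in processes:
            process.start()
        for process in processes:
            process.join()
        print(list(self._list))

if __name__ == '__main__':
    freeze_support()
    with Manager() as manager:
        a = A(manager)
        a.producer()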