Code:
from aiohttp import web
from aiortc.mediastreams import MediaStreamTrack
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaPlayer
import asyncio
import json
import os
from multiprocessing import Process, freeze_support
from queue import Queue
import sys
import threading
from time import sleep
import fractions
import time
class RadioServer(Process):
    def __init__(self, q):
        super().__init__()
        self.q = q
        self.ROOT = os.path.dirname(__file__)
        self.pcs = []
        self.channels = []
        self.stream_offers = []
        self.requests = []

    def run(self):
        self.app = web.Application()
        self.app.on_shutdown.append(self.on_shutdown)
        self.app.router.add_get("/", self.index)
        self.app.router.add_get("/radio.js", self.javascript)
        self.app.router.add_get("/jquery-3.5.1.min.js", self.jquery)
        self.app.router.add_post("/offer", self.offer)
        threading.Thread(target=self.fill_the_queues).start()
        web.run_app(self.app, access_log=None, host="192.168.1.20", port=8080, ssl_context=None)

    def fill_the_queues(self):
        while True:
            frame = self.q.get()
            for stream_offer in self.stream_offers:
                stream_offer.q.put(frame)

    async def index(self, request):
        content = open(os.path.join(self.ROOT, "index.html"), encoding="utf8").read()
        return web.Response(content_type="text/html", text=content)

    async def javascript(self, request):
        content = open(os.path.join(self.ROOT, "radio.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def jquery(self, request):
        content = open(os.path.join(self.ROOT, "jquery-3.5.1.min.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def offer(self, request):
        params = await request.json()
        offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
        pc = RTCPeerConnection()
        self.pcs.append(pc)
        self.requests.append(request)
        # prepare epalxeis media
        self.stream_offers.append(CustomRadioStream())
        pc.addTrack(self.stream_offers[-1])

        @pc.on("iceconnectionstatechange")
        async def on_iceconnectionstatechange():
            if pc.iceConnectionState == "failed":
                self.pcs.remove(pc)
                self.requests.remove(request)
                print(str(request.remote) + " disconnected from radio server")
                print("Current peer connections: " + str(len(self.pcs)))

        # handle offer
        await pc.setRemoteDescription(offer)
        # send answer
        answer = await pc.createAnswer()
        await pc.setLocalDescription(answer)
        return web.Response(content_type="application/json", text=json.dumps({"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}))

    async def on_shutdown(self, app):
        # close peer connections
        if self.pcs:
            coros = [pc.close() for pc in self.pcs]
            await asyncio.gather(*coros)
        self.pcs = []
        self.channels = []
        self.stream_offers = []
"""
some other classes here such as CustomRadioStream and RadioOutputStream
"""
if __name__ == "__main__":
freeze_support()
q = Queue()
custom_server_child_process = RadioServer(q)
custom_server_child_process.start()
Error
Traceback (most recent call last):
File "123.py", line 106, in <module>
custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
What am I doing wrong?
If I call the run function directly (instead of start), there is no problem, but I want to use multiprocessing for this class.
Edit: OK, with multiprocessing.Queue it works fine, but now similar code gives this error:
$ python "Papinhio_player.py"
Traceback (most recent call last):
File "Papinhio_player.py", line 3078, in <module>
program = PapinhioPlayerCode()
File "Papinhio_player.py", line 250, in __init__
self.manage_decks_instance = Manage_Decks(self)
File "C:\python\scripts\Papinhio player\src\main\python_files/manage_decks.py", line 356, in __init__
self.custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "stringsource", line 2, in av.audio.codeccontext.AudioCodecContext.__reduce_cython__
TypeError: self.parser,self.ptr cannot be converted to a Python object for pickling
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Some objects cannot be serialized and then deserialized.
The stack trace you posted mentions:
TypeError: cannot pickle '_thread.lock' object
A lock, which holds state in memory and guarantees that no other process can own the same lock at the same moment, is typically a very bad candidate for this operation: what should be created when you deserialize it?
To fix this: choose a way to select the relevant fields of the object you want to serialize, and pickle/unpickle that part.
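As a minimal sketch of that idea (my illustration, not the asker's code): define __getstate__/__setstate__, the standard pickle hooks, so that only the serializable fields travel across and the unpicklable ones are rebuilt on the other side:
import pickle
import threading

class Worker:
    def __init__(self):
        self.data = {"station": "radio"}  # plain data: picklable
        self.lock = threading.Lock()      # holds a _thread.lock: not picklable

    def __getstate__(self):
        # Select only the fields that can be serialized.
        state = self.__dict__.copy()
        del state["lock"]
        return state

    def __setstate__(self, state):
        # Recreate the unpicklable parts from scratch on deserialization.
        self.__dict__.update(state)
        self.lock = threading.Lock()

w = Worker()
restored = pickle.loads(pickle.dumps(w))  # works: the lock is rebuilt, not copied
In the question itself, the direct culprit is the queue.Queue handed to RadioServer: a thread queue wraps plain locks, whereas multiprocessing.Queue is designed to cross process boundaries, which is exactly what the asker's edit above confirms.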
Related
I'm trying to compute a feature for every vertex in my graph using gremlinpython. It's too slow to iterate sequentially over every single vertex. While batching could help provide a speedup, I thought I'd first try parallelizing the query.
Broadly: 1. get the full set of vertices, 2. split them over num_cores=x, 3. iterate over each sub-vertex set in parallel.
But I'm getting the error "OSError: [Errno 9] Bad file descriptor". The code below is my latest attempt at solving this.
import multiprocessing
import os
import sys

import numpy as np
from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.process.traversal import lt

def create_traversal_object():
    graph = Graph()
    g = graph.traversal().withRemote(DriverRemoteConnection('ws://localhost:8182/gremlin', 'g'))
    return g

g = create_traversal_object()
num_cores = 1
vertex_lsts = np.array_split(g.V().limit(30).id().toList(), num_cores)

class FeatureClass():
    def __init__(self, g, vertex_list):
        self.g = g
        self.vertex_list = vertex_list

    def orchestrator(self):
        for vertex_id in self.vertex_list:
            self.compute_number_of_names(float(vertex_id))

    def get_names(self, vertex_id):
        return self.g.V(vertex_id).inE().values('benef_nm').dedup().toList()

class Simulation(multiprocessing.Process):
    def __init__(self, id, worker, *args, **kwargs):
        # must call this before anything else
        multiprocessing.Process.__init__(self)
        self.id = id
        self.worker = worker
        self.args = args
        self.kwargs = kwargs
        sys.stdout.write('[%d] created\n' % (self.id))

    def run(self):
        sys.stdout.write('[%d] running ... process id: %s\n' % (self.id, os.getpid()))
        self.worker.orchestrator()
        sys.stdout.write('[%d] completed\n' % (self.id))

list_of_objects = [FeatureClass(create_traversal_object(), vertex_lst) for vertex_lst in vertex_lsts]
list_of_sim = [Simulation(id=k, worker=obj) for k, obj in enumerate(list_of_objects)]
for sim in list_of_sim:
    sim.start()
Here's the full stack trace; it looks like an issue with tornado, which gremlinpython uses.
Process Simulation-1:
Traceback (most recent call last):
File "/Users/greatora/anaconda3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "<ipython-input-4-b3177477fabe>", line 42, in run
self.worker.orchestrator()
File "<ipython-input-4-b3177477fabe>", line 23, in orchestrator
self.compute_number_of_names(float(vertex_id))
File "<ipython-input-4-b3177477fabe>", line 26, in compute_number_of_names
print(self.g.V(vertex_id).inE().values('benef_nm').dedup().count().next())
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 88, in next
return self.__next__()
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 47, in __next__
self.traversal_strategies.apply_strategies(self)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 512, in apply_strategies
traversal_strategy.apply(traversal)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/remote_connection.py", line 148, in apply
remote_traversal = self.remote_connection.submit(traversal.bytecode)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/driver_remote_connection.py", line 53, in submit
result_set = self._client.submit(bytecode)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/client.py", line 108, in submit
return self.submitAsync(message, bindings=bindings).result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/connection.py", line 63, in cb
f.result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 425, in result
return self.__get_result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/protocol.py", line 74, in write
self._transport.write(message)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/tornado/transport.py", line 37, in write
lambda: self._ws.write_message(message, binary=True))
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/ioloop.py", line 453, in run_sync
self.start()
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/ioloop.py", line 863, in start
event_pairs = self._impl.poll(poll_timeout)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/platform/kqueue.py", line 66, in poll
kevents = self._kqueue.control(None, 1000, timeout)
OSError: [Errno 9] Bad file descriptor
I'm using Python 3.7, gremlinpython==3.4.6, macOS.
I'm still not entirely sure what the issue was, but this works.
import itertools
import multiprocessing
from multiprocessing import Pool

import numpy as np
from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection

def graph_function(vertex_id_list):
    # Each worker process opens its own connection, so nothing
    # unpicklable (sockets, ioloops) crosses the process boundary.
    graph = Graph()
    g = graph.traversal().withRemote(DriverRemoteConnection('ws://localhost:8182/gremlin', 'g'))
    res = []
    for vertex_id in vertex_id_list:
        res.append(g.V(str(vertex_id)).inE().values('benef_nm').dedup().toList())
    return res

num_cores = 4
vertex_lst = g.V().limit(30).id().toList()
vertex_lsts = np.array_split(vertex_lst, num_cores)

with Pool(processes=num_cores) as pool:
    results = pool.map(graph_function, vertex_lsts)
results = [*itertools.chain.from_iterable(results)]
I'd like to dynamically spawn threads or processes in Python, each consuming its own queue.
My code: main.py
import cv2
import numpy as np
from classes import roi_process
import time
import os
import copy
import queue
import multiprocessing

roi_list = eval("[(0,0,639,720,1),(640,0,1280,720,2)]")

for _ROI in roi_list:
    print("################# " + str(_ROI[4]))
    vars()["FILA_" + str(_ROI[4])] = queue.Queue(maxsize=4)
    vars()["T_" + str(_ROI[4])] = multiprocessing.Process(target=roi_process.RoiProcess, args=(eval("FILA_" + str(_ROI[4])), str(_ROI[4])), daemon=True)

for _ROI in roi_list:
    eval("T_" + str(_ROI[4])).start()
classes/roi_process.py
import cv2
import queue
import numpy as np
import imutils
import time
import os

class RoiProcess:
    def __init__(self, queue_pool=None, id_roi=0):
        self.id_roi = id_roi
        self.queue_pool = queue_pool
        print("Started thread for id: " + self.id_roi)
        self.run()

    def run(self):
        i = 0
        while True:
            print(str(self.id_roi) + ": " + str(i))
            i = i + 1
            time.sleep(1)
This is generating the following error:
(tensorflow) C:\projects\car detector\semparar\AI_CARANDPLATE>python main.py
################# 1
################# 2
Traceback (most recent call last):
File "main.py", line 64, in <module>
eval("T_"+str(_ROI[4])).start()
File "C:\Users\MasterRoot\Anaconda3\envs\tensorflow\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Users\MasterRoot\Anaconda3\envs\tensorflow\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\MasterRoot\Anaconda3\envs\tensorflow\lib\multiprocessing\context.py", line 326, in _Popen
return Popen(process_obj)
File "C:\Users\MasterRoot\Anaconda3\envs\tensorflow\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\MasterRoot\Anaconda3\envs\tensorflow\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
(tensorflow) C:\projects\car detector\semparar\AI_CARANDPLATE>Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\MasterRoot\Anaconda3\envs\tensorflow\lib\multiprocessing\spawn.py", line 107, in spawn_main
new_handle = reduction.duplicate(pipe_handle,
File "C:\Users\MasterRoot\Anaconda3\envs\tensorflow\lib\multiprocessing\reduction.py", line 79, in duplicate
return _winapi.DuplicateHandle(
PermissionError: [WinError 5] Acesso negado (access denied)
I really need to start dynamic threads or processes to consume each queue, which will be fed by a while True: loop in main.py.
I will make an OpenCV frame reader and slice the main frame into many pieces.
After that I will feed each dynamic queue with this information, and each thread will run a predictor as I need.
I changed my code to:
FILA = {}
T = {}

# create a queue for each ROI and start an object thread to read that queue continuously
for _ROI in roi_list:
    print("################# " + str(_ROI[4]))
    FILA[_ROI[4]] = queue.Queue(maxsize=4)
    T[_ROI[4]] = multiprocessing.Process(target=roi_process.RoiProcess, args=(FILA[_ROI[4]], str(_ROI[4])), daemon=True).start()
but it keeps hitting the same error.
Don't use the standard queue.Queue with multiprocessing; use:
from multiprocessing import Queue
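Applied to the loop above, a minimal sketch of the fix (FILA, T, roi_process and roi_list are the question's own names from main.py; note it also keeps the Process object instead of the None that start() returns):
import multiprocessing
from classes import roi_process

FILA = {}
T = {}

for _ROI in roi_list:
    # multiprocessing.Queue is built on a pipe plus process-safe primitives
    # and can be handed to a child process; queue.Queue wraps plain thread
    # locks, which is what made pickling fail.
    FILA[_ROI[4]] = multiprocessing.Queue(maxsize=4)
    T[_ROI[4]] = multiprocessing.Process(
        target=roi_process.RoiProcess,
        args=(FILA[_ROI[4]], str(_ROI[4])),
        daemon=True,
    )
    T[_ROI[4]].start()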
I want to start a new process after the main tkinter window is loaded.
I do it like this:
if __name__ == "__main__":
app = MainFrame()
print(("App loaded"))
canvasWindow = app.getCurrentTopFrame().start()
app.mainloop()
print("Window change")
After the tkinter init I call a function to start a new process:
def start(self):
    print("Before logic thread init")
    lock = mp.Lock()
    pro = mp.Process(target=self.manager.startTraining, args=(deley, lock))
    pro.start()
This is the startTraining function:
def startTraining(self, lock):
    """Starts training sequence"""
    print("start tra")
    for x in range(0, self.numOfGenerations):
        print("x")
        self.population.nextGeneration()
        print("Iteration num ", x, "Fitness of best one is ", self.population.bestSalesman)
        lock.acquire()
        self.canvas.updateFrame(self.population.bestSalesman.dna.getAsListOfTuple())
        lock.release()
This is the updateFrame function:
def updateFrame(self, listOfPoints):
    self.canvas.delete("all")
    for y in listOfPoints:
        self.canvas.create_oval(y[0], y[1], y[0] + 5, y[1] + 5, fill="Black")
    li = cycle(listOfPoints)
    p2 = next(li)
    for x in listOfPoints:
        p1, p2 = p2, next(li)
        self.canvas.create_line(p1[0], p1[1], p2[0] + 2, p2[1] + 2)
        if p2 == listOfPoints[-1]:
            self.canvas.create_line(p2[0], p2[1], listOfPoints[0] + 2, listOfPoints[0][1] + 2)
    self.canvas.pack()
I don't get why, but the behavior is such that the main window does load and then this error occurs:
After init
After tkrasie
after start
App loaded
Before logic thread init
Traceback (most recent call last):
File "C:\Users\CrazyUrusai\git\WarsawSchoolOfAI\GeneticsAlgorithms\MainFrame.py", line 129, in <module>
canvasWindow = app.getCurrentTopFrame().start()
File "C:\Users\CrazyUrusai\git\WarsawSchoolOfAI\GeneticsAlgorithms\MainFrame.py", line 111, in start
pro.start()
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _tkinter.tkapp objects
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
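The same pickling rule explains this one (see the answer further below): target=self.manager.startTraining is a bound method, so pickling it pickles the whole object graph, including the _tkinter.tkapp window, which can never be serialized. A hedged sketch of the usual workaround, with the training moved to a module-level function and results pulled back on the tkinter side with after() (training_worker, compute_next_generation and update_frame are my hypothetical names, not the asker's):
import multiprocessing as mp
from queue import Empty

def training_worker(result_queue, num_of_generations):
    # Module-level target: nothing tkinter-related is pickled.
    for x in range(num_of_generations):
        best = compute_next_generation()  # hypothetical stand-in for population.nextGeneration()
        result_queue.put(best)

def poll_queue(root, canvas, result_queue):
    # Runs in the tkinter process; only picklable data crosses over.
    try:
        points = result_queue.get_nowait()
        update_frame(canvas, points)      # hypothetical redraw helper
    except Empty:
        pass
    root.after(100, poll_queue, root, canvas, result_queue)

# usage sketch:
# q = mp.Queue()
# mp.Process(target=training_worker, args=(q, 100)).start()
# poll_queue(app, canvas, q)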
I'm writing a program that spawns a process and restarts it under certain conditions. For example, if a child process stops sending data to the mother process for a certain period of time, I want the mother process to terminate the child and restart it. I thought I could use a thread to receive data from the child process and restart the child process, but it doesn't work the way I thought it would.
import numpy as np
import multiprocessing as mp
import threading
import time
from apscheduler.schedulers.background import BackgroundScheduler

pipe_in, pipe_out = mp.Pipe()

class Mother():
    def __init__(self):
        self.pipe_out = pipe_out
        self.proc = mp.Process(target=self.test_func, args=(pipe_in,))
        self.proc.start()
        self.thread = threading.Thread(target=self.thread_reciever, args=(self.pipe_out,))
        self.thread.start()

    def thread_reciever(self, pipe_out):
        while True:
            value = pipe_out.recv()
            print(value)
            if value == 5:
                self.proc.terminate()
                time.sleep(2)
                self.proc = mp.Process(target=self.test_func)
                self.proc.start()

    def test_func(self, pipe_in):
        for i in range(10):
            pipe_in.send(i)
            time.sleep(1)

if __name__ == '__main__':
    r = Mother()
It prints out this error.
D:\>d:\python36-32\python.exe temp06.py
0
1
2
3
4
5
Exception in thread Thread-1:
Traceback (most recent call last):
File "d:\python36-32\lib\threading.py", line 916, in _bootstrap_inner
self.run()
File "d:\python36-32\lib\threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "temp06.py", line 28, in thread_reciever
self.proc.start()
File "d:\python36-32\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "d:\python36-32\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "d:\python36-32\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "d:\python36-32\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "d:\python36-32\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
D:\>Traceback (most recent call last):
File "<string>", line 1, in <module>
File "d:\python36-32\lib\multiprocessing\spawn.py", line 99, in spawn_main
new_handle = reduction.steal_handle(parent_pid, pipe_handle)
File "d:\python36-32\lib\multiprocessing\reduction.py", line 82, in steal_handle
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
OSError: [WinError 87]
How could I start and terminate a process inside a thread? (I'm using a thread because it can synchronously receive data from a different process.) Or are there other ways to do this job?
Edit: test_func as a global function
import numpy as np
import multiprocessing as mp
import threading
import time
from apscheduler.schedulers.background import BackgroundScheduler

pipe_in, pipe_out = mp.Pipe()

def test_func(pipe_in):
    for i in range(10):
        pipe_in.send(i)
        time.sleep(1)

class Mother():
    def __init__(self):
        self.pipe_out = pipe_out
        mp.freeze_support()
        self.proc = mp.Process(target=test_func, args=(pipe_in,))
        self.proc.start()
        self.thread = threading.Thread(target=self.thread_reciever, args=(self.pipe_out,))
        self.thread.start()

    def thread_reciever(self, pipe_out):
        while True:
            value = pipe_out.recv()
            print(value)
            if value == 5:
                self.proc.terminate()
                time.sleep(2)
                mp.freeze_support()
                self.proc = mp.Process(target=test_func, args=(pipe_in,))
                self.proc.start()

if __name__ == '__main__':
    r = Mother()
OUTPUT
D:\> d:\python36-32\python.exe temp06.py
0
1
2
3
4
5
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "d:\python36-32\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "d:\python36-32\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
AttributeError: Can't get attribute 'test_func' on <module '__main__' (built-in)>
Under Windows there is no fork syscall, so Python starts a new interpreter instance and uses pickle/unpickle to reconstruct the execution context in the child. But thread.Lock is not picklable: while pickling self.test_func, the self.thread attribute references a thread.Lock object, which makes the whole instance unpicklable.
You can simply change test_func to a plain global function, with no thread object reference:
self.proc = mp.Process(target=test_func, args=(pipe_in,))
...
def test_func(pipe_in):
    for i in range(10):
        pipe_in.send(i)
        time.sleep(1)
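For completeness, a minimal self-contained sketch of that pattern (my reconstruction, trimmed to the essentials; the pipe is created under the __main__ guard so the spawned child only has to re-import the module-level test_func):
import multiprocessing as mp
import threading
import time

def test_func(pipe_in):
    # Module-level target: the child re-imports this module and looks the
    # function up by name, so no thread or lock objects are pickled.
    for i in range(10):
        pipe_in.send(i)
        time.sleep(1)

class Mother:
    def __init__(self, pipe_in, pipe_out):
        self.pipe_in = pipe_in
        self.proc = mp.Process(target=test_func, args=(pipe_in,))
        self.proc.start()
        self.thread = threading.Thread(target=self.receiver, args=(pipe_out,))
        self.thread.start()

    def receiver(self, pipe_out):
        while True:
            value = pipe_out.recv()
            print(value)
            if value == 5:
                self.proc.terminate()
                self.proc = mp.Process(target=test_func, args=(self.pipe_in,))
                self.proc.start()

if __name__ == '__main__':
    pipe_in, pipe_out = mp.Pipe()
    Mother(pipe_in, pipe_out)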
I can't find an explanation for this behavior in Python 3:
from multiprocessing import Process, cpu_count, freeze_support, Manager

class A:
    def __init__(self):
        # self._manager = Manager()
        # self._list = self._manager.list()
        manager = Manager()
        self._list = manager.list()

    def producer(self):
        processes = []
        cores = cpu_count()
        for i in range(cores):
            process = Process(target=self.worker)
            process.start()
            processes.append(process)
        for process in processes:
            process.join()

    def worker(self):
        print('I was called')

if __name__ == '__main__':
    freeze_support()
    a = A()
    a.producer()
With this in __init__:
self._manager = Manager()
self._list = self._manager.list()
I get OSError: handle is closed at the process.start() call.
With this in __init__:
manager = Manager()
self._list = manager.list()
All seems to work.
I read https://docs.python.org/3.6/library/multiprocessing.html#sharing-state-between-processes but I can't find an explanation of why an instance of Manager() can't be an instance attribute in the example above. My best guess is that Manager() is itself a process, and with a target like target=self.worker the whole instance (manager included) has to be pickled for the child, which breaks some logic in handling processes.
Question: Am I right, or am I missing something?
Full Traceback:
Traceback (most recent call last):
File "G:/files-from-server/apps/test_module/test_export.py", line 27, in <module>
a.producer()
File "G:/files-from-server/apps/test_module/test_export.py", line 15, in producer
process.start()
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\connection.py", line 939, in reduce_pipe_connection
dh = reduction.DupHandle(conn.fileno(), access)
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\connection.py", line 170, in fileno
self._check_closed()
File "c:\users\maxim\appdata\local\programs\python\python36-32\Lib\multiprocessing\connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
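That guess is consistent with the traceback: pickling the A instance for target=self.worker drags self._manager along, and the manager's internal pipe handle cannot be duplicated. A minimal sketch of one way to keep the manager as an attribute without pickling it (the __getstate__ hook is my illustration, not from the question):
from multiprocessing import Manager, Process, cpu_count, freeze_support

class A:
    def __init__(self):
        self._manager = Manager()
        self._list = self._manager.list()

    def __getstate__(self):
        # Drop the manager before the instance is pickled for the child.
        # The list proxy in self._list is picklable on its own and still
        # talks to the parent's manager process.
        state = self.__dict__.copy()
        state.pop('_manager', None)
        return state

    def producer(self):
        processes = []
        for _ in range(cpu_count()):
            process = Process(target=self.worker)
            process.start()
            processes.append(process)
        for process in processes:
            process.join()

    def worker(self):
        self._list.append('I was called')
        print('I was called')

if __name__ == '__main__':
    freeze_support()
    a = A()
    a.producer()
    print(list(a._list))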