How do I put an existing object into shared memory? - python

I've managed to create an object that can exist in shared memory with BaseManager and NamespaceProxy; however, all the examples I've seen require me to create the object through the proxy. For example:
import time
from multiprocessing.managers import BaseManager, NamespaceProxy

class Foo:
    def __init__(self):
        self._a = 1000

    def get_a(self):
        return self._a

class SharedFoo(NamespaceProxy):
    _exposed_ = ('__getattribute__', '__getattr__', '__setattr__', '__init__', 'get_a')

    def get_a(self):
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('get_a', ())

class FooManager(BaseManager):
    pass

def test():
    FooManager.register('Foo', Foo, SharedFoo)
    with FooManager() as manager:
        ls = []
        t = time.time()
        for i in range(100):
            ls.append(manager.Foo())
        print(time.time() - t)
which prints out:
0.44 (and some other numbers that I omitted)
Since I would likely create millions of Foo objects, this is too slow for the task. I tried to make it faster like this:
def do_stuff(obj):
    obj.ls[4].set_a(300)

def test():
    FooManager.register('Foo', Foo, SharedFoo)
    with FooManager() as manager:
        if manager._Server != None:
            manager._Server.mutex = NoLock()
        ls = []
        t = time.time()
        for i in range(100000):
            ls.append(Foo())
        foos = manager.Foo()
        foos.ls = mp.Manager().list(ls)
        print(time.time() - t)
        processes = [Process(target=do_stuff, args=(foos,)) for _ in range(3)]
        for process in processes:
            process.start()
        for process in processes:
            process.join()
        print(foos.ls[4].get_a())
which gave me this error:
Traceback (most recent call last):
File "/path/to/lib/python3.7/multiprocessing/managers.py", line 788, in _callmethod
conn = self._tls.connection
AttributeError: 'ForkAwareLocal' object has no attribute 'connection'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/path/to/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/path/to/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/path/to/myproject/test.py", line 121, in do_stuff
obj.ls[4].set_a(300)
File "/path/to/lib/python3.7/multiprocessing/managers.py", line 1099, in __getattr__
return callmethod('__getattribute__', (key,))
File "/path/to/lib/python3.7/multiprocessing/managers.py", line 792, in _callmethod
self._connect()
File "/path/to/lib/python3.7/multiprocessing/managers.py", line 779, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
File "/path/to/lib/python3.7/multiprocessing/connection.py", line 492, in Client
c = SocketClient(address)
File "/path/to/lib/python3.7/multiprocessing/connection.py", line 619, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
Is what I'm trying to do possible? If so, what should I use (not looking for a complete solution, just some resources on how to do it)? I'm using Python 3.7 on Linux, if that's relevant.
Thanks
Edit: Is this feasible with mmap, or am I going in a completely wrong direction? It looks promising, but the documentation seems to say that it's more for files (again, not looking for a complete solution, just whether mmap would work with custom objects).
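Not an answer from the thread, but on the mmap edit: mmap exposes a flat byte buffer, so it only holds custom objects if you lay their fields out as bytes yourself; Python 3.8's multiprocessing.shared_memory does that mmap handling for you. Below is a minimal sketch under those assumptions, where each Foo-like record is just one 64-bit integer in a shared buffer; the FooView wrapper and the one-int-per-record layout are made up for illustration:

# Sketch only: multiprocessing.shared_memory requires Python 3.8+.
import struct
from multiprocessing import Process, shared_memory

N = 100_000                    # number of Foo-like records
RECORD = struct.Struct('q')    # one signed 64-bit int per record (the _a field)

class FooView:
    """Hypothetical view over one record in the shared buffer."""
    def __init__(self, buf, index):
        self._buf, self._offset = buf, index * RECORD.size

    def get_a(self):
        return RECORD.unpack_from(self._buf, self._offset)[0]

    def set_a(self, value):
        RECORD.pack_into(self._buf, self._offset, value)

def worker(shm_name):
    shm = shared_memory.SharedMemory(name=shm_name)
    FooView(shm.buf, 4).set_a(300)   # mutate record 4 in place, no proxy round-trip
    shm.close()

if __name__ == '__main__':
    shm = shared_memory.SharedMemory(create=True, size=N * RECORD.size)
    for i in range(N):
        FooView(shm.buf, i).set_a(1000)
    p = Process(target=worker, args=(shm.name,))
    p.start()
    p.join()
    print(FooView(shm.buf, 4).get_a())   # prints 300
    shm.close()
    shm.unlink()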

Related

Multiprocessing BrokenPipeError

I have the following problem. I am running a parallel task and I am getting this error:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "eclat_model.py", line 127, in do_work
function(*args, work_queue, valid_list)
File "eclat_model.py", line 115, in eclat_parallel_helper
valid_list.extend(next_vectors)
File "<string>", line 2, in extend
File "/usr/lib/python3.8/multiprocessing/managers.py", line 834, in _callmethod
conn.send((self._id, methodname, args, kwds))
File "/usr/lib/python3.8/multiprocessing/connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/usr/lib/python3.8/multiprocessing/connection.py", line 404, in _send_bytes
self._send(header)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 368, in _send
n = write(self._handle, buf)
BrokenPipeError: [Errno 32] Broken pipe
Relevant functions in eclat_model.py look like this:
def eclat_parallel_helper(index, bit_vectors, min_support, work_queue, valid_list):
    next_vectors = []
    for j in range(index + 1, len(bit_vectors)):
        item_vector = bit_vectors[index][0] | bit_vectors[j][0]
        transaction_vector = bit_vectors[index][1] & bit_vectors[j][1]
        support = get_vector_support(transaction_vector)
        if support >= min_support:
            next_vectors.append((item_vector, transaction_vector, support))
    if len(next_vectors) > 0:
        valid_list.extend(next_vectors)
        for i in range(len(next_vectors)):
            work_queue.put((eclat_parallel_helper, (i, next_vectors, min_support)))

def do_work(work_queue, valid_list, not_done):
    # work queue entries have the form (function, args)
    while not_done.value:
        try:
            function, args = work_queue.get_nowait()
        except QueueEmptyError:
            continue
        function(*args, work_queue, valid_list)
        work_queue.task_done()
    work_queue.close()
EDIT:
The multiprocessing part of the code is as follows. bit_vectors is a list of lists, where each entry is of the form [items, transactions, support]: items is a bit vector encoding which items appear in the itemset, transactions is a bit vector encoding which transactions the itemset appears in, and support is the number of transactions in which the itemset occurs.
from multiprocessing import Process, JoinableQueue, Manager, Value, cpu_count

def eclat_parallel(bit_vectors, min_support):
    not_done = Value('i', 1)
    manager = Manager()
    valid_list = manager.list()
    work_queue = JoinableQueue()
    for i in range(len(bit_vectors)):
        work_queue.put((eclat_parallel_helper, (i, bit_vectors, min_support)))
    processes = []
    for i in range(cpu_count()):
        p = Process(target=do_work, args=(work_queue, valid_list, not_done), daemon=True)
        p.start()
        processes.append(p)
    work_queue.join()
    not_done.value = 0
    work_queue.close()
    valid_itemset_vectors = bit_vectors
    for element in valid_list:
        valid_itemset_vectors.append(element)
    for p in processes:
        p.join()
    return valid_itemset_vectors
What does this error mean, please? Am I appending too many elements to the next_vectors list?
I had the same issue; in my case, just adding a delay (time.sleep(0.01)) solved it.
The problem is that the individual processes are too fast on the queue, which causes the error.
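For illustration, a minimal sketch of where that delay could go, assuming it belongs in the empty-queue branch of do_work so the workers back off instead of spinning; QueueEmptyError is assumed to be the standard library's queue.Empty:

import time
from queue import Empty as QueueEmptyError

def do_work(work_queue, valid_list, not_done):
    # work queue entries have the form (function, args)
    while not_done.value:
        try:
            function, args = work_queue.get_nowait()
        except QueueEmptyError:
            time.sleep(0.01)   # brief back-off instead of hammering the empty queue
            continue
        function(*args, work_queue, valid_list)
        work_queue.task_done()
    work_queue.close()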

Python Process cannot pickle

Code:
from aiohttp import web
from aiortc.mediastreams import MediaStreamTrack
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaPlayer
import asyncio
import json
import os
from multiprocessing import Process, freeze_support
from queue import Queue
import sys
import threading
from time import sleep
import fractions
import time

class RadioServer(Process):
    def __init__(self, q):
        super().__init__()
        self.q = q
        self.ROOT = os.path.dirname(__file__)
        self.pcs = []
        self.channels = []
        self.stream_offers = []
        self.requests = []

    def run(self):
        self.app = web.Application()
        self.app.on_shutdown.append(self.on_shutdown)
        self.app.router.add_get("/", self.index)
        self.app.router.add_get("/radio.js", self.javascript)
        self.app.router.add_get("/jquery-3.5.1.min.js", self.jquery)
        self.app.router.add_post("/offer", self.offer)
        threading.Thread(target=self.fill_the_queues).start()
        web.run_app(self.app, access_log=None, host="192.168.1.20", port="8080", ssl_context=None)

    def fill_the_queues(self):
        while(True):
            frame = self.q.get()
            for stream_offer in self.stream_offers:
                stream_offer.q.put(frame)

    async def index(self, request):
        content = open(os.path.join(self.ROOT, "index.html"), encoding="utf8").read()
        return web.Response(content_type="text/html", text=content)

    async def javascript(self, request):
        content = open(os.path.join(self.ROOT, "radio.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def jquery(self, request):
        content = open(os.path.join(self.ROOT, "jquery-3.5.1.min.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def offer(self, request):
        params = await request.json()
        offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
        pc = RTCPeerConnection()
        self.pcs.append(pc)
        self.requests.append(request)
        # prepare epalxeis media
        self.stream_offers.append(CustomRadioStream())
        pc.addTrack(self.stream_offers[-1])

        #pc.on("iceconnectionstatechange")
        async def on_iceconnectionstatechange():
            if pc.iceConnectionState == "failed":
                self.pcs.remove(pc)
                self.requests.remove(request)
                print(str(request.remote)+" disconnected from radio server")
                print("Current peer connections:"+str(len(self.pcs)))

        # handle offer
        await pc.setRemoteDescription(offer)
        # send answer
        answer = await pc.createAnswer()
        await pc.setLocalDescription(answer)
        return web.Response(content_type="application/json", text=json.dumps({"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}))

    async def on_shutdown(self, app):
        # close peer connections
        if self.pcs:
            coros = [pc.close() for pc in self.pcs]
            await asyncio.gather(*coros)
        self.pcs = []
        self.channels = []
        self.stream_offers = []

"""
some other classes here such as CustomRadioStream and RadioOutputStream
"""

if __name__ == "__main__":
    freeze_support()
    q = Queue()
    custom_server_child_process = RadioServer(q)
    custom_server_child_process.start()
Error
Traceback (most recent call last):
File "123.py", line 106, in <module>
custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
What am I doing wrong?
If I call the run function directly (instead of start), there is no problem, but I want to use multiprocessing for this class.
Edit: OK, it works fine with multiprocessing.Queue, but now similar code produces this error:
$ python "Papinhio_player.py"
Traceback (most recent call last):
File "Papinhio_player.py", line 3078, in <module>
program = PapinhioPlayerCode()
File "Papinhio_player.py", line 250, in __init__
self.manage_decks_instance = Manage_Decks(self)
File "C:\python\scripts\Papinhio player\src\main\python_files/manage_decks.py", line 356, in __init__
self.custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "stringsource", line 2, in av.audio.codeccontext.AudioCodecContext.__reduce_cython__
TypeError: self.parser,self.ptr cannot be converted to a Python object for pickling
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Some objects cannot be serialized and then deserialized.
The stack trace you posted mentions:
TypeError: cannot pickle '_thread.lock' object
A lock, which holds state in memory and guarantees that no other process can own the same lock at the same moment, is typically a very bad candidate for this operation -- what should be created when you deserialize it?
To fix this: choose a way to select the relevant fields of the object you want to serialize, and pickle/unpickle that part.
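As a minimal sketch of that advice, assuming a hypothetical class whose _lock attribute is the unpicklable part (the class and attribute names are invented for the example), __getstate__/__setstate__ can restrict what gets pickled and recreate the rest on the other side:

import pickle
import threading

class Worker:
    def __init__(self, name):
        self.name = name               # plain, picklable state
        self._lock = threading.Lock()  # unpicklable helper object

    def __getstate__(self):
        # Serialize only the picklable fields.
        state = self.__dict__.copy()
        del state['_lock']
        return state

    def __setstate__(self, state):
        # Recreate the unpicklable helper on the receiving side.
        self.__dict__.update(state)
        self._lock = threading.Lock()

data = pickle.dumps(Worker("demo"))   # works because the lock is excluded
restored = pickle.loads(data)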

Can't use namedtuple with concurrent.futures? [duplicate]

>>> import concurrent.futures
>>> from collections import namedtuple
>>> #1. Initialise namedtuple here
>>> # tm = namedtuple("tm", ["pk"])
>>> class T:
...     #2. Initialise named tuple here
...     #tm = namedtuple("tm", ["pk"])
...     def __init__(self):
...         #3: Initialise named tuple here
...         tm = namedtuple("tm", ["pk"])
...         self.x = {'key': [tm('value')]}
...     def test1(self):
...         with concurrent.futures.ProcessPoolExecutor(max_workers=1) as executor:
...             results = executor.map(self.test, ["key"])
...         return results
...     def test(self, s):
...         print(self.x[s])
...
>>> t = T().test1()
This gets stuck here.
^CTraceback (most recent call last):
File "<stdin>", line 1, in <module>
Process ForkProcess-1:
File "<stdin>", line 10, in test1
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 623, in __exit__
self.shutdown(wait=True)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/process.py", line 681, in shutdown
self._queue_management_thread.join()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1044, in join
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/process.py", line 233, in _process_worker
call_item = call_queue.get(block=True)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/queues.py", line 94, in get
res = self._recv_bytes()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/connection.py", line 216, in recv_bytes
buf = self._recv_bytes(maxlength)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/connection.py", line 407, in _recv_bytes
buf = self._recv(4)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/connection.py", line 379, in _recv
chunk = read(handle, remaining)
KeyboardInterrupt
self._wait_for_tstate_lock()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1060, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
If I initialise the namedtuple outside the class (as in #1), this works fine. Could someone please let me know what the issue is if I initialise it as per #2 or #3?
You're not changing where you initialize the namedtuple. You're changing where you create the namedtuple class.
When you create a namedtuple class named "x" in module "y" with collections.namedtuple, its __module__ is set to 'y' and its __qualname__ is set to 'x'. Pickling and unpickling rely on this class actually being available at the y.x location indicated by these attributes, but in cases 2 and 3 of your example it's not.
Python can't pickle the namedtuple, which breaks inter-process communication with the workers. Executing self.test in a worker process relies on pickling self.test and unpickling a copy of it in the worker process, and that can't happen if self.x is an instance of a class that can't be pickled.
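For illustration, a minimal sketch of the working variant (#1), with the namedtuple class defined at module level so pickle can find it again by its __module__ and __qualname__ (the surrounding script layout is assumed):

import concurrent.futures
from collections import namedtuple

# Defined at module level: pickle can locate this class by name in worker processes.
tm = namedtuple("tm", ["pk"])

class T:
    def __init__(self):
        self.x = {'key': [tm('value')]}

    def test1(self):
        with concurrent.futures.ProcessPoolExecutor(max_workers=1) as executor:
            results = executor.map(self.test, ["key"])
        return results

    def test(self, s):
        print(self.x[s])

if __name__ == "__main__":
    T().test1()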

Multiprocessing Gremlin "OSError: [Errno 9] Bad file descriptor"

I'm trying to compute a feature for every vertex in my graph using gremlinpython. It's too slow to iterate sequentially over every single vertex. While batching could help to provide a speedup, I thought I'd first try parallelizing the query.
Broadly, 1. get the full set of vertices, 2. split them over num_cores=x, 3. iterate over each sub-vertex set in parallel.
But I'm getting the error "OSError: [Errno 9] Bad file descriptor". The below code is my latest attempt at solving this.
import multiprocessing
import os
import sys

import numpy as np
from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.process.traversal import lt

def create_traversal_object():
    graph = Graph()
    g = graph.traversal().withRemote(DriverRemoteConnection('ws://localhost:8182/gremlin', 'g'))
    return g

g = create_traversal_object()
num_cores = 1
vertex_lsts = np.array_split(g.V().limit(30).id().toList(), num_cores)

class FeatureClass():
    def __init__(self, g, vertex_list):
        self.g = g
        self.vertex_list = vertex_list

    def orchestrator(self):
        for vertex_id in self.vertex_list:
            self.compute_number_of_names(float(vertex_id))

    def get_names(self, vertex_id):
        return self.g.V(vertex_id).inE().values('benef_nm').dedup().toList()

class Simulation(multiprocessing.Process):
    def __init__(self, id, worker, *args, **kwargs):
        # must call this before anything else
        multiprocessing.Process.__init__(self)
        self.id = id
        self.worker = worker
        self.args = args
        self.kwargs = kwargs
        sys.stdout.write('[%d] created\n' % (self.id))

    def run(self):
        sys.stdout.write('[%d] running ... process id: %s\n' % (self.id, os.getpid()))
        self.worker.orchestrator()
        sys.stdout.write('[%d] completed\n' % (self.id))

list_of_objects = [FeatureClass(create_traversal_object(), vertex_lst) for vertex_lst in vertex_lsts]
list_of_sim = [Simulation(id=k, worker=obj) for k, obj in enumerate(list_of_objects)]
for sim in list_of_sim:
    sim.start()
Here's the full stack trace; it looks like it's an issue with tornado, which gremlinpython uses.
Process Simulation-1:
Traceback (most recent call last):
File "/Users/greatora/anaconda3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "<ipython-input-4-b3177477fabe>", line 42, in run
self.worker.orchestrator()
File "<ipython-input-4-b3177477fabe>", line 23, in orchestrator
self.compute_number_of_names(float(vertex_id))
File "<ipython-input-4-b3177477fabe>", line 26, in compute_number_of_names
print(self.g.V(vertex_id).inE().values('benef_nm').dedup().count().next())
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 88, in next
return self.__next__()
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 47, in __next__
self.traversal_strategies.apply_strategies(self)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 512, in apply_strategies
traversal_strategy.apply(traversal)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/remote_connection.py", line 148, in apply
remote_traversal = self.remote_connection.submit(traversal.bytecode)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/driver_remote_connection.py", line 53, in submit
result_set = self._client.submit(bytecode)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/client.py", line 108, in submit
return self.submitAsync(message, bindings=bindings).result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/connection.py", line 63, in cb
f.result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 425, in result
return self.__get_result()
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/protocol.py", line 74, in write
self._transport.write(message)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/tornado/transport.py", line 37, in write
lambda: self._ws.write_message(message, binary=True))
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/ioloop.py", line 453, in run_sync
self.start()
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/ioloop.py", line 863, in start
event_pairs = self._impl.poll(poll_timeout)
File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/platform/kqueue.py", line 66, in poll
kevents = self._kqueue.control(None, 1000, timeout)
OSError: [Errno 9] Bad file descriptor
I'm using Python 3.7, gremlinpython==3.4.6, macOS.
I'm still not entirely sure what the issue was, but this works.
import itertools
import multiprocessing
from multiprocessing import Pool

import numpy as np
from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection

def graph_function(vertex_id_list):
    # Each worker builds its own remote connection instead of sharing one.
    graph = Graph()
    g = graph.traversal().withRemote(DriverRemoteConnection('ws://localhost:8182/gremlin', 'g'))
    res = []
    for vertex_id in vertex_id_list:
        res.append(g.V(str(vertex_id)).inE().values('benef_nm').dedup().toList())
    return res

num_cores = 4
vertex_lst = g.V().limit(30).id().toList()   # g here is the parent-process traversal object from the question
vertex_lsts = np.array_split(vertex_lst, num_cores)

with Pool(processes=num_cores) as pool:
    results = pool.map(graph_function, vertex_lsts)
results = [*itertools.chain.from_iterable(results)]
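A plausible reading of why this works, though it isn't confirmed in the thread: the original version created the DriverRemoteConnection (and its websocket/event loop) in the parent process and let the forked children reuse it, whereas graph_function above opens a fresh connection inside each worker, so no socket file descriptor is shared across processes.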

Python Multiprocessing Process exits before operation

I have a Python object, a list of dictionaries, and I want to fill each of those dicts with key-value pairs simultaneously, using multiple processors and the multiprocessing module in Python. For that purpose I am using a Manager to store that object. Here is the code:
from pylab import *
from numpy.random import *
import multiprocessing
import threading
import random

def tasks_start(id, global_lists):
    counter_lock = threading.Lock()
    with counter_lock:
        num = int(10*random.random())
        global_lists[num] = {'1':'Random'}
    print("Id: ", id)
    print(global_lists[0])

if __name__ == '__main__':
    numProcessors = 6
    pool = multiprocessing.Pool(numProcessors)
    global_list = multiprocessing.Manager().list(range(100))
    for idx in range(100):
        global_list[idx] = multiprocessing.Manager().dict()
    tasks = []
    for id in range(10):
        tasks.append((id, global_list))
    pool.starmap(tasks_start, tasks)
    pool.close()
    pool.join()
So what I am doing here is creating a list of dictionaries stored as global_list and then calling the tasks_start() method 10 times using Python's starmap() (just so that I can later extend this to multiple arguments) to fill the list of dictionaries. As a simple test case, I just use the random generator to pick one dictionary from the list each time and fill it with some value. When I run the program, the following error occurs:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/usr/lib/python3.4/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/usr/lib/python3.4/multiprocessing/pool.py", line 47, in starmapstar
return list(itertools.starmap(args[0], args[1]))
File "/home/cysis/inhibition_soum/motif_temporal_patterns/code_versions/2016/09/09_08/parallel_test/test_error_manager.py", line 14, in tasks_start
print(global_lists[0])
File "<string>", line 2, in __getitem__
File "/usr/lib/python3.4/multiprocessing/managers.py", line 732, in _callmethod
kind, result = conn.recv()
File "/usr/lib/python3.4/multiprocessing/connection.py", line 251, in recv
return ForkingPickler.loads(buf.getbuffer())
File "/usr/lib/python3.4/multiprocessing/managers.py", line 852, in RebuildProxy
return func(token, serializer, incref=incref, **kwds)
File "/usr/lib/python3.4/multiprocessing/managers.py", line 706, in __init__
self._incref()
File "/usr/lib/python3.4/multiprocessing/managers.py", line 756, in _incref
conn = self._Client(self._token.address, authkey=self._authkey)
File "/usr/lib/python3.4/multiprocessing/connection.py", line 495, in Client
c = SocketClient(address)
File "/usr/lib/python3.4/multiprocessing/connection.py", line 624, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/cysis/inhibition_soum/motif_temporal_patterns/code_versions/2016/09/09_08/parallel_test/test_error_manager.py", line 29, in <module>
pool.starmap(tasks_start, tasks)
File "/usr/lib/python3.4/multiprocessing/pool.py", line 268, in starmap
return self._map_async(func, iterable, starmapstar, chunksize).get()
File "/usr/lib/python3.4/multiprocessing/pool.py", line 599, in get
raise self._value
FileNotFoundError: [Errno 2] No such file or directory
In my opinion, before the last print(global_lists[0]) is executed, the Manager exits and is therefore not able to find global_lists[0]. Can anybody shed some light on this?
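Not an answer from the thread, but a minimal sketch of the pattern the code seems to be reaching for, assuming the nested dicts should come from a single long-lived Manager rather than a fresh multiprocessing.Manager() per index (each Manager() call starts its own server process, which can shut down while proxies to it are still stored in the list):

import multiprocessing
import random
import threading

def tasks_start(id, global_lists):
    counter_lock = threading.Lock()
    with counter_lock:
        num = int(10 * random.random())
        global_lists[num] = {'1': 'Random'}
    print("Id: ", id)
    print(global_lists[0])

if __name__ == '__main__':
    numProcessors = 6
    with multiprocessing.Manager() as manager:        # one manager, kept alive for the whole run
        global_list = manager.list(range(100))
        for idx in range(100):
            global_list[idx] = manager.dict()         # dict proxies backed by the same manager
        with multiprocessing.Pool(numProcessors) as pool:
            tasks = [(id, global_list) for id in range(10)]
            pool.starmap(tasks_start, tasks)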
