I'm trying to compute a feature for every vertex in my graph using gremlinpython. Iterating over the vertices one at a time is too slow. Batching the queries could provide a speedup, but I thought I'd first try parallelizing the query.
Broadly: (1) get the full set of vertex ids, (2) split them into num_cores chunks, (3) process each chunk in parallel.
But I'm getting the error "OSError: [Errno 9] Bad file descriptor". The code below is my latest attempt at solving this.
import multiprocessing
import os
import sys

import numpy as np

from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection

def create_traversal_object():
    graph = Graph()
    g = graph.traversal().withRemote(
        DriverRemoteConnection('ws://localhost:8182/gremlin', 'g'))
    return g

g = create_traversal_object()
num_cores = 1
vertex_lsts = np.array_split(g.V().limit(30).id().toList(), num_cores)
class FeatureClass():
    def __init__(self, g, vertex_list):
        self.g = g
        self.vertex_list = vertex_list

    def orchestrator(self):
        for vertex_id in self.vertex_list:
            self.get_names(float(vertex_id))

    def get_names(self, vertex_id):
        return self.g.V(vertex_id).inE().values('benef_nm').dedup().toList()
class Simulation(multiprocessing.Process):
    def __init__(self, id, worker, *args, **kwargs):
        # must call this before anything else
        multiprocessing.Process.__init__(self)
        self.id = id
        self.worker = worker
        self.args = args
        self.kwargs = kwargs
        sys.stdout.write('[%d] created\n' % (self.id))

    def run(self):
        sys.stdout.write('[%d] running ... process id: %s\n' % (self.id, os.getpid()))
        self.worker.orchestrator()
        sys.stdout.write('[%d] completed\n' % (self.id))
list_of_objects = [FeatureClass(create_traversal_object(), vertex_lst)
                   for vertex_lst in vertex_lsts]
list_of_sim = [Simulation(id=k, worker=obj) for k, obj in enumerate(list_of_objects)]

for sim in list_of_sim:
    sim.start()
Here's the full stack trace; it looks like the issue is in Tornado, which gremlinpython uses under the hood.
Process Simulation-1:
Traceback (most recent call last):
  File "/Users/greatora/anaconda3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "<ipython-input-4-b3177477fabe>", line 42, in run
    self.worker.orchestrator()
  File "<ipython-input-4-b3177477fabe>", line 23, in orchestrator
    self.compute_number_of_names(float(vertex_id))
  File "<ipython-input-4-b3177477fabe>", line 26, in compute_number_of_names
    print(self.g.V(vertex_id).inE().values('benef_nm').dedup().count().next())
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 88, in next
    return self.__next__()
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 47, in __next__
    self.traversal_strategies.apply_strategies(self)
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/process/traversal.py", line 512, in apply_strategies
    traversal_strategy.apply(traversal)
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/remote_connection.py", line 148, in apply
    remote_traversal = self.remote_connection.submit(traversal.bytecode)
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/driver_remote_connection.py", line 53, in submit
    result_set = self._client.submit(bytecode)
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/client.py", line 108, in submit
    return self.submitAsync(message, bindings=bindings).result()
  File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 432, in result
    return self.__get_result()
  File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
    raise self._exception
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/connection.py", line 63, in cb
    f.result()
  File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 425, in result
    return self.__get_result()
  File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
    raise self._exception
  File "/Users/greatora/anaconda3/lib/python3.6/concurrent/futures/thread.py", line 56, in run
    result = self.fn(*self.args, **self.kwargs)
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/protocol.py", line 74, in write
    self._transport.write(message)
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/gremlin_python/driver/tornado/transport.py", line 37, in write
    lambda: self._ws.write_message(message, binary=True))
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/ioloop.py", line 453, in run_sync
    self.start()
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/ioloop.py", line 863, in start
    event_pairs = self._impl.poll(poll_timeout)
  File "/Users/greatora/anaconda3/lib/python3.6/site-packages/tornado/platform/kqueue.py", line 66, in poll
    kevents = self._kqueue.control(None, 1000, timeout)
OSError: [Errno 9] Bad file descriptor
I'm using Python 3.7, gremlinpython==3.4.6, on macOS.
I'm still not entirely sure what the issue was, but the following works. My best guess is that the websocket connection (and the Tornado event loop behind it) was created in the parent process, and the forked workers inherited a file descriptor they could not use; opening a fresh connection inside each worker process avoids that.
import itertools
import multiprocessing
from multiprocessing import Pool

import numpy as np

from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection

def graph_function(vertex_id_list):
    # each worker process opens its own connection
    graph = Graph()
    g = graph.traversal().withRemote(
        DriverRemoteConnection('ws://localhost:8182/gremlin', 'g'))
    res = []
    for vertex_id in vertex_id_list:
        res.append(g.V(str(vertex_id)).inE().values('benef_nm').dedup().toList())
    return res

num_cores = 4
vertex_lst = g.V().limit(30).id().toList()  # g is the parent-process traversal from earlier
vertex_lsts = np.array_split(vertex_lst, num_cores)

with Pool(processes=num_cores) as pool:
    results = pool.map(graph_function, vertex_lsts)
results = [*itertools.chain.from_iterable(results)]
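A possible refinement, sketched here as an untested variation on the working snippet above (the init_worker and g_worker names are mine, not from the original): give each pool worker a single long-lived connection via a Pool initializer, rather than reconnecting inside every call.

import itertools
from multiprocessing import Pool

from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection

g_worker = None  # per-process traversal source, set by the initializer

def init_worker():
    # runs once in each worker process
    global g_worker
    g_worker = Graph().traversal().withRemote(
        DriverRemoteConnection('ws://localhost:8182/gremlin', 'g'))

def graph_function(vertex_id_list):
    # reuse the worker's single connection for every vertex in the chunk
    return [g_worker.V(str(v)).inE().values('benef_nm').dedup().toList()
            for v in vertex_id_list]

with Pool(processes=4, initializer=init_worker) as pool:
    results = pool.map(graph_function, vertex_lsts)
results = [*itertools.chain.from_iterable(results)]

The trade-off is one websocket held open per worker for the pool's lifetime, instead of one connection per map call.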
Related
I have the following problem: I am running a parallel task and getting this error:
Traceback (most recent call last):
  File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
    self.run()
  File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "eclat_model.py", line 127, in do_work
    function(*args, work_queue, valid_list)
  File "eclat_model.py", line 115, in eclat_parallel_helper
    valid_list.extend(next_vectors)
  File "<string>", line 2, in extend
  File "/usr/lib/python3.8/multiprocessing/managers.py", line 834, in _callmethod
    conn.send((self._id, methodname, args, kwds))
  File "/usr/lib/python3.8/multiprocessing/connection.py", line 206, in send
    self._send_bytes(_ForkingPickler.dumps(obj))
  File "/usr/lib/python3.8/multiprocessing/connection.py", line 404, in _send_bytes
    self._send(header)
  File "/usr/lib/python3.8/multiprocessing/connection.py", line 368, in _send
    n = write(self._handle, buf)
BrokenPipeError: [Errno 32] Broken pipe
Relevant functions in eclat_model.py look like this:
from queue import Empty as QueueEmptyError

def eclat_parallel_helper(index, bit_vectors, min_support, work_queue, valid_list):
    next_vectors = []
    for j in range(index + 1, len(bit_vectors)):
        item_vector = bit_vectors[index][0] | bit_vectors[j][0]
        transaction_vector = bit_vectors[index][1] & bit_vectors[j][1]
        support = get_vector_support(transaction_vector)  # defined elsewhere in eclat_model.py
        if support >= min_support:
            next_vectors.append((item_vector, transaction_vector, support))
    if len(next_vectors) > 0:
        valid_list.extend(next_vectors)
        for i in range(len(next_vectors)):
            work_queue.put((eclat_parallel_helper, (i, next_vectors, min_support)))

def do_work(work_queue, valid_list, not_done):
    # work queue entries have the form (function, args)
    while not_done.value:
        try:
            function, args = work_queue.get_nowait()
        except QueueEmptyError:
            continue
        function(*args, work_queue, valid_list)
        work_queue.task_done()
    work_queue.close()
EDIT:
Multiprocessing part of the code is as follows: bit_vectors is a list of lists, where each entry is of the form
[items, transactions, support]. Here items is a bit vector encoding which items appear in the itemset, transactions is a bit vector encoding which transactions the itemset appears in, and support is the number of transactions in which the itemset occurs (a small illustration follows below).
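(For concreteness, a hypothetical example of a single entry, assuming plain Python ints serve as the bit vectors; this example is mine, not from the original post:)

# itemset {0, 2} occurring in transactions 1 and 3
items = 0b101                           # bits 0 and 2 set -> items 0 and 2
transactions = 0b1010                   # bits 1 and 3 set -> transactions 1 and 3
support = bin(transactions).count("1")  # 2 transactions
entry = (items, transactions, support)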
from multiprocessing import Process, JoinableQueue, Manager, Value, cpu_count

def eclat_parallel(bit_vectors, min_support):
    not_done = Value('i', 1)
    manager = Manager()
    valid_list = manager.list()
    work_queue = JoinableQueue()
    for i in range(len(bit_vectors)):
        work_queue.put((eclat_parallel_helper, (i, bit_vectors, min_support)))

    processes = []
    for i in range(cpu_count()):
        p = Process(target=do_work, args=(work_queue, valid_list, not_done), daemon=True)
        p.start()
        processes.append(p)

    work_queue.join()
    not_done.value = 0
    work_queue.close()

    valid_itemset_vectors = bit_vectors
    for element in valid_list:
        valid_itemset_vectors.append(element)

    for p in processes:
        p.join()

    return valid_itemset_vectors
What does this error mean, please? Am I appending too many elements to the next_vectors list?
I had the same issue; in my case, just adding a short delay (time.sleep(0.01)) solved it.
The problem is that the individual processes work through the queue too quickly, and that causes the error. A sketch of where the delay can go is below.
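(A minimal sketch of that fix applied to the do_work loop from the question, assuming the same imports as above; the only change is the sleep in the empty-queue branch:)

import time
from queue import Empty as QueueEmptyError

def do_work(work_queue, valid_list, not_done):
    while not_done.value:
        try:
            function, args = work_queue.get_nowait()
        except QueueEmptyError:
            time.sleep(0.01)  # brief back-off instead of busy-polling the queue
            continue
        function(*args, work_queue, valid_list)
        work_queue.task_done()
    work_queue.close()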
Code:
from aiohttp import web
from aiortc.mediastreams import MediaStreamTrack
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaPlayer
import asyncio
import json
import os
from multiprocessing import Process, freeze_support
from queue import Queue
import sys
import threading
from time import sleep
import fractions
import time

class RadioServer(Process):
    def __init__(self, q):
        super().__init__()
        self.q = q
        self.ROOT = os.path.dirname(__file__)
        self.pcs = []
        self.channels = []
        self.stream_offers = []
        self.requests = []

    def run(self):
        self.app = web.Application()
        self.app.on_shutdown.append(self.on_shutdown)
        self.app.router.add_get("/", self.index)
        self.app.router.add_get("/radio.js", self.javascript)
        self.app.router.add_get("/jquery-3.5.1.min.js", self.jquery)
        self.app.router.add_post("/offer", self.offer)
        threading.Thread(target=self.fill_the_queues).start()
        web.run_app(self.app, access_log=None, host="192.168.1.20", port="8080", ssl_context=None)

    def fill_the_queues(self):
        while True:
            frame = self.q.get()
            for stream_offer in self.stream_offers:
                stream_offer.q.put(frame)

    async def index(self, request):
        content = open(os.path.join(self.ROOT, "index.html"), encoding="utf8").read()
        return web.Response(content_type="text/html", text=content)

    async def javascript(self, request):
        content = open(os.path.join(self.ROOT, "radio.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def jquery(self, request):
        content = open(os.path.join(self.ROOT, "jquery-3.5.1.min.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def offer(self, request):
        params = await request.json()
        offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
        pc = RTCPeerConnection()
        self.pcs.append(pc)
        self.requests.append(request)

        # prepare epalxeis media
        self.stream_offers.append(CustomRadioStream())
        pc.addTrack(self.stream_offers[-1])

        @pc.on("iceconnectionstatechange")
        async def on_iceconnectionstatechange():
            if pc.iceConnectionState == "failed":
                self.pcs.remove(pc)
                self.requests.remove(request)
                print(str(request.remote) + " disconnected from radio server")
                print("Current peer connections:" + str(len(self.pcs)))

        # handle offer
        await pc.setRemoteDescription(offer)

        # send answer
        answer = await pc.createAnswer()
        await pc.setLocalDescription(answer)
        return web.Response(content_type="application/json",
                            text=json.dumps({"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}))

    async def on_shutdown(self, app):
        # close peer connections
        if self.pcs:
            coros = [pc.close() for pc in self.pcs]
            await asyncio.gather(*coros)
        self.pcs = []
        self.channels = []
        self.stream_offers = []

"""
some other classes here such as CustomRadioStream and RadioOutputStream
"""

if __name__ == "__main__":
    freeze_support()
    q = Queue()
    custom_server_child_process = RadioServer(q)
    custom_server_child_process.start()
Error
Traceback (most recent call last):
  File "123.py", line 106, in <module>
    custom_server_child_process.start()
  File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
    self._popen = self._Popen(self)
  File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
    return Popen(process_obj)
  File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
    reduction.dump(process_obj, to_child)
  File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
What am I doing wrong?
If I call the run function directly (instead of start), there is no problem, but I want this class to run in its own process.
Edit: OK, with multiprocessing.Queue it works fine, but now similar code produces this error:
$ python "Papinhio_player.py"
Traceback (most recent call last):
File "Papinhio_player.py", line 3078, in <module>
program = PapinhioPlayerCode()
File "Papinhio_player.py", line 250, in __init__
self.manage_decks_instance = Manage_Decks(self)
File "C:\python\scripts\Papinhio player\src\main\python_files/manage_decks.py"
, line 356, in __init__
self.custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, i
n start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, i
n _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, i
n _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", l
ine 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60,
in dump
ForkingPickler(file, protocol).dump(obj)
File "stringsource", line 2, in av.audio.codeccontext.AudioCodecContext.__redu
ce_cython__
TypeError: self.parser,self.ptr cannot be converted to a Python object for pickl
ing
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 116, in
spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 126, in
_main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Some objects cannot be serialized and then deserialized.
The stack trace you posted mentions:
TypeError: cannot pickle '_thread.lock' object
A lock, which holds state in memory and guarantees that no other process can own the same lock at the same moment, is typically a very bad candidate for this operation: what should be created when you deserialize it?
To fix this: choose which fields of the object are actually relevant to serialize, and pickle/unpickle only that part (a sketch of the pattern follows).
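(A minimal sketch of that pattern, using a hypothetical Worker class that holds a threading.Lock: drop the lock from the pickled state via __getstate__ and recreate it on deserialization via __setstate__.)

import pickle
import threading

class Worker:
    def __init__(self):
        self.lock = threading.Lock()
        self.data = {"count": 0}

    def __getstate__(self):
        # called by pickle: return only the serializable fields
        state = self.__dict__.copy()
        del state["lock"]  # locks cannot be pickled
        return state

    def __setstate__(self, state):
        # called on unpickling: restore fields, then make a fresh lock
        self.__dict__.update(state)
        self.lock = threading.Lock()

w = pickle.loads(pickle.dumps(Worker()))  # round-trips cleanly now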
I create a PyTables carray W_hat that the processes should share, saving their results into it instead of returning them.
from multiprocessing import Lock
from multiprocessing import Pool
import tables as tb

def parallel_l21(labels, X, lam, g, W_hat):
    g_indxs = np.where(labels == g)[0]
    tmp = rfs(X[g_indxs, 1:].T, X[:, :-1].T, gamma=lam, verbose=False).T
    tmp[abs(tmp) <= 1e-6] = 0
    with lock:
        W_hat[:, g_indxs] = np.array(tmp)

def init_child(lock_):
    global lock
    lock = lock_

# Previous code (np, rfs, repeat, cpu_count, X_test, labels, X, lam, path_name, sub_num) is omitted.
n_ = X_test.shape[0]
tb.file._open_files.close_all()
f = tb.open_file(path_name + 'dot' + sub_num + str(lam) + '.h5', 'w')
filters = tb.Filters(complevel=5, complib='blosc')
W_hat = f.create_carray(f.root, 'data', tb.Float32Atom(), shape=(n_, n_), filters=filters)

W_hats = []
for i in np.unique(labels):
    W_hats.append(W_hat)

lock = Lock()
with Pool(processes=cpu_count, initializer=init_child, initargs=(lock,)) as pool:
    print(pool)
    pool.starmap(parallel_l21, zip(repeat(labels), repeat(X), repeat(lam), np.unique(labels), W_hats))
Now, when the call reaches starmap, this error shows up:
Traceback (most recent call last):
  File "/Applications/PyCharm CE 2.app/Contents/plugins/python-ce/helpers/pydev/_pydevd_bundle/pydevd_exec2.py", line 3, in Exec
    exec(exp, global_vars, local_vars)
  File "<input>", line 1, in <module>
  File "/usr/local/Cellar/python@3.8/3.8.6_1/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/pool.py", line 372, in starmap
    return self._map_async(func, iterable, starmapstar, chunksize).get()
  File "/usr/local/Cellar/python@3.8/3.8.6_1/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/pool.py", line 771, in get
    raise self._value
  File "/usr/local/Cellar/python@3.8/3.8.6_1/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/pool.py", line 537, in _handle_tasks
    put(task)
  File "/usr/local/Cellar/python@3.8/3.8.6_1/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/connection.py", line 206, in send
    self._send_bytes(_ForkingPickler.dumps(obj))
  File "/usr/local/Cellar/python@3.8/3.8.6_1/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
  File "stringsource", line 2, in tables.hdf5extension.Array.__reduce_cython__
TypeError: self.dims,self.dims_chunk,self.maxdims cannot be converted to a Python object for pickling
Note: I thought the code worked fine on Python 3.6.8, but it turns out that is not the case.
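(The traceback points at the W_hats argument: the carray is pickled with each starmap task, and the tables.hdf5extension.Array line shows it cannot be. A common workaround, sketched here as an assumption rather than a verified fix, is to pass the HDF5 path instead and open the file inside each worker; the parallel_l21_by_path name is hypothetical, and rfs and lock come from the code above:)

import numpy as np
import tables as tb

def parallel_l21_by_path(labels, X, lam, g, h5_path):
    g_indxs = np.where(labels == g)[0]
    tmp = rfs(X[g_indxs, 1:].T, X[:, :-1].T, gamma=lam, verbose=False).T
    tmp[abs(tmp) <= 1e-6] = 0
    with lock:  # the Lock from init_child serializes writes to the file
        with tb.open_file(h5_path, 'a') as f:
            f.root.data[:, g_indxs] = np.array(tmp)

starmap would then receive repeat(h5_path) in place of W_hats, so only a string is pickled per task.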
I am trying out AWS Neptune for the first time using Chalice.
This is the entire error
Traceback (most recent call last):
  File "/var/task/chalice/app.py", line 1104, in _get_view_function_response
    response = view_function(**function_args)
  File "/var/task/app.py", line 44, in getPosts
    raise e
  File "/var/task/app.py", line 37, in getPosts
    result = g.V().has('name', 'test1').toList()
  File "/var/task/gremlin_python/process/traversal.py", line 58, in toList
    return list(iter(self))
  File "/var/task/gremlin_python/process/traversal.py", line 48, in __next__
    self.traversal_strategies.apply_strategies(self)
  File "/var/task/gremlin_python/process/traversal.py", line 573, in apply_strategies
    traversal_strategy.apply(traversal)
  File "/var/task/gremlin_python/driver/remote_connection.py", line 149, in apply
    remote_traversal = self.remote_connection.submit(traversal.bytecode)
  File "/var/task/gremlin_python/driver/driver_remote_connection.py", line 55, in submit
    result_set = self._client.submit(bytecode)
  File "/var/task/gremlin_python/driver/client.py", line 111, in submit
    return self.submitAsync(message, bindings=bindings).result()
  File "/var/task/gremlin_python/driver/client.py", line 127, in submitAsync
    return conn.write(message)
  File "/var/task/gremlin_python/driver/connection.py", line 55, in write
    self.connect()
  File "/var/task/gremlin_python/driver/connection.py", line 45, in connect
    self._transport.connect(self._url, self._headers)
  File "/var/task/gremlin_python/driver/tornado/transport.py", line 36, in connect
    lambda: websocket.websocket_connect(url))
  File "/var/task/tornado/ioloop.py", line 576, in run_sync
    return future_cell[0].result()
tornado.simple_httpclient.HTTPStreamClosedError: Stream closed
and here is my code
import logging
from datetime import datetime

from chalice import Chalice, BadRequestError, NotFoundError
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import T, P, Operator
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection

app = Chalice(app_name='chalice-neptune')
app.debug = True

logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)

def setup_graph():
    try:
        graph = Graph()
        connstring = 'ws://NEPTUNE-ENDPOINT-HERE:8182/gremlin'
        g = graph.traversal().withRemote(DriverRemoteConnection(connstring, 'g'))
        logging.info('Connected to Neptune')
    except Exception as e:
        logging.error(e, exc_info=True)
        raise BadRequestError("Could not connect to Neptune")
    return g

@app.route('/getPosts')
def getPosts():
    g = setup_graph()
    try:
        result = g.V().has('name', 'test1').toList()
        response = {
            'status_code': 200,
            'data': result
        }
    except Exception as e:
        raise e
    return response
Has anyone tried this?
I have followed the example found in the gremlin-python-example repo.
As far as I can tell I have not missed anything from the example, but it is still throwing the stream closed error.
Apparently the only thing I needed to change was my connection string, and it is now working fine.
connstring = 'wss://NEPTUNE-ENDPOINT-HERE:8182/gremlin'
I changed it from ws to wss (WebSocket over TLS; Neptune endpoints generally require encrypted connections).
As to the difference between the two, you can refer to this answer:
Difference between ws and wss?
I have a Python object, a list of dictionaries, which I want to fill with key-value pairs in each of those dicts, using multiple processors simultaneously via Python's multiprocessing module. For that purpose I am using a Manager to store the object. Here is the code:
from pylab import *
from numpy.random import *
import multiprocessing
import threading
import random

def tasks_start(id, global_lists):
    counter_lock = threading.Lock()
    with counter_lock:
        num = int(10 * random.random())
        global_lists[num] = {'1': 'Random'}
        print("Id: ", id)
        print(global_lists[0])

if __name__ == '__main__':
    numProcessors = 6
    pool = multiprocessing.Pool(numProcessors)
    global_list = multiprocessing.Manager().list(range(100))
    for idx in range(100):
        global_list[idx] = multiprocessing.Manager().dict()
    tasks = []
    for id in range(10):
        tasks.append((id, global_list))
    pool.starmap(tasks_start, tasks)
    pool.close()
    pool.join()
So what I am doing here is creating a list of dictionaries stored as global_list, and then calling the tasks_start() method 10 times with Pool.starmap() (so that I can later extend to multiple arguments) to fill the list of dictionaries. As a simple test case, each task uses the random generator to pick one dictionary from the list and fill it with a value. When I run the program, the following error occurs:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
  File "/usr/lib/python3.4/multiprocessing/pool.py", line 119, in worker
    result = (True, func(*args, **kwds))
  File "/usr/lib/python3.4/multiprocessing/pool.py", line 47, in starmapstar
    return list(itertools.starmap(args[0], args[1]))
  File "/home/cysis/inhibition_soum/motif_temporal_patterns/code_versions/2016/09/09_08/parallel_test/test_error_manager.py", line 14, in tasks_start
    print(global_lists[0])
  File "<string>", line 2, in __getitem__
  File "/usr/lib/python3.4/multiprocessing/managers.py", line 732, in _callmethod
    kind, result = conn.recv()
  File "/usr/lib/python3.4/multiprocessing/connection.py", line 251, in recv
    return ForkingPickler.loads(buf.getbuffer())
  File "/usr/lib/python3.4/multiprocessing/managers.py", line 852, in RebuildProxy
    return func(token, serializer, incref=incref, **kwds)
  File "/usr/lib/python3.4/multiprocessing/managers.py", line 706, in __init__
    self._incref()
  File "/usr/lib/python3.4/multiprocessing/managers.py", line 756, in _incref
    conn = self._Client(self._token.address, authkey=self._authkey)
  File "/usr/lib/python3.4/multiprocessing/connection.py", line 495, in Client
    c = SocketClient(address)
  File "/usr/lib/python3.4/multiprocessing/connection.py", line 624, in SocketClient
    s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
  File "/home/cysis/inhibition_soum/motif_temporal_patterns/code_versions/2016/09/09_08/parallel_test/test_error_manager.py", line 29, in <module>
    pool.starmap(tasks_start, tasks)
  File "/usr/lib/python3.4/multiprocessing/pool.py", line 268, in starmap
    return self._map_async(func, iterable, starmapstar, chunksize).get()
  File "/usr/lib/python3.4/multiprocessing/pool.py", line 599, in get
    raise self._value
FileNotFoundError: [Errno 2] No such file or directory
In my opinion, before the last print(global_lists[0]) is executed, the Manager exits and is therefore unable to find global_lists[0]. Can anybody shed some light on this?
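(That diagnosis looks plausible: each multiprocessing.Manager() call in the loop spawns a separate manager process, and a manager whose only references go out of scope can shut down while workers still hold proxies to it, leaving a dead socket behind. A minimal sketch of one way to restructure the main block, assuming the tasks_start above, keeps a single Manager alive and creates all shared objects from it:)

import multiprocessing

if __name__ == '__main__':
    manager = multiprocessing.Manager()  # one manager process for everything
    # 100 managed dicts inside one managed list, all served by the same manager
    global_list = manager.list([manager.dict() for _ in range(100)])
    with multiprocessing.Pool(6) as pool:
        pool.starmap(tasks_start, [(i, global_list) for i in range(10)])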