Python pickle error when using multiprocessing inside class method

In the class foo in foomodule.py below, I am getting an error in the run_with_multiprocessing method. The method breaks the records in self._data into chunks and calls some_func() on a subset of the data, for example some_func(data[0:800], 800) in the first iteration if limit = 800.
I have done this, because running 10 * 1k records vs. 1 * 10k records shows a great performance improvement in a variation of the run_with_multiprocessing function that does the same thing, just without multiprocessing. Now I want to use multiprocessing to see if I can improve performance even more.
I am running Python 3.8.2 on Windows 8.1. I am fairly new to Python and multiprocessing. Thank you so much for your help.
# foomodule.py
import multiprocessing

class foo:
    def __init__(self, data, record_count):
        self._data = data
        self._record_count = record_count

    def some_func(self, data, record_count):
        # looping through self._data and doing some work
        pass

    def run_with_multiprocessing(self, limit):
        step = 0
        while step < self._record_count:
            if self._record_count - step < limit:
                proc = multiprocessing.Process(
                    target=self.some_func,
                    args=(self._data[step:self._record_count], self._record_count - step)
                )
                proc.start()
                proc.join()
                step = self._record_count
                break
            proc = multiprocessing.Process(
                target=self.some_func,
                args=(self._data[step:step + limit], limit)
            )
            proc.start()
            proc.join()
            step += limit
        return
When using the class in script.py, I get the following error:
import time
import foomodule

# data is a mysql result set with, say, 10'000 rows
start = time.time()
bar = foomodule.foo(data, 10000)
limit = 800
bar.run_with_multiprocessing(limit)
end = time.time()
print("finished after " + str(round(end - start, 2)) + "s")
Traceback (most recent call last):
  File "C:/coding/python/project/script.py", line 29, in <module>
    bar.run_with_multiprocessing(limit)
  File "C:\coding\python\project\foomodule.py", line 303, in run_with_multiprocessing
    proc.start()
  File "C:\...\Python\Python38-32\lib\multiprocessing\process.py", line 121, in start
    self._popen = self._Popen(self)
  File "C:\...\Python\Python38-32\lib\multiprocessing\context.py", line 224, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\...\Python\Python38-32\lib\multiprocessing\context.py", line 326, in _Popen
    return Popen(process_obj)
  File "C:\...\Python\Python38-32\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\...\Python\Python38-32\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
  File "C:\...\Python\Python38-32\lib\socket.py", line 272, in __getstate__
    raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
TypeError: cannot pickle 'SSLSocket' object

You divide and you will win
Problem
If you pass an SSLSocket object as an argument to multiprocessing.Process(), the SSLSocket cannot be serialized (pickled).
Solution
Since you can't serialize an SSLSocket, create it inside the subprocess (that is, inside the function passed as the target of multiprocessing.Process()).
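For illustration, here is a minimal sketch of that pattern (the host, port, and rows values are hypothetical): the parent passes only picklable values, and the child builds the SSL connection itself.
import multiprocessing
import socket
import ssl

def worker(host, port, rows):
    # Only picklable arguments (strings, ints, lists) cross the process
    # boundary; the SSL socket is created here, inside the child.
    context = ssl.create_default_context()
    with socket.create_connection((host, port)) as raw:
        with context.wrap_socket(raw, server_hostname=host) as conn:
            conn.sendall(("processing %d rows" % len(rows)).encode())

if __name__ == "__main__":
    rows = [("a", 1), ("b", 2)]  # plain, picklable data
    proc = multiprocessing.Process(target=worker, args=("example.org", 443, rows))
    proc.start()
    proc.join()
The same reasoning applies to the question above: the MySQL result set likely holds a reference to the connection's SSL socket, so materializing it into a plain list of rows (for example with cursor.fetchall()) before constructing foo avoids pickling the socket.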
Server
#!/usr/bin/python3
import socket
import ssl
import sys
import threading
import time
import multiprocessing
from sources.ClientListener import ClientListener

class SocketServer:
    def __init__(self, **kwargs):
        self.args = kwargs["args"]
        self.__createSocket()

    def __handlerClients(self):
        try:
            while self.sock:
                # sock.accept() lets us create a subprocess for each established connection
                # IMPORTANT: I don't wrap the socket with SSL here, because otherwise the
                # SSLSocket object can't be pickled when passed as an argument to
                # multiprocessing.Process()
                conn, addr = self.sock.accept()
                eventChildStop = multiprocessing.Event()
                subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(conn, addr, eventChildStop))
                # This thread is responsible for closing the client's child process
                threading.Thread(target=ClientListener.exitSubprocess, name="closeChildProcess", args=(eventChildStop, subprocess,)).start()
                subprocess.start()
                time.sleep(1)
        except:
            None

    def __createSocket(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # This allows the address/port to be reused immediately instead of waiting
        # out the TIME_WAIT state:
        # https://stackoverflow.com/questions/12362542/python-server-only-one-usage-of-each-socket-address-is-normally-permitted
        # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(("", self.PORT))
        self.sock.listen(self.CLIENTS)
        print(logFile().message(f"Good days. I am running ClassAdmin server, listening {self.CLIENTS} clients by port {self.PORT}...", True, "INFO"))
        # self.sockSSL = self.context.wrap_socket(sock, server_side=True)
        self.__handlerClients()

if __name__ == "__main__":
    SocketServer(args=sys.argv)
As you can see in the __handlerClients(self) method, I loop while the socket object exists. On each iteration I know whether a connection has been established thanks to:
conn, addr = self.sock.accept()
So I pass the conn variable to multiprocessing.Process(), because conn is a plain socket object.
The difference between conn and self.sock is that conn has the raddr (remote address) parameter, while self.sock doesn't and its laddr is 0.0.0.0:
self.sock
<socket.socket fd=3, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('0.0.0.0', 7788)>
conn
<socket.socket fd=5, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('192.168.0.3', 7788), raddr=('192.168.0.20', 53078)>
The conn passed to multiprocessing.Process() is that same object:
subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(conn, addr, eventChildStop))
Now let's look at ClientListener.
ClientListener
class ClientListener:
    def __init__(self, conn, addr, event):
        # Take the connection's socket object and add SSL-encrypted traffic to
        # this connection, thanks to the SSLSocket object of the ssl module
        self.addr = addr
        self.conn = self.__SSLTunnel(conn)
        self.nick = ""
        self.__listenData()

    # This creates an SSL tunnel with ClassAdmin's certificate and private key
    def __SSLTunnel(self, sock):
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(Environment.SSL("crt"), Environment.SSL("key"))
        return context.wrap_socket(sock, server_side=True)

    def __listenData(self):
        # [...]
As you can see in __init__(self, conn, addr, event), I receive the conn variable from the previous code, and self.conn stores the same object, now wrapped in an SSLSocket:
self.conn = self.__SSLTunnel(conn)

def __SSLTunnel(self, sock):
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(Environment.SSL("crt"), Environment.SSL("key"))
    return context.wrap_socket(sock, server_side=True)
Important
The SSLSocket is stored in self.conn, so the send() and recv() methods work over the encrypted connection:
data = self.conn.recv(1024)
self.conn.send("sig.SystemExit(-5000,'The nick exists and is connected',True)".encode("utf-8"))
This connected socket cannot be used with the accept() method; calling it throws an error:
[Errno 22] Invalid argument in /etc/ClassAdmin/sources/ClientListener.py:14
Have a good day.

Related

Making pynng and socket talk to each other

TL;DR
I spin up a server using pynng, then a client built on the Python standard-library socket module tries to send messages to it.
The problem is that the client can send the message, but the server is oblivious to it. Therefore, it doesn't work.
Am I missing something? Some low-level protocol setting? Some termination character?
The reason I'm doing this is that I will build a Python script that uses pynng to act as a server. Then a non-Python program (which I assume has knowledge of basic TCP protocols) will try to talk to this Python server. That is why I am using the (IMHO) most primitive socket library I could operate: the socket module in the standard library.
The details
I will present code snippets as I discuss, but I will show the full minimal code example at the end.
I am trying to spin up a server using pynng
def server():
    with pynng.Pair0(listen=f'tcp://{HOST:s}:{PORT:d}', recv_timeout=10000) as s:
        print("Server running")
        data = s.recv()  # Blocks forever here
        print(data)
Then, a client that looks like this will try to connect to it:
def client():
    with socket.create_connection(address=(HOST, PORT), timeout=5) as s:
        print("Client connected")
        s.sendall(b'Hello world')
        print("Client sent message")
I put them all together using threading:
def main():
    srv = threading.Thread(target=server)
    cli = threading.Thread(target=client)
    srv.start()
    cli.start()
    srv.join()
    cli.join()
Minimum working code
All told, this is the minimum working code:
import socket
import pynng
import threading

HOST = "127.0.0.1"
PORT = 65432

def main():
    srv = threading.Thread(target=server)
    cli = threading.Thread(target=client)
    srv.start()
    cli.start()
    srv.join()
    cli.join()

def server():
    with pynng.Pair0(listen=f'tcp://{HOST:s}:{PORT:d}', recv_timeout=10000) as s:
        print("Server running")
        data = s.recv()  # Blocks forever here
        print("Message received")
        print(data)

def client():
    with socket.create_connection(address=(HOST, PORT), timeout=5) as s:
        print("Client connected")
        s.sendall(b'Hello world')
        print("Client sent message")

if __name__ == "__main__":
    main()
Then I run this in the terminal
$ python main.py
It seems that the server is unable to recv messages, and the recv attempt thus times out at 10000ms.
Server running
Client connected
Client sent message
Exception in thread Thread-1:
Traceback (most recent call last):
  File "/home/kmonisit/miniconda3/envs/engg/lib/python3.8/threading.py", line 932, in _bootstrap_inner
    self.run()
  File "/home/kmonisit/miniconda3/envs/engg/lib/python3.8/threading.py", line 870, in run
    self._target(*self._args, **self._kwargs)
  File "main.py", line 39, in server
    data = s.recv()  # Blocks forever here
  File "/home/kmonisit/miniconda3/envs/engg/lib/python3.8/site-packages/pynng/nng.py", line 454, in recv
    check_err(ret)
  File "/home/kmonisit/miniconda3/envs/engg/lib/python3.8/site-packages/pynng/exceptions.py", line 201, in check_err
    raise exc(string, err)
pynng.exceptions.Timeout: Timed out
pynng is based on Nanomsg Next Generation, which is an implementation of the Scalability Protocols. The scalability protocols work over many different transports, including tcp, but bare sockets are not compatible. However, with a little bit of prayer and elbow grease, they can be made compatible. Which is to say, you can implement the scalability protocols in pure Python if need be.
First, we need to know what the wire format is; thankfully that is documented in an RFC in the original nanomsg repository. An implementation of a Pair0 client is here:
class Pair0:
    """A poor implementation of the Pair0 protocol"""
    def __init__(self, host, port, timeout=None):
        self._sock = socket.create_connection(address=(host, port), timeout=timeout)
        # https://github.com/nanomsg/nanomsg/blob/master/rfc/sp-tcp-mapping-01.txt
        # upon making a connection, both ends are required to send this header
        self._sock.send(b'\x00SP\x00\x00\x10\x00\x00')
        print(self._sock.recv(8))

    def send(self, data):
        # messages are simply "length + payload". Length is 64-bit in network byte order.
        packed = struct.pack('!Q', len(data))
        self._sock.sendall(packed + data)

    def recv(self):
        size_bytes = self._sock.recv(8)
        (size,) = struct.unpack('!Q', size_bytes)
        received = 0
        parts = []
        while received < size:
            data = self._sock.recv(size - received)
            received += len(data)
            parts.append(data)
        return b''.join(parts)
And integrated into your test program:
import socket
import struct
import pynng
import threading
import time

HOST = "127.0.0.1"
PORT = 65432

def main():
    srv = threading.Thread(target=server)
    srv.start()
    # sleep to give the server time to bind to the address
    time.sleep(0.1)
    _client = Pair0(HOST, PORT, 1)
    _client.send(b'hello pynng')
    _client.send(b'hope everything is going well for you')
    print(_client.recv())
    print(_client.recv())
    srv.join()

def server():
    with pynng.Pair0(listen=f'tcp://{HOST:s}:{PORT:d}', recv_timeout=1000) as s:
        print("Server running")
        for _ in range(2):
            data = s.recv()
            print("Message received")
            print(data)
        s.send(b'hello bad client')
        s.send(b'I hope you are doing okay')

class Pair0:
    """A poor implementation of the Pair0 protocol"""
    def __init__(self, host, port, timeout=None):
        self._sock = socket.create_connection(address=(host, port), timeout=timeout)
        # https://github.com/nanomsg/nanomsg/blob/master/rfc/sp-tcp-mapping-01.txt
        # upon making a connection, both ends are required to send this header
        self._sock.send(b'\x00SP\x00\x00\x10\x00\x00')
        print(self._sock.recv(8))

    def send(self, data):
        # messages are simply "length + payload". Length is 64-bit in network byte order.
        packed = struct.pack('!Q', len(data))
        self._sock.sendall(packed + data)

    def recv(self):
        size_bytes = self._sock.recv(8)
        (size,) = struct.unpack('!Q', size_bytes)
        received = 0
        parts = []
        while received < size:
            data = self._sock.recv(size - received)
            received += len(data)
            parts.append(data)
        return b''.join(parts)

if __name__ == "__main__":
    main()
Now, this is nowhere near as robust as the implementation in pynng (which relies on the underlying nng implementation). nng does The Right Thing™ in edge conditions, including losing the network, handling multiple clients, keeping track of state machines, handling SIGINT, etc. This is also an incomplete implementation, as it does not bind, etc.
Disclaimer: I am the author of pynng.

Raise TimeOutError after given time for multiprocessing.connection.Listener.accept()

I'm trying to interrupt multiprocessing.connection.Listener.accept(), but have thus far been unsuccessful. Since it doesn't provide a timeout parameter, I thought perhaps I could use socket.setdefaulttimeout() to interrupt it, as suggested in a post here on SO which I cannot find anymore.
This didn't work. I then tried calling close() on the Listener() object. According to this post's answer, that should have worked.
It appears, however, that these objects do not play along with the usual socket-related solutions.
I can confirm that the Listener is closed by the Timer object as expected, but the accept() call isn't interrupted.
The Code:
import logging
import socket
import os
from multiprocessing.connection import Listener
from queue import Queue, Empty
from threading import Thread, Event, Timer

class Node(Thread):
    """Base class providing an AF_INET, AF_UNIX or AF_PIPE connection to its
    data queue. It offers put() and get() method wrappers, and therefore
    behaves like a Queue as well as a Thread.

    Data from the internal queue is automatically fed to any connecting client.
    """
    def __init__(self, sock_name, max_q_size=None, timeout=None,
                 *thread_args, **thread_kwargs):
        """Initialize class.

        :param sock_name: UDS, TCP socket or pipe name
        :param max_q_size: maximum queue size for self.q, default infinite
        """
        self._sock_name = sock_name
        self.connector = Listener(sock_name)
        max_q_size = max_q_size if max_q_size else 0
        self.q = Queue(maxsize=max_q_size)
        self._running = Event()
        self.connection_timer = Timer(timeout, self.connection_timed_out)
        super(Node, self).__init__(*thread_args, **thread_kwargs)

    def connection_timed_out(self):
        """Closes the Listener and shuts down Node if no client connected.

        :return:
        """
        self.connector.close()
        self.join()

    def _start_connection_timer(self):
        self.connection_timer.start()

    def start(self):
        self._running.set()
        super(Node, self).start()

    def join(self, timeout=None):
        print("clearing..")
        self._running.clear()
        print("internal join")
        super(Node, self).join(timeout=timeout)
        print("Done")

    def run(self):
        while self._running.is_set():
            print("Accepting connections..")
            self._start_connection_timer()
            try:
                client = self.connector.accept()
                self.connection_timer.cancel()
                self.feed_data(client)
            except (TimeoutError, socket.timeout):
                continue
            except Exception as e:
                raise
        print("Run() Terminated!")

    def feed_data(self, client):
        try:
            while self._running.is_set():
                try:
                    client.send(self.q.get())
                except Empty:
                    continue
        except EOFError:
            return

if __name__ == '__main__':
    import time
    n = Node('/home/nils/git/spab2/test.uds', timeout=10)
    n.start()
    print("Sleeping")
    time.sleep(15)
    print("Manual join")
    n.join()
I realize my question is a duplicate of this question - however, it is almost one year old and has not even received a comment. In addition, I'm using Unix Domain Sockets, as opposed to the linked post's TCP connection.
I managed to set the timeout in the following way in Python 2.7:
self.listener = mpc.Listener((address, port))
self.listener._listener._socket.settimeout(3)
With this, the accept() call is interrupted.
Result:
  conn = self.listener.accept()
  File "/usr/lib/python2.7/multiprocessing/connection.py", line 145, in accept
    c = self._listener.accept()
  File "/usr/lib/python2.7/multiprocessing/connection.py", line 275, in accept
    s, self._last_accepted = self._socket.accept()
  File "/usr/lib/python2.7/socket.py", line 202, in accept
    sock, addr = self._sock.accept()
timeout: timed out
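Putting the same trick together in a self-contained sketch (note that _listener._socket is a private attribute, so this relies on CPython internals and may break between versions):
import socket
from multiprocessing.connection import Listener

listener = Listener(('localhost', 6000))
# Reach into the private _listener._socket to set a timeout on accept().
listener._listener._socket.settimeout(3)

try:
    conn = listener.accept()
except socket.timeout:
    print("no client connected within 3 seconds")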

Threaded UDP server bug

I've broken this code somehow and I can't fix it. The server/client code was written by someone else (mostly from the examples in py manuals), and I can't work out what's wrong.
I'm getting issues with super and init and that jazz, mostly because I don't fully understand them, and most documentation on the subject leaves me more confused than when I started. For now, I'll be happy enough to get it working. It's likely to be some silly issue, fixed in one line.
Any ideas? I've tried not to paste in code that isn't relevant, but I can add more or provide the whole file if it helps. The code falls over specifically when a handler thread is created. My test case is running two code instances and passing messages between them; it falls over on receipt of the first UDP message.
# Library imports
import threading
import SocketServer
import multiprocessing

# .. More code here ...

class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
    pass

class NodeDaemon(ThreadedUDPServer):
    def __init__(self, host, port, modules):
        ThreadedUDPServer.__init__(self, (host, port), NodeProtocolHandler)
        # Store parameters in the class
        self.modules = modules
        self.port = port
        self.ip = host
        # Check if we have enabled multithreaded listen daemon
        if settings.MULTI:
            self.server_thread = multiprocessing.Process(target=self.serve_forever)
        else:
            self.server_thread = threading.Thread(target=self.serve_forever)
        # Make the server thread daemonic
        self.server_thread.daemon = True
        # Start the server thread
        self.server_thread.start()
        # Update our identity node info
        self.modules.identity = NodeInfo(host, port)

    def fetch_modules(self):
        return self.modules

class NodeProtocolHandler(SocketServer.BaseRequestHandler):
    """
    Handles nody things.
    Check https://docs.python.org/2/library/socketserver.html
    for more sweet deets.
    """
    def __init__(self, *args, **kwargs):
        super(SocketServer.BaseRequestHandler, self).__init__(args, kwargs)
        # Grab modules references
        self.modules = self.server.fetch_modules()

    # ... More code here .. #

    def handle(self):
        """
        Main routine to handle all incoming UDP packets.
        """
        # Grab the raw message data received
        raw_data = self.request[0].strip()
        # ... More code here ...
The error generated is:
Exception happened during processing of request from ('127.0.0.1', 60377)
----------------------------------------
Traceback (most recent call last):
  File "C:\Python27\lib\SocketServer.py", line 593, in process_request_thread
    self.finish_request(request, client_address)
  File "C:\Python27\lib\SocketServer.py", line 334, in finish_request
    self.RequestHandlerClass(request, client_address, self)
  File "C:\some_dir\node_daemon.py", line 60, in __init__
    super(SocketServer.BaseRequestHandler,self).__init__(args,kwargs)
TypeError: must be type, not classobj
In Python 2, SocketServer.BaseRequestHandler is an old-style class (it does not inherit from object), so super() cannot be used with it. Call the parent class's __init__ directly instead:
def __init__(self, *args, **kwargs):
-    super(SocketServer.BaseRequestHandler, self).__init__(args, kwargs)
+    SocketServer.BaseRequestHandler.__init__(self, *args, **kwargs)
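For context, a quick demonstration of the old-style/new-style difference (Python 2 semantics; the class names are illustrative):
class OldStyle:              # Python 2: no `object` base, an old-style class
    pass

class NewStyle(object):      # new-style class
    pass

print(type(OldStyle))        # <type 'classobj'>
print(type(NewStyle))        # <type 'type'>

super(NewStyle, NewStyle())  # works
super(OldStyle, OldStyle())  # TypeError: must be type, not classobj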

TCP-Server over SSL using SocketServer.TCPServer

I want to add SSL support to an existing TCP server which is based on the SocketServer.TCPServer class.
So I overrode the default constructor of the TCPServer class and added the ssl.wrap_socket(...) call:
class MyTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
        # See SocketServer.TCPServer.__init__
        # (added ssl-support):
        SocketServer.BaseServer.__init__(self, server_address,
                                         RequestHandlerClass)
        self.socket = ssl.wrap_socket(
            socket.socket(self.address_family, self.socket_type),
            server_side=True,
            certfile='cert.pem'
        )
        if bind_and_activate:
            self.server_bind()
            self.server_activate()
When starting the server, no error occurs.
So I modified my simple test client to support SSL, too:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock = ssl.wrap_socket(s)
sock.connect(('192.168.1.1', 54321))
Again no error occurs, but the connect call blocks. When I close the client using Ctrl+C, it shows the following:
Traceback (most recent call last):
  File "exampleClient.py", line 10, in <module>
    sock.do_handshake()
  File "/usr/lib/python2.6/ssl.py", line 293, in do_handshake
    self._sslobj.do_handshake()
KeyboardInterrupt
So do_handshake() blocks when connecting. Does anyone know how to fix the problem? I simply want to use an encrypted TCP connection :)
The handshake is blocking because you are wrapping the socket after binding; the socket is listening for new connections, and there is no connected client yet to shake hands with.
Wrap the socket when accepting a connection instead:
class MyTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    def get_request(self):
        (socket, addr) = SocketServer.TCPServer.get_request(self)
        return (ssl.wrap_socket(socket, server_side=True, certfile="cert.pem"),
                addr)
Now the handshake succeeds because there is a client on the other side to shake hands with.
There is no additional work necessary for the stream handler; the python ssl library gives you objects with the same interface as socket.socket().
You can also wrap the socket early, but do postpone the handshake until you accept a connection:
class MyTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    def server_bind(self):
        SocketServer.TCPServer.server_bind(self)
        self.socket = ssl.wrap_socket(
            self.socket, server_side=True, certfile="cert.pem",
            do_handshake_on_connect=False)

    def get_request(self):
        (socket, addr) = SocketServer.TCPServer.get_request(self)
        socket.do_handshake()
        return (socket, addr)
OK, I found a solution. Now I use something similar to this, using the OpenSSL package:
Inside the MyTCPServer constructor:
SocketServer.BaseServer.__init__(self, server_address, RequestHandlerClass)
ctx = SSL.Context(SSL.SSLv23_METHOD)
cert = 'cert.pem'
ctx.use_privatekey_file(cert)
ctx.use_certificate_file(cert)
self.socket = SSL.Connection(ctx, socket.socket(self.address_family,
                                                self.socket_type))
if bind_and_activate:
    self.server_bind()
    self.server_activate()
And in the setup() method of the StreamRequestHandler:
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
This seems to work fine :-)
That's because you have to set the do_handshake_on_connect argument in your call to ssl.wrap_socket:
The parameter do_handshake_on_connect specifies whether to do the SSL handshake automatically after doing a socket.connect(), or whether the application program will call it explicitly, by invoking the SSLSocket.do_handshake() method. Calling SSLSocket.do_handshake() explicitly gives the program control over the blocking behavior of the socket I/O involved in the handshake.
Source: http://docs.python.org/library/ssl.html
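For example, a minimal client-side sketch with an explicit, deferred handshake (reusing the question's 192.168.1.1:54321 endpoint):
import socket
import ssl

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Defer the handshake so the program decides when the blocking I/O happens.
sock = ssl.wrap_socket(s, do_handshake_on_connect=False)
sock.connect(('192.168.1.1', 54321))
sock.do_handshake()  # explicit handshake, under program control
sock.sendall(b'hello over TLS')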

Python Sharing a network socket with multiprocessing.Manager

I am currently writing an nginx proxy server module with a request queue in front, so the requests are not dropped when the servers behind nginx can't handle them (nginx is configured as a load balancer).
I am using
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
The idea is to put the requests in a queue before handling them. I know multiprocessing.Queue supports only simple (picklable) objects and cannot carry raw sockets, so I tried using a multiprocessing.Manager to make a shared dictionary. The Manager also uses sockets for its connections, so this method failed too. Is there a way to share network sockets between processes?
Here is the problematic part of the code:
class ProxyServer(Threader, HTTPServer):
    def __init__(self, server_address, bind_and_activate=True):
        HTTPServer.__init__(self, server_address, ProxyHandler,
                            bind_and_activate)
        self.manager = multiprocessing.Manager()
        self.conn_dict = self.manager.dict()
        self.ticket_queue = multiprocessing.Queue(maxsize=10)
        self._processes = []
        self.add_worker(5)

    def process_request(self, request, client):
        stamp = time.time()
        print "We are processing"
        self.conn_dict[stamp] = (request, client)  # the program crashes here
        # Exception happened during processing of request from ('172.28.192.34', 49294)
        # Traceback (most recent call last):
        #   File "/usr/lib64/python2.6/SocketServer.py", line 281, in _handle_request_noblock
        #     self.process_request(request, client_address)
        #   File "./nxproxy.py", line 157, in process_request
        #     self.conn_dict[stamp] = (request, client)
        #   File "<string>", line 2, in __setitem__
        #   File "/usr/lib64/python2.6/multiprocessing/managers.py", line 725, in _callmethod
        #     conn.send((self._id, methodname, args, kwds))
        # TypeError: expected string or Unicode object, NoneType found
        self.ticket_queue.put(stamp)

    def add_worker(self, number_of_workers):
        for worker in range(number_of_workers):
            print "Starting worker %d" % worker
            proc = multiprocessing.Process(target=self._worker, args=(self.conn_dict,))
            self._processes.append(proc)
            proc.start()

    def _worker(self, conn_dict):
        while 1:
            ticket = self.ticket_queue.get()
            print conn_dict
            a = 0
            while a == 0:
                try:
                    request, client = conn_dict[ticket]
                    a = 1
                except Exception:
                    pass
            print "We are threading!"
            self.threader(request, client)
You can use multiprocessing.reduction to transfer connection and socket objects between processes.
Example Code
# Main process
from multiprocessing.reduction import reduce_handle
h = reduce_handle(client_socket.fileno())
pipe_to_worker.send(h)
# Worker process
from multiprocessing.reduction import rebuild_handle
h = pipe.recv()
fd = rebuild_handle(h)
client_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
client_socket.send("hello from the worker process\r\n")
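For reference, Python 3 exposes a similar mechanism via multiprocessing.reduction.send_handle/recv_handle; a minimal sketch (Python 3.7+ for the fileno auto-detection; the socketpair stands in for an accepted client connection):
import socket
from multiprocessing import Pipe, Process
from multiprocessing.reduction import send_handle, recv_handle

def worker(pipe):
    # Rebuild the socket from the file descriptor sent by the parent;
    # socket.socket(fileno=...) auto-detects family and type.
    fd = recv_handle(pipe)
    client = socket.socket(fileno=fd)
    client.sendall(b"hello from the worker process\r\n")
    client.close()

if __name__ == "__main__":
    parent_pipe, child_pipe = Pipe()
    proc = Process(target=worker, args=(child_pipe,))
    proc.start()
    # In a real server this would be a socket returned by accept().
    a, b = socket.socketpair()
    send_handle(parent_pipe, a.fileno(), proc.pid)
    print(b.recv(1024))
    proc.join()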
Looks like you need to pass file descriptors between processes (assuming Unix here, no clue about Windows). I've never done this in Python, but here is a link to the python-passfd project that you might want to check.
You can look at this code - https://gist.github.com/sunilmallya/4662837 - which is a multiprocessing.reduction socket server where the parent process passes connections to the workers after accepting them.
