I am currently writing a nginx proxy server module with a Request queue in front, so the requests are not dropped when the servers behind the nginx can't handle the requests (nginx is configured as a load balancer).
I am using
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
The idea is to put the requests in a queue before handling them. I know multiprocessing.Queue supports only simple objects and cannot support raw sockets, so I tried using a multiprocessing.Manager to make a shared dictionary. The Manager also uses sockets for connection, so this method failed too. Is there a way to share network sockets between processes?
Here is the problematic part of the code:
class ProxyServer(Threader, HTTPServer):
    """HTTP front-end that tries to park accepted connections in a
    Manager-backed dict so worker processes can pick them up.

    NOTE(review): `Threader` and `ProxyHandler` are defined elsewhere in
    the original program and are not shown in this excerpt.
    """

    def __init__(self, server_address, bind_and_activate=True):
        HTTPServer.__init__(self, server_address, ProxyHandler,
                            bind_and_activate)
        # Shared dict for handing (request, client) pairs to workers.
        # This is the root of the crash below: Manager proxies pickle
        # their arguments, and socket objects cannot be pickled.
        self.manager = multiprocessing.Manager()
        self.conn_dict = self.manager.dict()
        # Tickets (timestamps) are the keys workers use to look up a
        # connection; the queue bounds pending work to 10 entries.
        self.ticket_queue = multiprocessing.Queue(maxsize= 10)
        self._processes = []
        self.add_worker(5)

    def process_request(self, request, client):
        # Overrides SocketServer's process_request: instead of handling
        # the request inline, stamp it and park it for a worker.
        stamp = time.time()
        print "We are processing"
        self.conn_dict[stamp] = (request, client) # the program crashes here
        #Exception happened during processing of request from ('172.28.192.34', 49294)
        #Traceback (most recent call last):
        #  File "/usr/lib64/python2.6/SocketServer.py", line 281, in _handle_request_noblock
        #    self.process_request(request, client_address)
        #  File "./nxproxy.py", line 157, in process_request
        #    self.conn_dict[stamp] = (request, client)
        #  File "<string>", line 2, in __setitem__
        #  File "/usr/lib64/python2.6/multiprocessing/managers.py", line 725, in _callmethod
        #    conn.send((self._id, methodname, args, kwds))
        #TypeError: expected string or Unicode object, NoneType found
        self.ticket_queue.put(stamp)

    def add_worker(self, number_of_workers):
        """Spawn the given number of worker processes."""
        for worker in range(number_of_workers):
            print "Starting worker %d" % worker
            proc = multiprocessing.Process(target=self._worker, args = (self.conn_dict,))
            self._processes.append(proc)
            proc.start()

    def _worker(self, conn_dict):
        # Worker loop: wait for a ticket, then spin until the matching
        # connection appears in the shared dict.
        while 1:
            ticket = self.ticket_queue.get()
            print conn_dict
            a=0
            # NOTE(review): busy-wait with `except Exception: pass`
            # swallows every error, including the real pickling failure,
            # so a missing entry spins forever.
            while a==0:
                try:
                    request, client = conn_dict[ticket]
                    a=1
                except Exception:
                    pass
            print "We are threading!"
            # `threader` presumably comes from the Threader mixin — TODO confirm.
            self.threader(request, client)
You can use multiprocessing.reduction to transfer the connection and socket objects between processes.
Example Code
# Main process
# NOTE(review): reduce_handle/rebuild_handle are Python-2-only private
# API (removed in Python 3; use multiprocessing.reduction.send_handle /
# recv_handle, or socket.share on Windows, instead).
from multiprocessing.reduction import reduce_handle
# Serialize the OS-level handle of an already-connected socket...
h = reduce_handle(client_socket.fileno())
# ...and ship it to the worker over a multiprocessing pipe.
pipe_to_worker.send(h)

# Worker process
from multiprocessing.reduction import rebuild_handle
h = pipe.recv()
# Turn the transferred handle back into a usable file descriptor,
# then wrap it in a socket object again.
fd = rebuild_handle(h)
client_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
client_socket.send("hello from the worker process\r\n")
Looks like you need to pass file descriptors between processes (assuming Unix here, no clue about Windows). I've never done this in Python, but here is a link to the python-passfd project that you might want to check.
You can look at this code - https://gist.github.com/sunilmallya/4662837 which is
multiprocessing.reduction socket server with parent processing passing connections to client after accepting connections
Related
In the class foo in foomodule.py below, I am getting an error in the run_with_multiprocessing method. The method breaks up the number of records in self._data into chunks and calls somefunc() using a subset of the data, for example somefunc(data[0:800], 800) in the first iteration, if limit = 800.
I have done this, because running 10 * 1k records vs. 1 * 10k records shows a great performance improvement in a variation of the run_with_multiprocessing function that does the same thing, just without multiprocessing. Now I want to use multiprocessing to see if I can improve performance even more.
I am running python 3.8.2 on Windows 8.1. I am fairly new to python and multiprocessing. Thank you so much for your help.
# foomodule.py
import multiprocessing
class foo:
    """Runs some_func over fixed-size chunks of the data, one
    multiprocessing.Process per chunk.

    NOTE(review): each Process is join()ed immediately after start(),
    so the chunks still run sequentially, not in parallel.
    """

    def __init__(self, data, record_count):
        self._data = data                   # sequence of records (e.g. MySQL rows)
        self._record_count = record_count   # number of records in _data

    def some_func(self, data, record_count):
        """Process one chunk of records (body elided in the original)."""
        # looping through self._data and doing some work

    @staticmethod
    def _chunk_bounds(record_count, limit):
        """Return (start, stop) index pairs covering range(record_count)
        in steps of at most *limit* records; the last pair may be shorter."""
        return [(start, min(start + limit, record_count))
                for start in range(0, record_count, limit)]

    def run_with_multiprocessing(self, limit):
        """Run some_func on each chunk of at most *limit* records.

        Bug fixed: the original passed self._data[step:self._record_count]
        (the whole remaining tail) with count self._record_count - step on
        every iteration, so records were processed repeatedly instead of
        once per limit-sized chunk.
        """
        for start, stop in self._chunk_bounds(self._record_count, limit):
            proc = multiprocessing.Process(
                target=self.some_func,
                args=(self._data[start:stop], stop - start),
            )
            proc.start()
            proc.join()
        return
When using the class in script.py, I get the following error:
import foomodule

# data is a mysql result set with, say, 10'000 rows
# NOTE(review): `time` is used below but never imported in this excerpt,
# and `data` must already be defined at this point — presumably both are
# handled earlier in the full script.
start = time.time()
bar = foomodule.foo(data, 10000)
limit = 800
bar.run_with_multiprocessing(limit)
end = time.time()
# Report wall-clock duration of the chunked run.
print("finished after " + str(round(end-start, 2)) + "s")
Traceback (most recent call last):
File "C:/coding/python/project/script.py", line 29, in <module>
bar.run_with_multiprocessing(limit)
File "C:\coding\python\project\foomodule.py", line 303, in run_with_multiprocessing
proc.start()
File "C:\...\Python\Python38-32\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\...\Python\Python38-32\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\...\Python\Python38-32\lib\multiprocessing\context.py", line 326, in _Popen
return Popen(process_obj)
File "C:\...\Python\Python38-32\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\...\Python\Python38-32\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "C:\...\Python\Python38-32\lib\socket.py", line 272, in __getstate__
raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
TypeError: cannot pickle 'SSLSocket' object
You divide and you will win
Problem
If you pass an SSLSocket object as an argument to multiprocessing.Process(), the SSLSocket cannot be serialized.
Solution
Since you can't serialize an SSLSocket, you create it inside the subprocess (in the function passed as the target argument of multiprocessing.Process()).
Server
#!/usr/bin/python3
import ssl,multiprocessing
from sources.ClientListener import ClientListener
class SocketServer:
    """Plain-TCP accept loop that hands each accepted connection to a
    ClientListener subprocess, which wraps the socket in SSL itself.

    NOTE(review): `socket`, `threading`, `time` and `logFile` are used
    but not imported in this excerpt, and `self.PORT` / `self.CLIENTS`
    are never assigned here — presumably handled elsewhere in the file.
    """

    def __init__(self,**kwargs):
        self.args = kwargs["args"]  # command-line arguments (sys.argv)
        self.__createSocket()

    def __handlerClients(self):
        try:
            while self.sock:
                # sock.accept() blocks; each established connection gets
                # its own subprocess.
                # IMPORTANT: SSL is NOT added to the socket here, because
                # otherwise the SSLSocket object could not be pickled when
                # passed as an argument to multiprocessing.Process().
                conn,addr = self.sock.accept()
                eventChildStop = multiprocessing.Event()
                subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(conn,addr,eventChildStop))
                # This thread is responsible for closing the client's child process.
                threading.Thread(target=ClientListener.exitSubprocess,name="closeChildProcess",args=(eventChildStop,subprocess,)).start()
                subprocess.start()
                time.sleep(1)
        except:
            # NOTE(review): bare except silently swallows every error
            # (including KeyboardInterrupt) — worth narrowing.
            None

    def __createSocket(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # This allows the address/port to be reused immediately instead of
        # waiting out the TIME_WAIT state:
        # https://stackoverflow.com/questions/12362542/python-server-only-one-usage-of-each-socket-address-is-normally-permitted
        # #sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(("",self.PORT))
        self.sock.listen(self.CLIENTS)
        print(logFile().message(f"Good days. I am running ClassAdmin server, listenning {self.CLIENTS} clients by port {self.PORT}...",True,"INFO"))
        #self.sockSSL = self.context.wrap_socket(sock,server_side=True)
        # Constructor never returns: the accept loop runs from here.
        self.__handlerClients()


if __name__=="__main__":
    SocketServer(args=sys.argv)
As you can see in the __handlerClients(self) method, I do a while loop on the socket object. For each iteration I know whether a connection has been established thanks to:
conn,addr = self.sock.accept()
So, I pass the conn variable in the multiprocessing.Process(), because conn is a socket object.
The difference between conn and self.sock is that conn has the raddr parameter, while self.sock does not, and its laddr is 0.0.0.0.
self.sock
<socket.socket fd=3, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('0.0.0.0', 7788)>
conn
<socket.socket fd=5, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('192.168.0.3', 7788), raddr=('192.168.0.20', 53078)>
multiprocessing
subprocess = multiprocessing.Process(target=ClientListener, name="client", args=(conn,addr,eventChildStop))
Is the same object.
Now go at ClientListener
ClientListener
class ClientListener:
    """Runs in the child process: wraps the accepted plain socket in SSL
    and then listens for client data."""

    def __init__(self,conn,addr,event):
        # The plain socket crosses the process boundary; SSL is added
        # only here, inside the child, because an SSLSocket cannot be
        # pickled across processes.
        self.addr = addr
        self.conn = self.__SSLTunnel(conn)
        self.nick = ""
        # NOTE(review): __listenData is declared with a `sock` parameter
        # below but called here without one — TODO confirm against the
        # full source.
        self.__listenData()

    # This creates an SSL tunnel with the ClassAdmin's certificate and private key.
    def __SSLTunnel(self,sock):
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(Environment.SSL("crt"),Environment.SSL("key"))
        return context.wrap_socket(sock,server_side=True)

    def __listenData(self,sock):
        """Receive loop for client data (body elided in the original)."""
        # [...]
As you can see in the __init__(self,conn,addr,event), I get the conn variable from the previous code, and in self.conn I save the same object, but wrapped as an SSLSocket:
self.conn = self.__SSLTunnel(conn)
def __SSLTunnel(self,sock):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(Environment.SSL("crt"),Environment.SSL("key"))
return context.wrap_socket(sock,server_side=True)
Important
The SSLSocket is declared in self.conn because this can work with send() and recv() method.
data = self.conn.recv(1024)
self.conn.send("sig.SystemExit(-5000,'The nick exists and is connected',True)".encode("utf-8"))
The self.sock variable doesn't allow the accept() method.
This throws an error:
[Errno 22] Invalid argument in /etc/ClassAdmin/sources/ClientListener.py:14
Wish you a good day.
I'm building a kind of simulator that uses the Thrift protocol.
But when I'm executing multiple threads of my virtual equipments sending messages, the program breaks after a short time while receiving them; I think the buffer is overloaded or something like that, so I'm here asking for some help if that's possible.
Here's the main pieces of my code
A class for threading:
class ThreadManager (threading.Thread):
    """Worker thread: every two seconds asks its equipment object to
    auto-generate a Thrift message over the shared client.

    NOTE(review): all threads share one `client` (and therefore one
    Thrift transport); Thrift protocol state is not thread-safe, which
    matches the `assert self.state == CLEAR` failure in the traceback.
    """

    def __init__(self, name, obj, client, layout):
        threading.Thread.__init__(self)
        self.name = name        # thread name (equipment type)
        self.obj = obj          # virtual Equipment instance
        self.client = client    # shared Thrift client — see note above
        self.layout = layout    # optional fixed message layout

    def run(self):
        print ("Starting " + self.name)
        # NOTE(review): `sleep` must be imported (e.g. from time) in the
        # full module; this loop never terminates.
        while True:
            sleep(2)
            self.obj.auto_gen_msg(self.client, layout=self.layout)
The method for generating messages:
def auto_gen_msg(self, client, layout='', min_delay=15, max_delay=30):
    """Generate one message (random model if no layout is given), wait a
    random delay in [min_delay, max_delay] seconds, then invoke the
    corresponding Thrift client method.

    NOTE(review): excerpted from a class — `gen_message`,
    `draw_random_model` and `eqp_type` are defined elsewhere; `randint`
    and `sleep` need importing in the full module.
    """
    if not layout:
        msg = self.gen_message(self.draw_random_model())
    else:
        msg = self.gen_message(layout)
    wait = randint(min_delay, max_delay)
    sleep(wait)
    print(self.eqp_type, " delivered a message ...")
    # msg[0] is the Thrift method name, msg[1] its argument tuple.
    getattr(client, msg[0])(*msg[1])
The main:
def start(layout, equipment, number):
    """Create *number* virtual equipments and run one message-generating
    thread per equipment against a single Thrift client.

    NOTE(review): all threads share one transport/protocol/client, which
    is not safe in Thrift — each thread should build its own connection.
    """
    try:
        host = 'localhost'
        transport = TSocket.TSocket(host, port=9090)
        transport = TTransport.TBufferedTransport(transport)
        protocol = TCompactProtocol.TCompactProtocol(transport)
        client = SuiteService.Client(protocol)
        transport.open()
        equips = [Equipment(equipment) for i in range(number)]
        threads = [ThreadManager(i.eqp_type, i, client, layout) for i in equips]
        for thread in threads:
            thread.start()
            sleep(2)
        # Bug fixed: the original spun in `while True: pass`, burning a
        # full CPU core forever.  The worker threads never exit, so block
        # on join() instead — no busy-wait, same "never returns" effect.
        # transport.close() remains unreachable in practice.
        for thread in threads:
            thread.join()
        transport.close()
    except Thrift.TException as tx:
        print ("%s " % (tx.message))
The error haunting me:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/Users/lem4fia/Documents/sg/loki/loki-thrift/loki_thrift/loki_thrift/lib/thread_manager.py", line 39, in run
self.obj.auto_gen_msg(self.client, layout=self.layout)
File "/Users/lem4fia/Documents/sg/loki/loki-thrift/loki_thrift/loki_thrift/lib/virtual.py", line 281, in auto_gen_msg
getattr(client, msg[0])(*msg[1])
File "/Users/lem4fia/Documents/sg/loki/thrift-server/thrift_server/suite/SuiteService.py", line 4895, in v1
self.send_v1(ir, ts, ch, o1, o2, o3, o4, o5, o6, o7)
File "/Users/lem4fia/Documents/sg/loki/thrift-server/thrift_server/suite/SuiteService.py", line 4899, in send_v1
self._oprot.writeMessageBegin('v1', TMessageType.CALL, self._seqid)
File "/Users/lem4fia/Documents/sg/loki/lokiv/lib/python3.6/site-packages/thrift-0.11.0-py3.6-macosx-10.6-intel.egg/thrift/protocol/TCompactProtocol.py", line 156, in writeMessageBegin
assert self.state == CLEAR
AssertionError
Curiously, it doesn't fail when instantiating 2 virtual equipments in threads, but 10 virtual equipments (sometimes fewer than that) are enough to raise this error.
Can someone please gimme a light? :)
The problem is that it seems you have to use a different Transport object for each thread. This is probably related to Thrift's implementation!
Reference here : http://grokbase.com/t/thrift/user/134s16ks4m/single-connection-and-multiple-threads
As a general rule 1), Thrift is not intended to be used across multiple threads.
This is, at least to my knowledge, true for all currently supported languages.
One instance per thread will do.
1) aside from server-end things like TThreadedServer or TThreadPoolServer
I've broken this code somehow and I can't fix it. The server/client code was written by someone else (mostly from the examples in py manuals), and I can't work out what's wrong.
I'm getting issues with super and init and that jazz, mostly because I don't fully understand and find most documentation on the subject leaves me more confused than when I started. For now, I'll be happy enough to get it working. It's likely to be some silly issue, fixed in one line.
Any ideas? I've tried not to paste in code not relevant, but I can add more or provide the whole file if it helps. The code falls over specifically when a handle thread is created. My test case is running code instances, and passing messages between them and it falls over on receipt of the first UDP message.
# Library imports
import threading
import SocketServer
import multiprocessing
# .. More code here ...
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
    # UDP server that handles each request in its own thread (standard
    # Python 2 SocketServer mix-in composition); no extra behaviour needed.
    pass
class NodeDaemon(ThreadedUDPServer):
    """Threaded UDP server that starts its own serve_forever loop in a
    background thread (or process) as soon as it is constructed."""

    def __init__(self, host, port, modules):
        ThreadedUDPServer.__init__(self, (host, port), NodeProtocolHandler)
        # Store parameters on the instance.
        self.modules = modules
        self.port = port
        self.ip = host
        # Check if we have enabled the multiprocess listen daemon.
        # NOTE(review): with multiprocessing.Process the handlers run in
        # a child process, so any state they mutate is invisible to this
        # process — TODO confirm that is intended.
        if settings.MULTI:
            self.server_thread = multiprocessing.Process(target=self.serve_forever)
        else:
            self.server_thread = threading.Thread(target=self.serve_forever)
        # Make the server thread daemonic so it dies with the main program.
        self.server_thread.daemon = True
        # Start the server thread.
        self.server_thread.start()
        # Update our identity node info.
        self.modules.identity = NodeInfo(host, port)

    def fetch_modules(self):
        """Accessor used by request handlers to reach the shared modules."""
        return self.modules
class NodeProtocolHandler(SocketServer.BaseRequestHandler):
    """
    Handles nody things.
    Check https://docs.python.org/2/library/socketserver.html
    For more sweet deets.
    """

    # Bug fixed: the original overrode __init__ and called
    # super(SocketServer.BaseRequestHandler, self).__init__(args, kwargs),
    # which raises "TypeError: must be type, not classobj" because
    # BaseRequestHandler is an old-style class in Python 2 (and the
    # super() target skipped the class itself, and *args/**kwargs were
    # passed without unpacking).  BaseRequestHandler.__init__ also calls
    # setup()/handle()/finish() before returning, so any attribute set
    # after it would arrive too late for handle().  setup() is the
    # documented hook that runs before handle() — acquire shared state
    # there and drop the custom constructor entirely.
    def setup(self):
        # Grab modules references from the owning server.
        self.modules = self.server.fetch_modules()

    # ... More code here .. #

    def handle(self):
        """
        Main routine to handle all incoming UDP packets.
        """
        # Grab the raw message data received.  For a UDP server,
        # self.request is a (data, socket) pair.
        raw_data = self.request[0].strip()
        # ... More code here ...
The error generated is:
Exception happened during processing of request from ('127.0.0.1', 60377)
----------------------------------------
Traceback (most recent call last):
File "C:\Python27\lib\SocketServer.py", line 593, in process_request_thread
self.finish_request(request, client_address)
File "C:\Python27\lib\SocketServer.py", line 334, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "C:\some_dir\node_daemon.py", line 60, in __init__
super(SocketServer.BaseRequestHandler,self).__init__(args,kwargs)
TypeError: must be type, not classobj
def __init__(self,*args,**kwargs):
- super(SocketServer.BaseRequestHandler,self).__init__(args, kwargs)
+ SocketServer.BaseRequestHandler.__init__(self,*args, **kwargs)
I am trying to create an httpserver on a separate thread which should process a single get request and shutdown if a single parameter is passed in the url.
import sys
import threading
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class WebServer(SimpleHTTPRequestHandler,threading.Thread):
    """One class trying to be both the request handler and the server
    thread — the cause of the TypeError in the traceback below.

    NOTE(review): HTTPServer instantiates its handler class as
    RequestHandlerClass(request, client_address, server); the overridden
    __init__(self, port=9000) below accepts at most two arguments, hence
    "__init__() takes at most 2 arguments (4 given)".  Handler and
    server belong in two separate classes.
    """

    # Class-level state shared by *all* instances (per-request handler
    # instances included) — the port probing and flags rely on this.
    port = 9000
    protocol_version = "HTTP/1.1"
    code = ""
    httpd = None
    isRunning = False

    def do_GET(self):
        # Serve a fixed page, then try to pull the value after '=' off
        # the requested path.
        self.send_response(200)
        self.send_header("Content-type","text/html")
        self.end_headers()
        self.wfile.write("webpage")
        try:
            # NOTE(review): `split` is not defined in this excerpt —
            # presumably string.split was intended (self.path.split("=")[1]).
            self.code = split(self.path,"=")[1]
        except:
            pass
        else:
            self.isRunning=False
            # NOTE(review): `stop()` is not defined on this class or its
            # visible bases — TODO confirm against the full file.
            self.stop()

    def do_POST(self):
        pass

    def __init__(self,port=9000):
        threading.Thread.__init__(self)
        self.port=port

    def run(self):
        # Probe upward from self.port until a bindable port is found.
        while True:
            server_address = ("127.0.0.1",self.port)
            try:
                self.httpd = BaseHTTPServer.HTTPServer(server_address,WebServer)
            except:
                self.port +=1
            else:
                break
        self.isRunning=True
        self.httpd.serve_forever()
# Start the server thread on port 1 (binding fails there, so run()
# probes upward until a free port is found).
server = WebServer(1)
server.start()
# Busy-wait until the thread reports the server is listening.
while(server.isRunning==False):
    pass
print "Server running on port: %d" %(server.port)
# Block until the server thread exits, then print the captured code.
server.join()
print "code: "+server.code
But an error occurs when a request is sent to it:
Server running on port: 1025
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 56462)
Traceback (most recent call last):
File "/usr/lib/python2.7/SocketServer.py", line 295, in _handle_request_noblock
self.process_request(request, client_address)
File "/usr/lib/python2.7/SocketServer.py", line 321, in process_request
self.finish_request(request, client_address)
File "/usr/lib/python2.7/SocketServer.py", line 334, in finish_request
self.RequestHandlerClass(request, client_address, self)
TypeError: __init__() takes at most 2 arguments (4 given)
----------------------------------------
when using curl to connect to the server,it gives this error:
curl: (52) Empty reply from server
You're trying to do too many things with one class.
The way the SimpleHTTPRequestHandler works is that the BaseHTTPServer creates a new instance to handle each request. It does that by calling the constructor. But you have added a new constructor, which you are using to create the server itself.
Is that clear?
When the BaseHTTPServer receives a request, it takes the WebServer class (which you pass in as a subclass of SimpleHTTPRequestHandler where you create self.httpd) and tries to create an instance. It expects the constructor for the SimpleHTTPRequestHandler subclass to take four arguments, but you've added a new constructor which takes only two (and is intended to create a web server, not a handler).
It's weird because you're probably not used to things creating a new instance, but that's how it works.
So separate your server and your request handler into two separate classes (as kichik says).
I want to send data from a client to the server in a TLS TCP socket from multiple client subprocesses so I share the same ssl socket with all subprocesses. Communication works with one subprocess, but if I use more than one subprocesses, the TLS server crashes with an ssl.SSLError (SSL3_GET_RECORD:decryption failed or bad record mac).
More specific: It does not depend which process first calls the SSLSocket.write() method, but this process is the only one from this time on which can call it. If another process calls write(), the server will result in the exception described above.
I used this basic code:
tlsserver.py
import socket, ssl
def deal_with_client(connstream):
data = connstream.read()
while data:
print data
data = connstream.read()
connstream.close()
# Listening TCP socket; TLS is layered on per-connection below.
bindsocket = socket.socket()
bindsocket.bind(('127.0.0.1', 9998))
bindsocket.listen(5)

# Accept loop: wrap each accepted TCP connection in server-side TLS and
# drain it.  NOTE(review): connections are handled one at a time —
# deal_with_client() must return before the next accept().
while True:
    newsocket, fromaddr = bindsocket.accept()
    connstream = ssl.wrap_socket(newsocket,
                                 server_side=True,
                                 certfile="srv.crt",
                                 keyfile="srv.key",
                                 ssl_version=ssl.PROTOCOL_TLSv1)
    deal_with_client(connstream)
tlsclient.py
import socket, ssl
import multiprocessing
class SubProc:
    """Wraps the shared SSL socket so a child process can write to it.

    NOTE(review): sharing one SSLSocket across processes is exactly what
    corrupts the TLS record stream (the server-side bad-MAC error)."""

    def __init__(self, sock):
        self.sock = sock  # the parent's connected SSLSocket

    def do(self):
        # Child-process entry point: write over the shared TLS session.
        self.sock.write("Test")
# Connect once, then hand the *same* SSL socket to two subprocesses.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ssl.wrap_socket(s)
ssl_sock.connect(('127.0.0.1', 9998))
print "Connected to", repr(ssl_sock.getpeername())

# NOTE(review): proc is created but never start()ed or join()ed in this
# excerpt — presumably truncated from the original.
for x in (1,2):
    subproc = SubProc(ssl_sock)
    proc = multiprocessing.Process(target=subproc.do)
And this is the backtrace:
Traceback (most recent call last):
File "tlsserver.py", line 21, in <module>
deal_with_client(connstream)
File "tlsserver.py", line 7, in deal_with_client
data = connstream.read()
File "/usr/lib64/python2.6/ssl.py", line 136, in read
return self._sslobj.read(len)
ssl.SSLError: [Errno 1] _ssl.c:1325: error:1408F119:SSL routines:SSL3_GET_RECORD:decryption failed or bad record mac
The problem is that you're re-using the same connection for both processes. The way SSL encrypts data makes this fail -- the two processes would have to communicate with each other about the state of the shared SSL connection. Even if you do make it work, or if you didn't use SSL, the data would arrive at the server all jumbled up; you would have no real way of distinguishing which bytes came from which process.
What you need to do is give each process its own SSL connection, by making the connection in subproc.do. Alternatively, don't have the subprocesses communicate with the server at all, but rather communicate with the main process, and have the main process relay it over the SSL connection.