Using multiprocessing module with BaseHTTPRequestHandler - python

I'm trying to use the Python multiprocessing module to run a server in another process using the http.server.BaseHTTPRequestHandler class. I am stuck though and am running into a '_thread.lock' pickling issue.
I don't want to use the threading module because I'd rather have true parallelism, which the multiprocessing module provides.
If anyone knows what I am doing incorrectly or can point me to a good library to use that would be awesome.
import multiprocessing
from http.server import ThreadingHTTPServer, BaseHTTPRequestHandler
if __name__ == '__main__':
httpd = ThreadingHTTPServer(('localhost', 4433), BaseHTTPRequestHandler)
manager = multiprocessing.Manager()
manager.http_server = httpd
running_server = multiprocessing.Process(target=manager.http_server.serve_forever)
running_server.start()
Stack Trace:
File "/Users/redacted/python/test2/test1.py", line 10, in <module>
running_server.start()
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/context.py", line 284, in _Popen
return Popen(process_obj)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object

Python uses pickle to pass objects to another process when using the multiprocessing module. In your case, the thread lock used inside the HTTP server is not pickleable, so it reports that error.
What you can do is start the http server in another process completely like this:
import multiprocessing
from http.server import ThreadingHTTPServer, BaseHTTPRequestHandler


def startServer():
    """Build the HTTP server inside the child process and serve forever.

    Creating the server here — rather than in the parent — means only the
    function reference is pickled across the process boundary, so the
    server's unpicklable _thread.lock never has to be serialized.
    """
    httpd = ThreadingHTTPServer(('localhost', 4433), BaseHTTPRequestHandler)
    httpd.serve_forever()


if __name__ == '__main__':
    # No multiprocessing.Manager() is needed: nothing is shared with the
    # child, so the extra manager process was a leftover from the original
    # (broken) attempt and has been removed.
    running_server = multiprocessing.Process(target=startServer)
    running_server.start()
Also, you might want to try a port other than 4433. I cannot connect to that port on my Windows machine, but if I use 8000 everything works fine.

Related

Python - How to expose a prometheus server from a subprocess?

I have a service which spawns 3 sub-processes from main. I need a Prometheus metrics server in one of the processes. But I get "AttributeError: Can't pickle local object 'MultiProcessValue.&lt;locals&gt;.MmapedValue'" when the process with the Prometheus server is started.
main.py:
from os.path import dirname, join
environ['PROMETHEUS_MULTIPROC_DIR'] = join(dirname(__file__), 'prom')
import multiprocessing
from sub_process import SubProcessClass
environ['PROMETHEUS_MULTIPROC_DIR'] = join(dirname(__file__), 'prom')
if __name__ == "__main__":
...
multiprocessing.Process(target=SubProcessClass().f).start()
sub_process.py:
from my_metrics import MyMetric
class SubProcessClass:
def __init__(self):
self.metrics = MyMetric(8090)
self.metrics.start()
def f(self):
print("In f")
self.metrics.inc_my_counter()
my_metrics.py:
from prometheus_client import Counter, CollectorRegistry, multiprocess
from prometheus_client import start_http_server
class MyMetric:
def __init__(self, port):
self.port = port
self.registry = CollectorRegistry()
multiprocess.MultiProcessCollector(self.registry)
self.my_counter = Counter('counter_name', 'counter_desc', registry=self.registry)
def start(self):
start_http_server(self.port)
def inc_my_counter(self):
self.my_counter.inc()
Getting below exception on running main.py
Traceback (most recent call last):
File "<project_path>\source\main.py", line 15, in <module>
multiprocessing.Process(target=MyClass().f).start()
File "<python_path>\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "<python_path>\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "<python_path>\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "<python_path>\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "<python_path>\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'MultiProcessValue.<locals>.MmapedValue'
I am running python 3.9.7 on windows.

python multiprocessing manager cannot load list from distributed node

After several tests, I found this problem is caused by the nesting of manager.list(manager.list(...)). But I really need it to be two-dimensional. Any suggestion would be appreciated!
I'm trying to build a server and multiple clients across multiple nodes.
One node act as server which initial manager.list() for other client to use.
Other nodes act as client which attach server to get list and deal with it.
The firewall is turned off. And when the server and client are put on a single node, it works fine.
Got problem like this:
Traceback (most recent call last):
File "main.py", line 352, in <module>
train(args)
File "main.py", line 296, in train
args, proc_manager, device)
File "main.py", line 267, in make_gossip_buffer
mng,sync_freq=args.sync_freq, num_nodes=args.num_nodes)
File "/home/think/gala-master-distprocess-changing_to_multinodes/gala/gpu_gossip_buffer.py", line 49, in __init__
r_events = read_events[rank]
File "<string>", line 2, in __getitem__
File "/home/think/anaconda3/envs/AC/lib/python3.7/multiprocessing/managers.py", line 819, in _callmethod
kind, result = conn.recv()
File "/home/think/anaconda3/envs/AC/lib/python3.7/multiprocessing/connection.py", line 251, in recv
return _ForkingPickler.loads(buf.getbuffer())
File "/home/think/anaconda3/envs/AC/lib/python3.7/multiprocessing/managers.py", line 943, in RebuildProxy
return func(token, serializer, incref=incref, **kwds)
File "/home/think/anaconda3/envs/AC/lib/python3.7/multiprocessing/managers.py", line 793, in __init__
self._incref()
File "/home/think/anaconda3/envs/AC/lib/python3.7/multiprocessing/managers.py", line 847, in _incref
conn = self._Client(self._token.address, authkey=self._authkey)
File "/home/think/anaconda3/envs/AC/lib/python3.7/multiprocessing/connection.py", line 492, in Client
c = SocketClient(address)
File "/home/think/anaconda3/envs/AC/lib/python3.7/multiprocessing/connection.py", line 620, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
Server runs on a single node.
Code of server are shown below:
import torch.multiprocessing as mp
from multiprocessing.managers import ListProxy, BarrierProxy, AcquirerProxy, EventProxy
from gala.arguments import get_args
mp.current_process().authkey = b'abc'
def server(manager,host, port, key, args):
read_events = manager.list([manager.list([manager.Event() for _ in range(num_learners)])
for _ in range(num_learners)])
manager.register('get_read_events', callable=lambda : read_events, proxytype=ListProxy)
print('start service at', host)
s = manager.get_server()
s.serve_forever()
if __name__ == '__main__':
mp.set_start_method('spawn')
args = get_args()
manager = mp.Manager()
server(manager,'10.107.13.120', 5000, b'abc', args)
Clients run on other nodes; those nodes connect to the server over Ethernet. The client IP is 10.107.13.80.
Code of client are shown below:
import torch.multiprocessing as mp
mp.current_process().authkey = b'abc'
def make_gossip_buffer(mng):
read_events = mng.get_read_events()
gossip_buffer = GossipBuffer(parameters)
def train(args):
proc_manager = mp.Manager()
proc_manager.register('get_read_events')
proc_manager.__init__(address=('10.107.13.120', 5000), authkey=b'abc')
proc_manager.connect()
make_gossip_buffer(proc_manager)
if __name__ == "__main__":
mp.set_start_method('spawn')
train(args)
Any help would be appreciated!

Python multiprocessing throws can't pickle _thread.RLock objects

I am trying to delegate a long processing task to the background, so I can keep my UI responsive. Instead of using multithreading, which does not give truly concurrent processing, I implemented my background task with multiprocessing. However, I keep encountering an error that says
Traceback (most recent call last):
File "C:\source\MyApp\MyApp\env\lib\site-packages\engineio\server.py", line 411, in _trigger_event
return self.handlers[event](*args)
File "C:\source\MyApp\MyApp\env\lib\site-packages\socketio\server.py", line 522, in _handle_eio_message
self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
File "C:\source\MyApp\MyApp\env\lib\site-packages\socketio\server.py", line 458, in _handle_event
self._handle_event_internal(self, sid, data, namespace, id)
File "C:\source\MyApp\MyApp\env\lib\site-packages\socketio\server.py", line 461, in _handle_event_internal
r = server._trigger_event(data[0], namespace, sid, *data[1:])
File "C:source\MyApp\MyApp\env\lib\site-packages\socketio\server.py", line 490, in _trigger_event
return self.handlers[namespace][event](*args)
File "C:\source\MyApp\MyApp\env\lib\site-packages\flask_socketio\__init__.py", line 251, in _handler
*args)
File "C:\source\MyApp\MyApp\env\lib\site-packages\flask_socketio\__init__.py", line 634, in _handle_event
ret = handler(*args)
File "C:\source\MyApp\MyApp\MyApp\routes.py", line 171, in StartProcess
myClassObj.Start()
File "C:\source\MyApp\MyAppv\src\Controller.py", line 121, in Start
p.start()
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.RLock objects
I am wondering where I am going wrong in using multiprocessing.
class MyClass(Object):
def __init__(self):
self._logger= logging.getLogger('MyClassObj')
pass
def Start(self):
p = Process(self.Test, args(1,))
p.start()
def Test(self, number):
print(number)
I am calling this class method from flask route.py, where
#socketio.on('StartProcess')
def StartProcess(msg):
""""Do Processing"""
myClassObj.Start()
return 'OK'
This route.py is not the entry point of the my application, which this is called by runserver.py
from os import environ
from MyApp import routes
from MyApp import app
from flask_socketio import SocketIO
from MyApp.routes import socketio
if __name__ == '__main__':
HOST = environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
socketio.run(app, host='0.0.0.0', port=PORT, debug=False)
I keep seeing people mention that multiprocessing needs to run under the statement
if __name__ == '__main__':
But I am not sure how to correctly use multiprocessing in my case because at the entry point (runserver.py), I do not need my background process.
=== Edit ====
I created a very simple example to further elaborate this problem. My application has a simple flask app structure.
entry point runserver.py:
from os import environ
from MultiprocessDemo import app
if __name__ == '__main__':
HOST = environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(environ.get('SERVER_PORT', '5100'))
except ValueError:
PORT = 5100
app.run(HOST, PORT,threaded=False)
route-level views.py:
from datetime import datetime
from flask import render_template
from multiprocessing import Process
from MultiprocessDemo import app
from flask_socketio import SocketIO, emit
from MultiprocessDemo.src.MultiprocessClass import MyClass
app.config['SECRET_KEY'] = 'secret!'
#socketio = SocketIO(app)
obj = MyClass()
#app.route('/')
def index():
"""Request To Fire Multiprocess."""
return render_template(
'Demo.html'
)
#app.route('/TriggerMultiprocess', methods=['GET','POST'])
def TriggerMultiprocess():
print('request to trigger')
obj .Start()
return render_template(
'Demo.html'
)
The MyClass object which create and execute multiprocess
#import queue #Queue as of python 2.x
from multiprocessing import Queue
import threading
import cv2
import os, errno, sys
import logging
import datetime
import time
from multiprocessing import Process
class MyClass(object):
def __init__(self):
# ==Where Issue happens ==========================
self._logger= logging.getLogger('MyClassObj')
self._logger.setLevel(logging.DEBUG)
format = logging.Formatter('%(levelname)s - %(asctime)s - (module)s - %(thread)d - %(message)s')
pass
def Start(self):
self.jobs = []
self._nThread = 3
for i in range (0, self._nThread):
thread = Process(target=self.Test, args=('classobj',))
self.jobs.append(thread)
# Start the threads (i.e. calculate the random number lists)
for j in self.jobs:
j.start()
#bk thread to consume tasks
def Test(self, name):
i = 0
while i < 20:
print('hello, {}'.format(name))
I found out that if MyClass does not contain the Python logger member, the multiprocessing code executes without issue; otherwise, it throws the same error I encountered previously: TypeError: can't pickle _thread.RLock objects
However, for the same class, if I call it from the following script, without Flask, I do not encounter any issue with pickling, whether or not the logger is part of the class members.
from MultiprocessClass import MyClass
import time
if __name__ == '__main__':
a = MyClass()
a.Start()
print("done")

AttributeError: 'Context' object has no attribute 'wrap_socket'

I am trying to set up a Flask server that uses an OpenSSL context.
However, since I moved the script to a different server, it keeps throwing the following error, no matter whether I use Python 2.7 or 3.4 and no matter which SSL method I choose (SSLv23 / TLSv1 / ...):
File "/usr/lib/python3.4/threading.py", line 920, in _bootstrap_inner
self.run()
File "/usr/lib/python3.4/threading.py", line 868, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.4/dist-packages/werkzeug/serving.py", line 602, in inner
passthrough_errors, ssl_context).serve_forever()
File "/usr/local/lib/python3.4/dist-packages/werkzeug/serving.py", line 506, in make_server
passthrough_errors, ssl_context)
File "/usr/local/lib/python3.4/dist-packages/werkzeug/serving.py", line 450, in __init__
self.socket = ssl_context.wrap_socket(self.socket,
AttributeError: 'Context' object has no attribute 'wrap_socket'
The according code below:
if __name__ == "__main__":
context = SSL.Context(SSL.SSLv23_METHOD)
context.use_privatekey_file('key.key')
context.use_certificate_file('cert.crt')
app.run(host='0.0.0.0', port=80, ssl_context=context, threaded=True, debug=True)
Thank you very much in advance! I am happy for any help
As of 0.10, Werkzeug doesn't support OpenSSL contexts anymore. This decision was made because it is easier to support ssl.SSLContext across Python versions. Your option to re-write this code is this one:
if __name__ == "__main__":
context = ('cert.crt', 'key.key')
app.run(host='0.0.0.0', port=80, ssl_context=context, threaded=True, debug=True)
See http://werkzeug.pocoo.org/docs/latest/serving/ for all possibilities.

Python soap using soaplib (server) and suds (client)

This question is related to:
Python SOAP server / client
In the case of soap with python, there are recommendation to use soaplib (http://wiki.github.com/jkp/soaplib) as soap server and suds (https://fedorahosted.org/suds/) as soap client.
My target is to create soap services in python that can be consumed by several clients (java, etc).
I tried the HelloWorld example from soaplib (http://trac.optio.webfactional.com/wiki/HelloWorld).
It works well when the client is also using soaplib.
Then, I tried to use suds as client consuming the HelloWorld services and it fail.
Why is this happening? Does a soaplib server have problems being consumed by different clients?
Here the code for the server:
from soaplib.wsgi_soap import SimpleWSGISoapApp
from soaplib.service import soapmethod
from soaplib.serializers.primitive import String, Integer, Arraycode
class HelloWorldService(SimpleWSGISoapApp):
#soapmethod(String,Integer,_returns=Array(String))
def say_hello(self,name,times):
results = []
for i in range(0,times):
results.append('Hello, %s'%name)
return results
if __name__=='__main__':
from cherrypy.wsgiserver import CherryPyWSGIServer
#from cherrypy._cpwsgiserver import CherryPyWSGIServer
# this example uses CherryPy2.2, use cherrypy.wsgiserver.CherryPyWSGIServer for CherryPy 3.0
server = CherryPyWSGIServer(('localhost',7789),HelloWorldService())
server.start()
This is the soaplib client:
from soaplib.client import make_service_client
from SoapServerTest_1 import HelloWorldService
client = make_service_client('http://localhost:7789/',HelloWorldService())
print client.say_hello("Dave",5)
Results:
>>> ['Hello, Dave', 'Hello, Dave', 'Hello, Dave', 'Hello, Dave', 'Hello, Dave']
This is the suds client:
from suds.client import Client
url = 'http://localhost:7789/HelloWordService?wsdl'
client1 = Client(url)
client1.service.say_hello("Dave",5)
Results:
>>> Unhandled exception while debugging...
Traceback (most recent call last):
File "C:\Python25\Lib\site-packages\RTEP\Sequencing\SoapClientTest_1.py", line 10, in <module>
client1.service.say_hello("Dave",5)
File "c:\python25\lib\site-packages\suds\client.py", line 537, in __call__
return client.invoke(args, kwargs)
File "c:\python25\lib\site-packages\suds\client.py", line 597, in invoke
result = self.send(msg)
File "c:\python25\lib\site-packages\suds\client.py", line 626, in send
result = self.succeeded(binding, reply.message)
File "c:\python25\lib\site-packages\suds\client.py", line 658, in succeeded
r, p = binding.get_reply(self.method, reply)
File "c:\python25\lib\site-packages\suds\bindings\binding.py", line 158, in get_reply
result = unmarshaller.process(nodes[0], resolved)
File "c:\python25\lib\site-packages\suds\umx\typed.py", line 66, in process
return Core.process(self, content)
File "c:\python25\lib\site-packages\suds\umx\core.py", line 48, in process
return self.append(content)
File "c:\python25\lib\site-packages\suds\umx\core.py", line 63, in append
self.append_children(content)
File "c:\python25\lib\site-packages\suds\umx\core.py", line 140, in append_children
cval = self.append(cont)
File "c:\python25\lib\site-packages\suds\umx\core.py", line 61, in append
self.start(content)
File "c:\python25\lib\site-packages\suds\umx\typed.py", line 77, in start
found = self.resolver.find(content.node)
File "c:\python25\lib\site-packages\suds\resolver.py", line 341, in find
frame = Frame(result, resolved=known, ancestry=ancestry)
File "c:\python25\lib\site-packages\suds\resolver.py", line 473, in __init__
resolved = type.resolve()
File "c:\python25\lib\site-packages\suds\xsd\sxbasic.py", line 63, in resolve
raise TypeNotFound(qref)
TypeNotFound: Type not found: '(string, HelloWorldService.HelloWorldService, )'
Try importing the primitive serializers inside your class:
class HelloWorldService(SimpleWSGISoapApp):
from soaplib.serializers.primitive import String, Integer, Arraycode
#soapmethod(String,Integer,_returns=Array(String))
This bug is fixed if you get the latest sources from the trunk; see https://github.com/soaplib/soaplib/pull/12 for details.

Categories

Resources