How to get the client connection object in a Tornado websocket (Python)

I am using Tornado websockets for some simple test code.
In the test code, I want to get hold of the tornado.websocket.WebSocketHandler instance for the connected client.
For example, I tried the approach below.
class ConvPlayerInterface(object):
    class WebsocketHandler(tornado.websocket.WebSocketHandler):
        client = None
        queue = ipcQueue.IpcQueue()

        def open(self):
            print 'new connection'
            self.client = self  # in my simple code, it handles only one client
            self.write_message("Connection Open")

        def on_message(self, message):
            self.queue.put(message)

        def on_close(self):
            print 'connection closed'

    def __init__(self, url='/ws'):
        self.application = tornado.web.Application([(url, self.WebsocketHandler),])
        self.httpServer = tornado.httpserver.HTTPServer(self.application)
        self.httpServer.listen(8888)
        self.queue = self.WebsocketHandler.queue
        self.ioLoop = threading.Thread(target=tornado.ioloop.IOLoop.instance().start)

    def start(self):
        self.ioLoop.start()

    def get(self):
        return self.queue.get()

    def put(self, command):
        self.WebsocketHandler.client.write_message(command)
But at the point where put() calls self.WebsocketHandler.client.write_message(command), Python complains that client is NoneType.
Any advice?
And what is the usual way to get the client connection handler object in Tornado?

In this part of your code

def put(self, command):
    self.WebsocketHandler.client.write_message(command)

you are accessing the WebsocketHandler class itself, not an instance of it, and the "client" attribute of the class is still None, as expected: self.client = self in open() only sets an instance attribute and never touches the class attribute. A new WebsocketHandler instance is created for every request Tornado accepts, so there can be several websocket handlers alive simultaneously.
If you really want to handle only one connection, you can do something like this:
class ConvPlayerInterface(object):
    the_only_handler = None

    class WebsocketHandler(tornado.websocket.WebSocketHandler):
        client = None
        queue = ipcQueue.IpcQueue()

        def open(self):
            print 'new connection'
            ConvPlayerInterface.the_only_handler = self
            self.write_message("Connection Open")

        def on_message(self, message):
            self.queue.put(message)

        def on_close(self):
            ConvPlayerInterface.the_only_handler = None
            print 'connection closed'

    def __init__(self, url='/ws'):
        self.application = tornado.web.Application([(url, self.WebsocketHandler),])
        self.httpServer = tornado.httpserver.HTTPServer(self.application)
        self.httpServer.listen(8888)
        self.queue = self.WebsocketHandler.queue
        self.ioLoop = threading.Thread(target=tornado.ioloop.IOLoop.instance().start)

    def start(self):
        self.ioLoop.start()

    def get(self):
        return self.queue.get()

    def put(self, command):
        if self.the_only_handler is not None:
            self.the_only_handler.write_message(command)
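One caveat worth adding (this note is mine, not part of the original answer): ConvPlayerInterface starts the IOLoop in a separate thread, and Tornado objects are not thread-safe, so calling write_message() directly from put() on the caller's thread can misbehave; IOLoop.add_callback is the one method that is safe to call from another thread. A minimal sketch of put() written that way, assuming the class above:

def put(self, command):
    handler = ConvPlayerInterface.the_only_handler
    if handler is not None:
        # Hand the actual write over to the IOLoop thread; add_callback
        # is thread-safe, write_message by itself is not.
        tornado.ioloop.IOLoop.instance().add_callback(
            lambda: handler.write_message(command))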

Related

Python3 asyncio - callback for add_done_callback does not update self variable in server class

I have two servers, created with asyncio.start_server:

asyncio.start_server(self.handle_connection, host=host, port=port)

and running in one loop:

loop.run_until_complete(asyncio.gather(server1, server2))
loop.run_forever()

I'm using an asyncio.Queue to communicate between the servers. Messages from Server2, added via queue.put(msg), are successfully received by queue.get() in Server1. I'm running queue.get() via asyncio.ensure_future and registering a callback with add_done_callback in Server1:
def callback(self, future):
    msg = future.result()
    self.msg = msg
But this callback does not work as expected: self.msg never updates. What am I doing wrong?
UPDATED with additional code to show a fuller example:
class Queue(object):
    def __init__(self, loop, maxsize: int):
        self.instance = asyncio.Queue(loop=loop, maxsize=maxsize)

    async def put(self, data):
        await self.instance.put(data)

    async def get(self):
        data = await self.instance.get()
        self.instance.task_done()
        return data

    @staticmethod
    def get_instance():
        return Queue(loop=asyncio.get_event_loop(), maxsize=10)
Server class:
class BaseServer(object):
    def __init__(self, host, port):
        self.instance = asyncio.start_server(self.handle_connection, host=host, port=port)

    async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
        pass

    def get_instance(self):
        return self.instance

    @staticmethod
    def create():
        return BaseServer(None, None)
Next I'm running the servers:
loop.run_until_complete(asyncio.gather(server1.get_instance(), server2.get_instance()))
loop.run_forever()
In the handle_connection of server2 I'm calling queue.put(msg); in the handle_connection of server1 I register queue.get() as a task:
task_queue = asyncio.ensure_future(queue.get())
task_queue.add_done_callback(self.process_queue)
The process_queue method of server1:
def process_queue(self, future):
    msg = future.result()
    self.msg = msg
The handle_connection method of server1:
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    task_queue = asyncio.ensure_future(queue.get())
    task_queue.add_done_callback(self.process_queue)
    while self.msg != SPECIAL_VALUE:
        # doing something
Although task_queue is done and self.process_queue is called, self.msg never updates.
Basically, since you are already using an asynchronous structure, I think you can directly await the result:

async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    msg = await queue.get()
    process_queue(msg)  # change it to accept the real value instead of a future
    # do something
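For completeness, here is a self-contained sketch of that pattern (my own illustration, assuming Python 3.7+ and plain asyncio names such as producer/consumer rather than the asker's classes): the consumer awaits the queue on the event loop, so no ensure_future/add_done_callback pair is needed.

import asyncio

queue = asyncio.Queue()

async def producer(reader, writer):
    # Plays the role of server2: push incoming data into the shared queue.
    data = await reader.read(100)
    await queue.put(data)
    writer.close()

async def consumer(reader, writer):
    # Plays the role of server1: await the queue directly instead of
    # registering a done-callback on an ensure_future() task.
    msg = await queue.get()
    writer.write(b"got: " + msg)
    await writer.drain()
    writer.close()

async def main():
    s1 = await asyncio.start_server(consumer, host="127.0.0.1", port=8881)
    s2 = await asyncio.start_server(producer, host="127.0.0.1", port=8882)
    async with s1, s2:
        await asyncio.gather(s1.serve_forever(), s2.serve_forever())

# asyncio.run(main())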

How to send a message from a child process to a websocket client in Tornado?

I have a Tornado server. It receives messages from websocket connections. I need to run a worker function as a separate process, and the worker should answer the client. The main idea is to work in parallel. Something like this:
def worker(ws, message):
    input = json.loads(message)
    t = input["time"]
    time.sleep(t)
    ws.write_message("Hello, World!" * int(t))

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.render('index.html')

class WebSocket(tornado.websocket.WebSocketHandler):
    def check_origin(self, origin):
        return True

    def open(self):
        print("WebSocket opened")
        self.application.webSocketsPool.append(self)

    def on_message(self, message):
        for key, value in enumerate(self.application.webSocketsPool):
            if value == self:
                p = Process(target=worker, args=(value.ws_connection, message,))
                p.start()

    def on_close(self):
        print("WebSocket closed")
        for key, value in enumerate(self.application.webSocketsPool):
            if value == self:
                del self.application.webSocketsPool[key]
Of course, this doesn't work because of a pickling error. How can I solve this problem?
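No answer is included for this one, but a common workaround (my sketch, not from the original post) is to send only picklable data to the child, let it return the reply, and do the write_message() back on the IOLoop in the parent, e.g. with a concurrent.futures.ProcessPoolExecutor:

from concurrent.futures import ProcessPoolExecutor
import json
import time

import tornado.ioloop
import tornado.websocket

executor = ProcessPoolExecutor(max_workers=4)

def worker(message):
    # Runs in a child process; only the JSON string crosses the boundary,
    # so nothing unpicklable (handler, socket) has to be serialized.
    t = json.loads(message)["time"]
    time.sleep(t)
    return "Hello, World!" * int(t)

class WebSocket(tornado.websocket.WebSocketHandler):
    def on_message(self, message):
        future = executor.submit(worker, message)

        def reply(f, ws=self):
            result = f.result()
            # The done-callback fires on an executor thread, so hop back
            # onto the IOLoop thread before touching the websocket.
            tornado.ioloop.IOLoop.instance().add_callback(
                lambda: ws.write_message(result))

        future.add_done_callback(reply)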

Why is this zmq code not working?

This test doesn't work.
class PrintHandler(MessageHandler):
    def handle_message(self, message):
        print(message)

class FileHandler(MessageHandler):
    def handle_message(self, message):
        with open('nana', 'w') as f:
            f.write(message)

class SubscribeProcess(Process):
    def __init__(self, handler):
        super(SubscribeProcess, self).__init__(group=None, target=None, name=None, args=(), kwargs={})
        self.handler = handler

    def run(self):
        self.address = TcpAddress(host='127.0.0.1', port=5555)
        subscriber = ZmqSubscriber(ZmqBlockingConnection(address=self.address, bind=False))
        subscriber.set_message_handler(self.handler)
        print('............')

class TestZmqSubscriber(TestCase):
    def test_set_message_handler(self):
        address = TcpAddress(host='127.0.0.1', port=5555)
        pub_connection = ZmqBlockingConnection(address, bind=True)
        publisher = ZmqPublisher(pub_connection)
        p = SubscribeProcess(handler=PrintHandler())
        p.start()
        while True:
            publisher.publish('Message number {}'.format(2))
I know this is not actually a unit test. But I want to see the received messages in the console first; then I will write a proper test.
Meanwhile, these two scripts work perfectly:
connection = ZmqBlockingConnection(TcpAddress(host='127.0.0.1', port=5555), bind=False)
sub = ZmqSubscriber(connection)
sub.set_message_handler(PrintHandler())

address = TcpAddress(host='127.0.0.1', port=5555)
pub_connection = ZmqBlockingConnection(address, bind=True)
publisher = ZmqPublisher(pub_connection)
while True:
    publisher.publish('Message number {}'.format(2))
Inside subscriber.set_message_handler(handler) there is actually this:
def start_receiving_messages(self, message_handler):
    while True:
        message_handler.handle_message(self.socket.recv())
And in the debugger I see that the code hangs forever in socket.recv().
Maybe I'm using multiprocessing wrong?
EDIT 1:
class ZmqBlockingConnection(Connection):
    def start_receiving_messages(self, message_handler):
        while True:
            message_handler.handle_message(self.socket.recv())

    def send_message(self, message):
        self.socket.send(message)

    def __init__(self, address, bind, hwm=1000):
        self.hwm = hwm
        self.bind = bind
        self.address = address
        self.socket = None

    def set_hwm(self, hwm):
        self.socket.set_hwm(hwm)

    def configure(self, socket_type):
        self.socket = zmq.Context().socket(socket_type)
        if self.bind:
            self.socket.bind(str(self.address))
        else:
            self.socket.connect(str(self.address))
        self.set_hwm(self.hwm)
OK, the problem was in

def configure(self, socket_type):
    self.socket = zmq.Context().instance().socket(socket_type)
    if self.bind:
        self.socket.bind(str(self.address))
    else:
        self.socket.connect(str(self.address))
    self.set_hwm(self.hwm)

So instead of using the singleton context, I started to create separate context instances, and now it's working.
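As a side note (my addition, not part of the original post): a ZeroMQ context generally should not be shared across a fork, which is why the per-process context works. A bare-bones pyzmq sketch of the same idea, without the asker's wrapper classes:

import zmq
from multiprocessing import Process

def subscribe(address):
    # Create the context inside the child process, after the fork,
    # instead of reusing a singleton from the parent.
    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    sock.setsockopt(zmq.SUBSCRIBE, b"")
    sock.connect(address)
    while True:
        print(sock.recv())

if __name__ == '__main__':
    address = 'tcp://127.0.0.1:5555'
    Process(target=subscribe, args=(address,)).start()

    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.bind(address)
    while True:
        pub.send(b'Message number 2')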

Receiving a zmq message with an error every hour

I should say that before I started to use the zmq reactor instead of the poller, everything worked fine.
class BaseZmqReceiver(BaseZmqNode):
    __metaclass__ = ABCMeta

    def __init__(self, host, port, hwm, bind, on_receive_callback):
        super(BaseZmqReceiver, self).__init__(host=host, port=port, bind=bind, hwm=hwm)
        self.node.on_message_callback = on_receive_callback
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.on_message_received)
        ZmqLoopRunner().start()

    def on_message_received(self, message):
        return self.node.on_message_callback(message)

    def create_node(self):
        return ReceivingNode(None, None)

class ZmqLoopRunner(Thread):
    def __init__(self):
        super(ZmqLoopRunner, self).__init__()
        self.loop = IOLoop.instance()
        self.daemon = True

    def run(self):
        self.loop.start()

    def stop(self):
        self.loop.stop()

class ZmqSubscriber(BaseZmqReceiver):
    def __init__(self, host, port, on_receive_callback, bind=False, hwm=1000):
        super(ZmqSubscriber, self).__init__(host=host, port=port, hwm=hwm, bind=bind,
                                            on_receive_callback=on_receive_callback)

    def create_socket(self):
        socket = self.context.socket(zmq.SUB)
        socket.setsockopt(zmq.SUBSCRIBE, "")
        return socket
Here is my zmq code. In the callback I'm basically just receiving a multipart message:

def on_message(message):
    part1, part2 = message

And about once every hour I get a message that consists of only one part, so I get:

TypeError: need more than one value to unpack

EDIT: here is my full zmq code:
https://drive.google.com/file/d/0B7jQezPDaLZFQWxBMUdXQkxnS1k/edit?usp=sharing
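There is no answer attached to this question here, but a defensive pattern for the callback (my sketch, not from the post) is to check the frame count before unpacking, so an occasional single-frame message gets logged instead of raising:

def on_message(message):
    # ZMQStream.on_recv delivers a list of frames; don't assume
    # there are always exactly two.
    if len(message) != 2:
        print('unexpected frame count %d: %r' % (len(message), message))
        return
    part1, part2 = message
    # ... handle part1 and part2 as before ...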

Python socket hangs program

I am working on a simple HTTP server in Python. I am taking bits and pieces from here: http://hg.python.org/cpython/file/3.3/Lib/socketserver.py to see how Python's standard library handles it.
My problem is that as soon as I try to accept requests, my program hangs. Here is my code; it's only about 100 lines, so I'll just post it directly here.
I have a process() function inside a loop that runs forever, and it's supposed to handle new connections. Inside it I have a print statement that only gets printed once:
print('processing') in TCPServer.process()
I have tried running process() in a thread, but I get the same result.
"""."""
import socket
import select
from abc import abstractmethod, ABCMeta
class BaseServer(metaclass=ABCMeta):
def __init__(self, server_address, server_port, RequestHandlerClass):
self._server_address = server_address
self._server_port = server_port
self._RequestHandlerClass = RequestHandlerClass
self._running = False
def serve_forever(self):
self._running = True
while self._running:
self.process()
#abstractmethod
def process(self):
pass
def shutdown(self):
self._running = False
class TCPServer(BaseServer):
def __init__(self,
server_address,
server_port,
RequestHandlerClass,
address_family=socket.AF_INET,
socket_type=socket.SOCK_STREAM,
request_queue_size=1,
bind=True):
super(TCPServer, self).__init__(server_address,
server_port,
RequestHandlerClass)
self._address_family = address_family
self._socket_type = socket_type
self._request_queue_size = request_queue_size
self._socket = socket.socket(self._address_family, self._socket_type)
self._read_list = [self._socket]
if bind:
self.bind()
def bind(self):
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self._server_address, self._server_port))
self._socket.listen(self._request_queue_size)
def shutdown(self):
super().shutdown()
self._socket.close()
def process(self):
print('processing')
readable, writeable, errored = select.select(self._read_list, [], [])
for socket in readable:
if socket is self._socket:
client_socket, client_address = self._socket.accept()
self._read_list.append(client_socket)
print('connection from: ', client_address)
else:
self._RequestHandlerClass(client_socket)
self._read_list.remove(client_socket)
class BaseRequestHandler(metaclass=ABCMeta):
def __init__(self, client_socket):
self._client_socket = client_socket
self.setup()
try:
self.handle()
finally:
self.finish()
#abstractmethod
def setup(self):
pass
#abstractmethod
def handle(self):
pass
#abstractmethod
def finish(self):
pass
class HTTPRequestHandler(BaseRequestHandler):
def setup(self):
print('REQUEST SETUP')
print(self._client_socket.recv(2048))
def handle(self):
print('REQUEST HANDLE')
def finish(self):
print('REQUEST FINISH')
self._client_socket.close()
if __name__ == '__main__':
tcp_server = TCPServer(server_address='',
server_port=9000,
RequestHandlerClass=HTTPRequestHandler)
tcp_server.serve_forever()
I ran your code but couldn't make it hang. However, there is a fatal error in your process() function where you refer to client_socket in the else: branch, but client_socket is not defined at that point. You probably meant to refer to socket.
I was able to make two connections to the server on port 9000, and get "connection from:" lines for each. As soon as one of those connections sent something, your server would crash for the above reason.
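To make that fix concrete, here is a minimal rewrite of process() (my sketch; the loop variable is renamed to sock so it no longer shadows the socket module, and the else: branch uses it instead of the undefined client_socket):

def process(self):
    print('processing')
    readable, writeable, errored = select.select(self._read_list, [], [])
    for sock in readable:
        if sock is self._socket:
            # New connection on the listening socket.
            client_socket, client_address = self._socket.accept()
            self._read_list.append(client_socket)
            print('connection from: ', client_address)
        else:
            # Data is ready on an already-accepted client socket.
            self._RequestHandlerClass(sock)
            self._read_list.remove(sock)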
