Reconnecting to ZMQ feed after disconnect - python

I have this simple python script which connects to a ZMQ feed and spits out some data:
#!/usr/bin/env python2
import zlib
import zmq
import simplejson

def main():
    context = zmq.Context()
    subscriber = context.socket(zmq.SUB)
    # Connect to the first publicly available relay.
    subscriber.connect('tcp://relay-us-east-1.eve-emdr.com:8050')
    # Disable filtering.
    subscriber.setsockopt(zmq.SUBSCRIBE, "")
    while True:
        # Receive raw market JSON strings.
        market_json = zlib.decompress(subscriber.recv())
        # Un-serialize the JSON data to a Python dict.
        market_data = simplejson.loads(market_json)
        # Dump the typeID of the first rowset.
        results = market_data.get('rowsets')[0]
        print results['typeID']

if __name__ == '__main__':
    main()
This runs on my home server. Sometimes my home server loses internet connectivity (the curse of a residential connection), and when the network drops out and comes back, the script stalls. Is there any way to reinitialize the connection? I'm still new to Python, so a point in the right direction would be wonderful. =)

Not sure this is still relevant, but here goes:
Use a timeout. On ZMQ < 3.0 it would look something like this (not tested):
#!/usr/bin/env python2
import zlib
import zmq
import simplejson

def main():
    context = zmq.Context()
    while True:
        subscriber = context.socket(zmq.SUB)
        # Connect to the first publicly available relay.
        subscriber.connect('tcp://relay-us-east-1.eve-emdr.com:8050')
        # Disable filtering.
        subscriber.setsockopt(zmq.SUBSCRIBE, "")
        this_call_blocks_until_timeout = recv_or_timeout(subscriber, 60000)
        print 'Timeout'
        subscriber.close()

def recv_or_timeout(subscriber, timeout_ms):
    poller = zmq.Poller()
    poller.register(subscriber, zmq.POLLIN)
    while True:
        socket = dict(poller.poll(timeout_ms))
        if socket.get(subscriber) == zmq.POLLIN:
            # Receive raw market JSON strings.
            market_json = zlib.decompress(subscriber.recv())
            # Un-serialize the JSON data to a Python dict.
            market_data = simplejson.loads(market_json)
            # Dump the typeID of the first rowset.
            results = market_data.get('rowsets')[0]
            print results['typeID']
        else:
            # Timeout!
            return

if __name__ == '__main__':
    main()
ZMQ > 3.0 allows you to set the socket's RCVTIMEO option, which causes its recv() to raise a timeout error, without the need for a Poller object.
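For ZMQ >= 3.0, a minimal sketch of the same reconnect loop using RCVTIMEO instead of a Poller (also not tested; the timeout is the same 60 seconds as above):
import zlib
import zmq
import simplejson

context = zmq.Context()
while True:
    subscriber = context.socket(zmq.SUB)
    subscriber.connect('tcp://relay-us-east-1.eve-emdr.com:8050')
    subscriber.setsockopt(zmq.SUBSCRIBE, "")
    # recv() now raises zmq.Again if nothing arrives within 60 seconds.
    subscriber.setsockopt(zmq.RCVTIMEO, 60000)
    try:
        while True:
            market_json = zlib.decompress(subscriber.recv())
            market_data = simplejson.loads(market_json)
            print(market_data.get('rowsets')[0]['typeID'])
    except zmq.Again:
        print('Timeout, reconnecting')
        subscriber.close()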

Related

Why is ZeroMQ multipart sending/receiving wrong messages?

In Python I'm creating an application that also uses ZeroMQ. I'm using the PUSH/PULL method to send the loading status of one script to another. The messages received by the PULL script are handled inside a thread. The PULL script looks like this:
import time
from threading import Thread
import threading
import os
import zmq
import sys

context = zmq.Context()
zmqsocket = context.socket(zmq.PULL)
zmqsocket.bind("tcp://*:5555")

class TaskstatusUpdater(Thread):
    def __init__(self):
        Thread.__init__(self)

    def run(self):
        while True:
            # Wait for next request from client
            task_id = int(zmqsocket.recv_multipart()[0])
            taskcolorstat = int(zmqsocket.recv_multipart()[1])
            taskstatus = zmqsocket.recv_multipart()[2]
            time.sleep(0.1)
            print(task_id, taskstatus, taskcolorstat)

thread = TaskstatusUpdater()
thread.start()
The PUSH part constantly sends updates about the status of the other script. It looks something like this:
import time
import sys
import zmq

# zmq - client startup and connecting
try:
    context = zmq.Context()
    print("Connecting to server…")
    zmqsocket = context.socket(zmq.PUSH)
    zmqsocket.connect("tcp://localhost:5555")
    print("successful")
except:
    print('error could not connect to service')

for i in range(10):
    zmqsocket.send_multipart([b_task_id, b"0", b"first message"])
    time.sleep(3)  # doing stuff
    zmqsocket.send_multipart([b_task_id, b"1", b"second message"])
b_task_id is generated earlier in the program and is a simple binary value created from an integer. There are multiple of those PUSH scripts running at the same time, and through b_task_id I can tell which script is responding to the PULL.
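For illustration only (this snippet is not from the original program), b_task_id could be produced from the integer like this:
task_id = 2                        # integer identifying this PUSH script
b_task_id = str(task_id).encode()  # b'2', usable as a multipart frame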
It now often happens that those multipart messages get mixed up between each other. Can somebody explain to me why that is and how I can fix this problem?
For example, sometimes the output is:
2 b'second message' 0
The output that I was expecting is:
2 b'second message' 1

Run incoming requests in threads

I wrote a Python script that connects to a TCP socket (plaintext) and waits for requests. The TCP connection is persistent; all requests and responses are handled over that one connection. See https://openvpn.net/community-resources/management-interface/ for technical docs.
My current script works fine, but it's single-threaded. I'm using the Python select API to wait until new data is available on the socket, then do something and write a response back. While doing some work (it could be time.sleep(30), for example) the whole application is blocked and a new request won't be answered.
Since the requests carry an identifier, the responses do not have to be sent in the same order as the requests arrived.
Minimalistic code example of the current solution:
import select
import socket
import time

def _socket_recv(_sock) -> str:
    """Receive bytes from socket and convert to string."""
    buffer_size = 4096  # 4 KiB
    data = b""
    while True:
        part = _sock.recv(buffer_size)
        data += part
        if len(part) < buffer_size:
            # either 0 or end of data
            break
    return data.decode("utf-8")

def do_work(data: str) -> None:
    print(data)
    # Do something
    time.sleep(10)
    ret = "status 1\n"
    s.send(bytes(ret, "utf-8"))

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("{}".format('127.0.0.1'), 8081))

# Password
s.send(bytes("pass\n", "utf-8"))

while True:
    # wait until data is available
    _ = select.select([s], [], [])[0]
    # CLIENT notifications may be multi-line, and the sequentiality of a given
    # CLIENT notification, its associated environmental variables, and the
    # terminating ">CLIENT:ENV,END" line are guaranteed to be atomic.
    notifications = _socket_recv(s)
    if notifications.startswith(">CLIENT:"):
        do_work(notifications)
Maybe a ThreadPool/WorkerPool would be a good approach, but how do I manage concurrent writes to the TCP socket? Should the worker threads write to the socket directly? Are there known frameworks for this?
You could introduce a buffer between the socket and the processing engine, as below.
import select
import socket
import threading
import time

def _socket_recv(_sock) -> str:
    """Receive bytes from socket and convert to string."""
    buffer_size = 4096  # 4 KiB
    data = b""
    while True:
        part = _sock.recv(buffer_size)
        data += part
        if len(part) < buffer_size:
            # either 0 or end of data
            break
    return data.decode("utf-8")

buffer = []

def do_work() -> None:
    while True:
        if len(buffer) > 0:
            data = buffer.pop(0)
            print(data)
            # Do something
            time.sleep(10)
            ret = "status 1\n"
            s.send(bytes(ret, "utf-8"))
        else:
            # avoid a busy-wait spin while the buffer is empty
            time.sleep(0.05)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("{}".format('127.0.0.1'), 8081))

threading.Thread(target=do_work).start()

# Password
s.send(bytes("pass\n", "utf-8"))

while True:
    # wait until data is available
    _ = select.select([s], [], [])[0]
    # CLIENT notifications may be multi-line, and the sequentiality of a given
    # CLIENT notification, its associated environmental variables, and the
    # terminating ">CLIENT:ENV,END" line are guaranteed to be atomic.
    notifications = _socket_recv(s)
    if notifications.startswith(">CLIENT:"):
        buffer.append(notifications)
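If you move to a thread/worker pool, Python's standard queue module gives you that buffer for free, and a lock can serialize the concurrent writes to the shared socket. A minimal sketch under the same assumptions as above (s is the connected socket; the pool size of 4 is an arbitrary choice):
import queue
import threading
import time

jobs = queue.Queue()          # thread-safe buffer between reader and workers
send_lock = threading.Lock()  # serializes writes to the shared socket

def worker():
    while True:
        data = jobs.get()     # blocks, so no busy-waiting
        print(data)
        time.sleep(10)        # do something
        with send_lock:
            s.send(bytes("status 1\n", "utf-8"))
        jobs.task_done()

for _ in range(4):            # arbitrary pool size
    threading.Thread(target=worker, daemon=True).start()
In the select loop, buffer.append(notifications) then becomes jobs.put(notifications).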

Python ZMQ examples over WLAN network

For a project I need to communicate between C++ and Python via ZMQ over the WLAN network.
If I use my C++ implementation, everything works fine. I just type the IP + port number into client.bind("tcp://...") and I can send messages via WLAN.
If I try the same with the Python code, it does not work.
So I just tested the Python examples (no C++ anymore):
http://zguide.zeromq.org/py:durapub
http://zguide.zeromq.org/py:durasub
I replaced the localhost in the client with the IP of my host computer. I do not receive any messages. I am using exactly the code from the example, except for that replacement.
Here is the code:
PUBLISHER:
import zmq
import time

context = zmq.Context()

# Subscriber tells us when it's ready here
sync = context.socket(zmq.PULL)
sync.bind("tcp://*:5564")

# We send updates via this socket
publisher = context.socket(zmq.PUB)
publisher.bind("tcp://*:5565")

# Wait for synchronization request
sync_request = sync.recv()

# Now broadcast exactly 10 updates with pause
for n in xrange(10):
    msg = "Update %d" % n
    publisher.send(msg)
    time.sleep(1)

publisher.send("END")
time.sleep(1)  # Give 0MQ/2.0.x time to flush output
SUBSCRIBER
import zmq
import time

context = zmq.Context()

# Connect our subscriber socket
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.IDENTITY, "Hello")
subscriber.setsockopt(zmq.SUBSCRIBE, "")
subscriber.connect("tcp://192.168.2.119:5565")

# Synchronize with the publisher
sync = context.socket(zmq.PUSH)
sync.connect("tcp://192.168.2.119:5564")
sync.send("")

# Get updates, expect random Ctrl-C death
while True:
    data = subscriber.recv()
    print data
    if data == "END":
        break
It's exactly the example code, except that I changed localhost to the IP address of my publisher in the subscriber code. By the way, I did the same in the C++ example code and it works.

Pass parameters from one notebook to another

I have two notebooks in Python.
The first one checks if a file exists in the data lake. I want to return a Boolean from there, plus the filePath if it exists.
The next notebook then uses these params as an input. How is this possible?
Also, could I use an If condition in the pipeline to check the returned Boolean?
Kind of new to Azure ADF.
One method to pass messages between separate Python scripts or Jupyter notebooks is to use the pyzmq library. Run pairserver in one notebook and pairclient in another. You will see messages being passed from one to the other. This does add an extra dependency to your code, but pyzmq is a mature package.
pairserver.ipynb
#!/usr/bin/python3
import zmq
import random
import time

port = '5556'
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind('tcp://*:%s' % port)

while True:
    socket.send(b'Server message to client')
    msg = socket.recv()
    print(msg)
    time.sleep(1)
pairclient.ipynb
#!/usr/bin/python3
import zmq
import random
import sys
import time

port = '5556'
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.connect("tcp://localhost:%s" % port)

while True:
    msg = socket.recv()
    print(msg)
    socket.send_string("client message to server")
    time.sleep(1)
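To carry the specific values from the question (the Boolean and the file path), you could serialize them as JSON over the same PAIR socket. A sketch; the payload keys and the placeholder path are illustrative, not from the original notebooks:
import json
import zmq

context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.connect('tcp://localhost:5556')

# Illustrative payload carrying the Boolean and the file path.
payload = {'fileExists': True, 'filePath': '<path-in-data-lake>'}
socket.send_string(json.dumps(payload))
The receiving notebook would then recover the dict with json.loads(socket.recv_string()) and could branch on payload['fileExists'].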

How to do fan-out in ZeroMQ? Forwarding from a set of topics to multiple clients

I need to implement a server that receives a request specifying a set of topics via a control channel (req-rep) and then, as the response, sends either a URL pointing to a publisher socket that will be opened for this specific client, or a rejection message (because of insufficient privileges).
I managed to implement a version that can handle only one client at a time (with two endless loops), but I do not know which pattern to use to handle multiple clients concurrently.
It's important for me that the sockets for different clients stay separate.
Here's simplified code:
import zmq

context = zmq.Context()
upstream_port = 10000
upstream_host = 'localhost'
control_channel_port = 11000
upstream_addr = 'tcp://{}:{}'.format(upstream_host, upstream_port)

def should_grant(request):
    '''
    Permission checking - irrelevant to the question
    '''
    return True

def bind_downstream():
    downstream = context.socket(zmq.PUB)
    addr = 'tcp://*'
    port = downstream.bind_to_random_port(addr)
    return downstream, port

def bind_control_channel():
    control_channel_sock = context.socket(zmq.REP)
    control_channel_sock.bind('tcp://*:{}'.format(control_channel_port))
    return control_channel_sock

def connect_upstream(topics):
    raw_data = context.socket(zmq.SUB)
    for t in topics:
        raw_data.setsockopt_unicode(zmq.SUBSCRIBE, unicode(t))
    raw_data.connect(upstream_addr)
    return raw_data

if __name__ == '__main__':
    print("Binding control channel socket on {}".format('tcp://*:{}'.format(control_channel_port)))
    control_channel = bind_control_channel()
    while True:
        request = control_channel.recv_json()
        print("Received request {}".format(request))
        if should_grant(request):
            (downstream_sock, downstream_port) = bind_downstream()
            print("Downstream socket open on {}".format('tcp://*:{}'.format(downstream_port)))
            print("Connecting to upstream on {}".format(upstream_addr))
            upstream_sock = connect_upstream(request['topics'])
            control_channel.send_json({'status': 'ok', 'port': downstream_port})
            while True:
                parts = upstream_sock.recv_multipart()  # Simple forwarding
                downstream_sock.send_multipart(parts)
        else:
            control_channel.send_json({'status': 'rejected'})
The correct way to do this would be to use threads.
Your main program or thread would handle the control channel loop. As soon as a connection appears, you would create the upstream and downstream sockets but handle the actual transfer in a thread. I am not sure if the code below works, as I do not have a client to test it with, but give it a go and see what happens. You will get the idea nevertheless.
from threading import Thread
....
....

class ClientManager(Thread):
    def __init__(self, ups, downs):
        super(ClientManager, self).__init__()
        self.upstream_socket = ups
        self.downstream_socket = downs

    def run(self):
        while True:
            _parts = self.upstream_socket.recv_multipart()
            self.downstream_socket.send_multipart(_parts)

if __name__ == '__main__':
    print("Binding control channel socket on {}".format('tcp://*:{}'.format(control_channel_port)))
    control_channel = bind_control_channel()
    while True:
        request = control_channel.recv_json()
        print("Received request {}".format(request))
        if should_grant(request):
            (downstream_sock, downstream_port) = bind_downstream()
            print("Downstream socket open on {}".format('tcp://*:{}'.format(downstream_port)))
            print("Connecting to upstream on {}".format(upstream_addr))
            upstream_sock = connect_upstream(request['topics'])
            control_channel.send_json({'status': 'ok', 'port': downstream_port})
            _nct = ClientManager(upstream_sock, downstream_sock)
            _nct.daemon = True
            _nct.start()
        else:
            control_channel.send_json({'status': 'rejected'})
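One thread-safety note on this sketch: ZeroMQ contexts are thread-safe, but individual sockets are not. The pattern above stays within that rule because each ClientManager thread takes exclusive ownership of its upstream/downstream socket pair, and the main thread never touches those sockets again after start().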
