I'm working on a project where a data capture system sends EMG data to a python script through an Arduino Uno's serial port and the python script classifies the user's intent by processing a chunk of that data. The data capture system sends samples at 250Hz and I require my python script to not miss samples the capture system is sending while classifying.
For this I want two processes running in parallel:
A process continuously capturing the data and buffering it. (using PySerial) - running every 0.004s
A process taking data from the buffer, preprocess it and classifying it. - this process approximately takes 0.03 seconds to run per loop excluding data acquisition
I don't know how to proceed with this. I've tried and read through a lot of articles but to no avail.
I tried threading, but apparently threading is not true parallelism, so I gave up trying to make it work.
I tried using multiprocessing but the multiprocessing module doesn't like pickling pyserial.
Here's what I tried: (It's probably full of mistakes, I hacked it up but I expected some output, also this is not indefinitely buffering, but I'm trying to get at least the first buffer printed)
Driver Code:
import GatherData as gd
import serial.tools.list_ports
from multiprocessing import Pool
if __name__ == '__main__':
    # Driver: pick a COM port, open it, then try to fan work out to a
    # process pool.  This is where the quoted traceback comes from: a
    # pyserial Serial object wraps OS handles (ctypes pointers) and
    # cannot be pickled for transfer to child processes.  The usual
    # fixes are (a) use a thread for the serial reader -- reading is
    # I/O-bound, so the GIL is released while blocked on the port -- or
    # (b) pass only the port NAME to the worker process and open the
    # Serial object inside it.
    arr = []
    serialInst = serial.Serial()
    ports = serial.tools.list_ports.comports()
    portList = []
    for Port in ports:
        portList.append(str(Port))
        print(str(Port))
    val = input("Select Port: COM")
    for x in range(0,len(portList)):
        if(portList[x].startswith("COM"+str(val))):
            portVar = "COM"+str(val)
            print(portList[x])
    serialInst.baudrate = 115200
    serialInst.port = portVar
    serialInst.open()
    p = Pool(6) # Number of concurrent processes
    # BUG: starmap expects an iterable of argument tuples, e.g.
    # [(arr, serialInst)]; passing (arr, serialInst) treats arr itself
    # as the first "tuple".  Either way, serialInst must be pickled to
    # reach the children, which raises the ValueError shown below.
    arr = p.starmap(gd.collect, (arr, serialInst)) # Start all processes
    # BUG: printnum(arr) is CALLED here in the parent (and it loops
    # forever); Pool.map needs a callable plus an iterable:
    # p.map(gd.printnum, iterable).
    p.map(gd.printnum(arr))
    p.close()
    p.join()
GatherData.py:
import serial.tools.list_ports
import numpy as np
import pandas as pd
# import Dependencies as dp
def collect(array, serialInst):
    """Read one 4-channel EMG sample from the serial port and slide it
    into the rolling window.

    Parameters
    ----------
    array : list[list[float]]
        Current rolling window; an empty list triggers an initial fill
        via makearr().
    serialInst : serial.Serial
        Open pyserial handle to read from.

    Returns
    -------
    list[list[float]]
        Updated window: oldest sample dropped, newest appended.
    """
    ch = 4  # number of EMG channels per tab-separated line
    if not array:
        # First call: build the initial window.
        # FIX: the original called makearr() with no arguments, but
        # makearr requires the serial handle (TypeError at runtime).
        return makearr(serialInst)
    nextw = array[1:]  # drop the oldest sample
    temparr = []
    if serialInst.in_waiting:
        packet = serialInst.readline()
        line_as_list = packet.split(b'\t')
        for i in range(1, ch + 1):
            try:
                rand1 = line_as_list[i]
                rand1List = rand1.split(b'\n')
                temparr.append(float(rand1List[0]))
            except (IndexError, ValueError):
                # Malformed line: retry against the ORIGINAL window.
                # FIX: the original recursed as collect(array), missing
                # the serialInst argument (TypeError on any bad line).
                return collect(array, serialInst)
        nextw.append(temparr)
        print(nextw)
    return nextw
def makearr(serialInst):
    """Block until 50 complete 4-channel samples have been collected.

    Polls the serial handle, parses each tab-separated line, and keeps
    only lines where all four channel fields convert cleanly to float.

    Returns a list of 50 rows of the form [ch1, ch2, ch3, ch4].
    """
    final = []
    while len(final) < 50:
        if not serialInst.in_waiting:
            continue  # nothing buffered yet; poll again
        fields = serialInst.readline().split(b'\t')
        row = []
        for idx in range(1, 5):
            try:
                row.append(float(fields[idx].split(b'\n')[0]))
            except (IndexError, ValueError):
                break  # malformed line -> discard the whole row
        else:
            # All four fields parsed: keep the sample.
            final.append(row)
    return final
def printnum(array):
    # Debug monitor: continuously overwrite one console line with the
    # current buffer length.
    # NOTE(review): this loop never terminates, and `array` is a local
    # reference that nothing here updates, so the printed length can
    # never change.
    while True:
        if array:
            print(len(array), end='\r')
The errors:
COM8 - Arduino Uno (COM8)
Select Port: COM8
COM8 - Arduino Uno (COM8)
Traceback (most recent call last):
File "D:\anaconda\envs\tf\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "c:\users\indra\documents\igibup.py", line 34, in <module>
arr = p.starmap(gd.collect, (arr, serialInst)) # Start all processes
File "D:\anaconda\envs\tf\lib\multiprocessing\pool.py", line 276, in starmap
return self._map_async(func, iterable, starmapstar, chunksize).get()
File "D:\anaconda\envs\tf\lib\multiprocessing\pool.py", line 657, in get
raise self._value
File "D:\anaconda\envs\tf\lib\multiprocessing\pool.py", line 431, in _handle_tasks
put(task)
File "D:\anaconda\envs\tf\lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "D:\anaconda\envs\tf\lib\multiprocessing\reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
ValueError: ctypes objects containing pointers cannot be pickled
I don't understand how to proceed.
Related
I have a dummy example, I want to apply multiprocessing in it. Consider a scenario where you have a stream of numbers(which I call frame) incoming one by one. And I want to assign it to any single process that is available currently. So I am creating 4 processes that are running a while loop, seeing if any element in queue, than apply function on it.
The problem is that when I join the processes, they get stuck inside the while loop, even though I clear the flag that should end the loop beforehand. Somehow they never exit it.
Code:
# step 1, 4 processes
import multiprocessing as mp
import os
import time
# Original (buggy) version from the question: a manager-backed frame
# queue fanned out to worker processes, with a shared boolean flag used
# to stop them.
class MpListOperations:
    def __init__(self):
        # NOTE(review): each mp.Manager() call spawns a separate manager
        # process; three are created here where one would suffice.
        self.results_queue = mp.Manager().Queue()
        self.frames_queue = mp.Manager().Queue()
        self.flag = mp.Manager().Value(typecode='b',value=True)
        self.list_nums = list(range(0,5000))
    def process_list(self):
        # Worker loop.
        # BUG: qsize() on a managed queue is only approximate; several
        # workers can see a "non-empty" queue, but only one wins the
        # get() for the last item -- the others then block inside get()
        # forever and never recheck the flag.
        print(f"Process id {os.getpid()} started")
        while self.flag.value:
            # print(self.flag.value)
            if self.frames_queue.qsize():
                self.results_queue.put(self.frames_queue.get()**2)
    def create_processes(self, no_of_processes = mp.cpu_count()):
        print("Creating Processes")
        self.processes = [mp.Process(target=self.process_list) for _ in range(no_of_processes)]
    def start_processes(self):
        print(f"starting processes")
        for process in self.processes:
            process.start()
    def join_process(self):
        # Waits for the frames queue to drain, clears the flag, then
        # joins the workers.  A worker stuck in get() never sees the
        # cleared flag, so join() without a timeout can hang here.
        print("Joining Processes")
        while True:
            if not self.frames_queue.qsize():
                self.flag.value=False
                print("JOININNG HERE")
                for process in self.processes:
                    exit_code = process.join()
                    print(exit_code)
                print("BREAKING DONE")
                break
    def stream_frames(self):
        print("Streaming Frames")
        for frame in self.list_nums:
            self.frames_queue.put(frame)
if __name__=="__main__":
    start = time.time()
    mp_ops = MpListOperations()
    mp_ops.create_processes()
    mp_ops.start_processes()
    mp_ops.stream_frames()
    # Can hang: workers blocked in get() never recheck the stop flag.
    mp_ops.join_process()
    print(time.time()-start)
Now if I add a timeout parameter to join, even 0 — i.e. exit_code = process.join(0) — it works. I want to understand: in this scenario, if this code is correct, what should the value of the timeout be? Why does it work with a timeout and not without one? What is the proper way to implement multiprocessing here?
If you look at the documentation for a managed queue you will see that the qsize method only returns an approximate size. I would therefore not use it for testing when all the items have been taken of the frames queue. Presumably you want to let the processes run until all frames have been processed. The simplest way I know would be to put N sentinel items on the frames queue after the actual frames have been put where N is the number of processes getting from the queue. A sentinel item is a special value that cannot be mistaken for an actual frame and signals to the process that there are no more items for it to get from the queue (i.e. a quasi end-of-file item). In this case we can use None as the sentinel items. Each process then just continues to do get operations on the queue until it sees a sentinel item and then terminates. There is therefore no need for the self.flag attribute.
Here is the updated and simplified code. I have made some other minor changes that have been commented:
import multiprocessing as mp
import os
import time
# Fixed version: sentinel items (None) on the frames queue replace the
# shared flag, so each worker exits cleanly once the queue is drained.
class MpListOperations:
    def __init__(self):
        # Only create one manager process:
        manager = mp.Manager()
        self.results_queue = manager.Queue()
        self.frames_queue = manager.Queue()
        # No need to convert range to a list:
        self.list_nums = range(0, 5000)
    def process_list(self):
        # Worker loop: square frames until a None sentinel arrives.
        print(f"Process id {os.getpid()} started")
        while True:
            frame = self.frames_queue.get()
            if frame is None: # Sentinel?
                # Yes, we are done:
                break
            self.results_queue.put(frame ** 2)
    def create_processes(self, no_of_processes = mp.cpu_count()):
        print("Creating Processes")
        # Remember the worker count so stream_frames can enqueue one
        # sentinel per worker.
        self.no_of_processes = no_of_processes
        self.processes = [mp.Process(target=self.process_list) for _ in range(no_of_processes)]
    def start_processes(self):
        print("Starting Processes")
        for process in self.processes:
            process.start()
    def join_processes(self):
        print("Joining Processes")
        for process in self.processes:
            # join returns None:
            process.join()
    def stream_frames(self):
        print("Streaming Frames")
        for frame in self.list_nums:
            self.frames_queue.put(frame)
        # Put sentinels:
        for _ in range(self.no_of_processes):
            self.frames_queue.put(None)
if __name__== "__main__":
    start = time.time()
    mp_ops = MpListOperations()
    mp_ops.create_processes()
    mp_ops.start_processes()
    mp_ops.stream_frames()
    mp_ops.join_processes()
    # Total wall-clock time for all 5000 frames.
    print(time.time()-start)
Prints:
Creating Processes
Starting Processes
Process id 28 started
Process id 29 started
Streaming Frames
Process id 33 started
Process id 31 started
Process id 38 started
Process id 44 started
Process id 42 started
Process id 45 started
Joining Processes
2.3660173416137695
Note for Windows
I have modified method start_processes to temporarily set attribute self.processes to None:
def start_processes(self):
    """Start all worker processes.

    self.processes is temporarily set to None while start() runs:
    under Windows ("spawn"), each Process pickles its target's self,
    and a list containing already-started Process objects fails to
    pickle ("TypeError: cannot pickle 'weakref' object").
    """
    print("Starting Processes")
    processes = self.processes
    # Don't try to pickle list of processes:
    self.processes = None
    for process in processes:
        process.start()
    # Restore attribute:
    self.processes = processes
Otherwise under Windows we get a pickle error trying to serialize/deserialize a list of processes containing two or more multiprocessing.Process instances. The error is "TypeError: cannot pickle 'weakref' object." This can be demonstrated with the following code where we first try to pickle a list of 1 process and then a list of 2 processes:
import multiprocessing as mp
import os
# Minimal reproduction of the Windows pickle failure: starting the
# second Process pickles self, and self.processes by then contains an
# already-started Process whose weakref cannot be pickled.
class Foo:
    def __init__(self, number_of_processes):
        self.processes = [mp.Process(target=self.worker) for _ in range(number_of_processes)]
        self.start_processes()
        self.join_processes()
    def start_processes(self):
        # NOTE(review): this local copy is unused; the loop still
        # iterates self.processes, so the pickling problem remains --
        # that is the point of this demo.
        processes = self.processes
        for process in self.processes:
            process.start()
    def join_processes(self):
        for process in self.processes:
            process.join()
    def worker(self):
        print(f"Process id {os.getpid()} started")
        print(f"Process id {os.getpid()} ended")
if __name__== "__main__":
    foo = Foo(1)  # works: pickled list holds one un-started Process
    foo = Foo(2)  # fails: list already holds a started Process
Prints:
Process id 7540 started
Process id 7540 ended
Traceback (most recent call last):
File "C:\Booboo\test\test.py", line 26, in <module>
foo = Foo(2)
File "C:\Booboo\test\test.py", line 7, in __init__
self.start_processes()
File "C:\Booboo\test\test.py", line 13, in start_processes
process.start()
File "C:\Program Files\Python38\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python38\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'weakref' object
Process id 18152 started
Process id 18152 ended
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Each worker process is stuck in the queue's get() method. This is because multiple processes could see that the queue wasn't empty, but only one of them was able to get the last item. The remaining processes are blocked waiting for the next item to become available from the queue.
You might need to add a Lock when you are reading the size of the Queue object And getting the object of that queue.
Or alternatively, you avoid reading the size of the queue by simply using the queue.get() method with a timeout that allows us to check the flag regularly
import queue
TIMEOUT = 1 # seconds
class MpListOperations:
    #[...]
    def process_list(self):
        # Worker loop: a blocking get() WITH a timeout replaces the
        # qsize() check, so the stop flag is rechecked at least every
        # TIMEOUT seconds and no worker can block forever.
        print(f"Process id {os.getpid()} started")
        previous = self.flag.value
        while self.flag.value:
            try:
                got = self.frames_queue.get(timeout=TIMEOUT)
            except queue.Empty:
                # No frame within TIMEOUT; loop back to recheck flag.
                pass
            else:
                print(f"Gotten {got}")
                self.results_queue.put(got**2)
            _next = self.flag.value
            if previous != _next:
                print(f"Flag change: {_next}")
$ python ./test_mp.py
Creating Processes
starting processes
Process id 36566 started
Streaming Frames
Process id 36565 started
Process id 36564 started
Process id 36570 started
Process id 36567 started
Gotten 0
Process id 36572 started
Gotten 1
Gotten 2
Gotten 3
Process id 36579 started
Gotten 4
Gotten 5
Gotten 6
Process id 36583 started
Gotten 7
# [...]
Gotten 4997
Joining Processes
Gotten 4998
Gotten 4999
JOININNG HERE
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Flag change: False
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
Exit code : None
BREAKING DONE
1.4375360012054443
Alternatively, using a multiprocessing.Pool object:
def my_func(arg):
    """Simulate a ~2 ms unit of work, then return the square of arg."""
    time.sleep(0.002)
    squared = arg * arg
    return squared
def get_input():
    """Yield 0..4999, pausing ~1 ms after each item to mimic a slow feed."""
    i = 0
    while i < 5000:
        yield i
        time.sleep(0.001)
        i += 1
if __name__=="__main__":
    start = time.time()
    # Pool.map consumes the generator and hands frames to whichever
    # worker is free -- no hand-rolled queues or stop flags needed.
    mp_pool = mp.Pool()
    result = mp_pool.map(my_func, get_input())
    mp_pool.close()
    mp_pool.join()
    print(len(result))
    print(f"Duration: {time.time()-start}")
Giving:
$ python ./test_mp.py
5000
Duration: 6.847279787063599
I have a script that is supposed to connect to some Linux workstations, run a command and generate a CSV file to email. I'm having issues with concurrent.futures. This is a snippet of my code:
#!/usr/bin/python3
import csv
import os
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from getpass import getuser as username
from glob import glob
from netmiko import ConnectHandler as NetConnect
from paramiko import ssh_exception as PExcept
from time import sleep, perf_counter
start = datetime.now()
yr = start.year
month = start.month
day = start.day
hour = start.hour
minute = start.minute
mo = f"{month:02d}"
d = f"{day:02d}"
h = f"{hour:02d}"
mi = f"{minute:02d}"
tsent = f"{h}:{mi}"
def connect_node(device, writer, lock):
    """SSH to one workstation, run checkStation.sh, and append a
    (host, status) row to the shared CSV report.

    Parameters:
        device: dict of keyword arguments for netmiko's ConnectHandler.
        writer: shared csv.writer for the report file.
        lock: threading lock serializing writes to the CSV file.
    """
    # SSH Exceptions
    paramiko_exceptions = (
        PExcept.AuthenticationException,
        PExcept.BadAuthenticationType,
        PExcept.BadHostKeyException,
        PExcept.ChannelException,
        PExcept.ConfigParseError,
        PExcept.CouldNotCanonicalize,
        PExcept.NoValidConnectionsError,
        PExcept.PartialAuthentication,
        PExcept.PasswordRequiredException,
        PExcept.ProxyCommandFailure,
        PExcept.SSHException,
    )
    try:
        conn = NetConnect(**device) # Testing ssh connection to device
    except paramiko_exceptions as e:
        print(
            f"\nOops! Can't connect to {str.rstrip(device['host'])}\n{e}\n"
            + "*"*120
        )
        csvdata = (str.strip(device["host"]), "UNREACHABLE")
        # NOTE(review): unlike the PASSED/FAILED rows below, this write
        # is NOT guarded by `lock`; concurrent threads could interleave
        # rows here -- confirm and wrap in `with lock:` if so.
        writer.writerow(csvdata)
        return
    # Check the mac address format
    status_check = conn.send_command("checkStation.sh")
    sleep(1)
    status_list = status_check.splitlines()
    with lock:
        if "PASSED" in status_list[-1]:
            csvdata = (str.strip(device["host"]), "PASSED")
            writer.writerow(csvdata)
        else:
            csvdata = (str.strip(device["host"]), "FAILED")
            writer.writerow(csvdata)
    conn.disconnect() # Closing connection to device
    return
# Splitting the thread_list into smaller chunks for safe multithreading
def split(a, n):
k, m = divmod(len(a), n)
return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
# Remove stale CSV reports from previous runs before writing a new one.
fileList = glob("*.csv", recursive=True)
for filePath in fileList:
    try:
        os.remove(filePath)
    except OSError:
        print("Error while deleting file")
        pass
# _main_ #
password = "pa55w0rd" # Put your password Eg. "pa55w0rd"
# Creates a report file
report = open(f"AuditReport-{yr}{mo}{d}.csv", "a+")
writer = csv.writer(report)
csvHeader = ("Station-ID", "CheckStation-Status")
writer.writerow(csvHeader) # Write header csv report file.
device_list = open("DeviceList.txt") # Opening the file with device names
station_list = [] # Create list to store threads in
t1 = perf_counter()
print("Please wait while we try to log into your devices...")
# Create connection dictionary
for device_name in device_list:
    device = {
        "device_type": "linux",
        "host": device_name,
        "username": username,
        "password": password,
        "verbose": False,
        "global_delay_factor": 3.0,
        "conn_timeout": 6.0,
        "session_timeout": 6.0,
        "auth_timeout": 6.0,
        "banner_timeout": 6.0,
        "allow_auto_change": False,
        "fast_cli": False,
    }
    station_list.append(device)
with ThreadPoolExecutor(max_workers=12) as executor:
    # BUG (matches the traceback below): executor.map calls
    # connect_node(device) with a single argument, but the function
    # also requires `writer` and `lock` -- pass them with
    # executor.submit(connect_node, device, writer, lock) or
    # functools.partial.  Note too that no lock object is ever created
    # in this script, and that executor.map yields RESULTS, not Future
    # objects, so the .exception()/.result() calls below would fail
    # even after the arguments are fixed.
    futures = executor.map(connect_node, station_list)
    for future in futures:
        exc = future.exception()
        if exc:
            print('Task failed!!\n' + str(exc))
        else:
            result = future.result()
            writer.writerow(result)
t2 = perf_counter()
print(f"Completed!!\nIt took {t2 - t1:0.2f} seconds.")
The traceback I'm receiving is the following when I try to run the code:
"C:\Program Files\Python39\python.exe" "C:/Users/nunya/Documents/Visual Studio 2019/Scripts/Python/StationChecks1.py"
Please wait while we try to log into your devices...
Traceback (most recent call last):
File "C:\Users\nunya\Documents\Visual Studio 2019\Scripts\Python\StationChecks1.py", line 133, in <module>
for future in futures:
File "C:\Program Files\Python39\lib\concurrent\futures\_base.py", line 600, in result_iterator
yield fs.pop().result()
File "C:\Program Files\Python39\lib\concurrent\futures\_base.py", line 433, in result
return self.__get_result()
File "C:\Program Files\Python39\lib\concurrent\futures\_base.py", line 389, in __get_result
raise self._exception
File "C:\Program Files\Python39\lib\concurrent\futures\thread.py", line 52, in run
result = self.fn(*self.args, **self.kwargs)
TypeError: connect_node() missing 2 required positional arguments: 'writer' and 'lock'
Process finished with exit code 1
Any help would be greatly appreciated.
I would like to control an actuator with a python script in MODBUS RTU
master. I tried to use the library minimalmodbus to communicate (write
bit, write & read registers) with my slave.
When I start my code, I have some errors. So, someone can I help me to find a solution?
My code:
import minimalmodbus
import os
import struct
import sys
import serial
import time
# Configure a Modbus RTU master on /dev/ttyRS485, slave address 1.
instrument = minimalmodbus.Instrument('/dev/ttyRS485', 1)
# NOTE(review): this line is a no-op -- it reads the attribute and
# discards the value.
instrument.serial.port
instrument.serial.baudrate = 9600
instrument.serial.parity = serial.PARITY_NONE
instrument.serial.bytesize = 8
instrument.serial.stopbits = 1
instrument.mode = minimalmodbus.MODE_RTU
instrument.serial.timeout = 0.05
# write_bit / write_register return None on success, so every print
# below showing "None" is expected behavior, not an error.
modbus = instrument.write_bit(0x0427, 1)
print (modbus)
alarme = instrument.write_bit(0x0404, 1)
print (alarme)
alarme = instrument.write_bit(0x0404, 0)
print (alarme)
on = instrument.write_bit(0x0403, 1)
print (on)
home = instrument.write_bit(0x040B, 1)
print (home)
position = instrument.write_register(0x9900, 0, number_of_decimals=2,functioncode=16, signed=False)
print (position)
# BUG (matches the traceback below): number_of_decimals=2 makes
# minimalmodbus multiply the value by 100 before packing, so 6000
# becomes 600000, which exceeds the 65535 limit of a single 16-bit
# register ('H' struct format).  Use number_of_decimals=0, or send
# 60.00 if the device expects two implied decimals.
posi = instrument.write_register(0x9901, 6000, number_of_decimals=2,functioncode=16, signed=False)
print (posi)
Errors:
========================= RESTART: /home/pi/test.py =========================
None
None
None
None
None
None
Traceback (most recent call last):
File "/home/pi/.local/lib/python3.5/site-packages/minimalmodbus.py", line 2448, in _pack
result = struct.pack(formatstring, value)
struct.error: 'H' format requires 0 <= number <= 65535
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/pi/test.py", line 36, in <module>
posi = instrument.write_register(0x9901, 6000, number_of_decimals=2, functioncode=16, signed=False)
File "/home/pi/.local/lib/python3.5/site-packages/minimalmodbus.py",line 518, in write_register
payloadformat=_PAYLOADFORMAT_REGISTER,
File "/home/pi/.local/lib/python3.5/site-packages/minimalmodbus.py",line 1166, in _generic_command
payloadformat,
File "/home/pi/.local/lib/python3.5/site-packages/minimalmodbus.py",line 1514, in _create_payload
value, number_of_decimals, signed=signed
File "/home/pi/.local/lib/python3.5/site-packages/minimalmodbus.py", line 1991, in
_num_to_twobyte_string outstring = _pack(formatcode, integer)
File "/home/pi/.local/lib/python3.5/site-packages/minimalmodbus.py", line 2454, in _pack
raise ValueError(errortext.format(value, formatstring))
ValueError: The value to send is probably out of range, as the num-to-bytestring conversion failed.
Value: 600000 Struct format code is: >H
In response to your request in the comments for an alternative library, here is what I use to read modbus with the pymodbus library:
import pymodbus
from pymodbus.pdu import ModbusRequest
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
from pymodbus.transaction import ModbusRtuFramer
# Open an RTU (serial) Modbus connection; adjust the port, baudrate
# and parity to match the slave device.
client = ModbusClient(
    method = 'rtu'
    ,port='/dev/tty.usbserial-AQ00BYCR'
    ,baudrate=38400
    ,parity = 'O'
    ,timeout=1
)
connection = client.connect()
# Read 100 holding registers starting at address 0 from slave 1.
registers = client.read_holding_registers(0,100,unit=1)# start_address, count, slave_id
# NOTE(review): if the read fails, pymodbus returns an error response
# without a .registers attribute -- check registers.isError() first.
print (registers.registers)
Note that in the above, the reading begins from address 0 and continues to address 100, for slave_id 1.
To write registers, do the following:
# Write one holding register; returns the write response object.
write = client.write_register(1,425,unit=1)# address = 1, value to set = 425, slave ID = 1
So I have two version of taking pics with picamera.
This one works when I run.
import cameraTrigger as ct
from picamera import PiCamera
import time
# Capture TOTAL_PICS frames through the helper module; takePic() with
# no path returns the frame as an in-memory array.
n=1
TOTAL_PICS=30
while n <= TOTAL_PICS:
    img = ct.takePic()
    n+=1
cameraTrigger.py
import time
import socket
import pickle as p
import numpy as np
import cv2
from picamera import PiCamera
from common import constantSource as cs
# Module-level camera: PiCamera() claims the camera hardware once at
# import time; only one live instance can exist per system, so any
# other script creating a PiCamera while this module is loaded fails.
camera = PiCamera()
# Presumably (width, height) -- takePic builds a (size[1], size[0], 3)
# array for raw captures, which implies that ordering.
size = cs.getImageSize()
camera.resolution = size
def takePic(path=None):
    """Capture a single frame.

    If path is given, save the image to that file and return None;
    otherwise return the frame as a (height, width, 3) uint8 BGR
    array.  Prints the capture duration in both cases.
    """
    data = None
    start = time.time()
    if path is None:
        data = np.empty((size[1], size[0], 3), dtype=np.uint8)
        camera.capture(data, "bgr")
    else:
        camera.capture(path)
    print("Trigger time: " + str(time.time() - start))
    return data
However, this one (an integrated script) does not work. It stops in line 3 (camera = PiCamera()).
from picamera import PiCamera
import time
# The reported MMAL "Out of resources" error occurs on this line --
# the quoted traceback (stereoCalibration_pi.py, `left = PiCamera()`)
# suggests another PiCamera instance is already open in the same run,
# and the firmware cannot enable a second pipeline.  TODO confirm that
# any previously created camera is close()d first.
camera = PiCamera()
camera.resolution = (720,560)
time.sleep(2.0)
n=1
TOTAL_PICS=30
while n <= TOTAL_PICS:
    # BUG: numpy is used but never imported in this script, so even
    # after the camera opens this raises NameError -- add
    # `import numpy as np` at the top.
    img = np.empty((560,720,3),dtype=np.uint8)
    camera.capture(img,"bgr")
    n+=1
with this error
mmal: mmal_vc_port_enable: failed to enable port vc.null_sink:in:0(OPQV): ENOSPC
mmal: mmal_port_enable: failed to enable connected port (vc.null_sink:in:0(OPQV))0x10ad0a0 (ENOSPC)
mmal: mmal_connection_enable: output port couldn't be enabled
Traceback (most recent call last):
File "stereoCalibration_pi.py", line 17, in <module>
left = PiCamera()
File "/home/pi/.virtualenvs/cv/local/lib/python3.5/site-packages/picamera/camera.py", line 433, in __init__
self._init_preview()
File "/home/pi/.virtualenvs/cv/local/lib/python3.5/site-packages/picamera/camera.py", line 513, in _init_preview
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
File "/home/pi/.virtualenvs/cv/local/lib/python3.5/site-packages/picamera/renderers.py", line 558, in __init__
self.renderer.inputs[0].connect(source).enable()
File "/home/pi/.virtualenvs/cv/local/lib/python3.5/site-packages/picamera/mmalobj.py", line 2212, in enable
prefix="Failed to enable connection")
File "/home/pi/.virtualenvs/cv/local/lib/python3.5/site-packages/picamera/exc.py", line 184, in mmal_check
raise PiCameraMMALError(status, prefix)
picamera.exc.PiCameraMMALError: Failed to enable connection: Out of resources
What is the reason for the error I encounter? Thank you for your help in advance!
Say I want to ping something from different locations, so I wrap ping commandline tool into python and use pyro4 prc library to call it.
I have a python Pyro4 nameserver
import Pyro4
# Run the Pyro4 name server with compressed wire traffic.
# NOTE(review): Pyro4 config such as COMPRESSION generally has to
# match between clients and servers; verify every client sets it too,
# as a mismatch could explain stalled connections.
Pyro4.config.COMPRESSION = True
Pyro4.naming.startNSloop("ipofnameserver")
And simple ping server:
# Python 2 code (print statements).  Exposes a single remote method
# that shells out to the system `ping` binary.
class Pinger(object):
    def ping(self, host):
        # NOTE(review): "-c 4" is passed as ONE argv element; many
        # getopt-style pings tolerate the attached value, but passing
        # "-c", "4" separately is safer -- verify on the target OS.
        return subprocess.check_output(["ping", host, "-c 4"])
pinger = Pinger()
daemon = Pyro4.Daemon() # make a Pyro daemon
# NOTE(review): "ipofnameservre" looks like a typo for
# "ipofnameserver" -- confirm the intended host string.
ns = Pyro4.locateNS(host = "ipofnameservre", port=9090) # find the name server
uri = daemon.register(pinger) # register the greeting object as a Pyro object
print ns.register("location1", uri)
print "Ready. Object uri =", uri # print the uri so we can use it in the client later
daemon.requestLoop()
As long as I have only two pingservers everything is OK, but after I add a third one the name server stops responding. Every pingserver has a unique name, of course.
For example, I want to check availability of the servers:
# Client-side availability check (Python 2): list every registered
# object on the name server and try to open a connection to each.
ns = Pyro4.locateNS(host = "nameserverip", port=9090)
names = ns.list().keys()
print names
print ns.list()
for n in names:
    if n == 'Pyro.NameServer': continue
    proxy = Pyro4.Proxy("PYRONAME:"+n)
    try:
        # Force an immediate connection instead of the usual lazy one.
        print n, proxy._Proxy__pyroCreateConnection()
    except:
        # NOTE(review): bare except hides the real failure (timeout,
        # refused connection, lookup error) -- catch the exception and
        # print it to see WHY a server is "offline".
        print "offline"
This works with two pingservers, but with three it just waits for something. Traceback of this script terminated with ctrl+C:
ns = Pyro4.locateNS(host = "nameserverip", port=9090)
File "/usr/local/lib/python2.7/dist-packages/Pyro4/naming.py", line 319, in locateNS
proxy.ping()
File "/usr/local/lib/python2.7/dist-packages/Pyro4/core.py", line 146, in __call__
return self.__send(self.__name, args, kwargs)
File "/usr/local/lib/python2.7/dist-packages/Pyro4/core.py", line 250, in _pyroInvoke
self.__pyroCreateConnection()
File "/usr/local/lib/python2.7/dist-packages/Pyro4/core.py", line 312, in __pyroCreateConnection
msgType, flags, seq, data = MessageFactory.getMessage(conn, None)
File "/usr/local/lib/python2.7/dist-packages/Pyro4/core.py", line 665, in getMessage
headerdata = connection.recv(cls.HEADERSIZE)
File "/usr/local/lib/python2.7/dist-packages/Pyro4/socketutil.py", line 323, in recv
return receiveData(self.sock, size)
File "/usr/local/lib/python2.7/dist-packages/Pyro4/socketutil.py", line 104, in receiveData
data=sock.recv(size, socket.MSG_WAITALL)
strace shows the following:
socket(PF_INET, SOCK_STREAM, IPPROTO_IP) = 3 fcntl(3, F_GETFL)
= 0x2 (flags O_RDWR) fcntl(3, F_SETFL, O_RDWR) = 0 connect(3, {sa_family=AF_INET, sin_port=htons(9090),
sin_addr=inet_addr("ipofnameserver")}, 16) = 0 setsockopt(3,
SOL_SOCKET, SO_KEEPALIVE, [1], 4) = 0 recvfrom(3,
The following example is not working either, as it is unable to resolve names into a Pyro URI; it just waits, like in the previous example. The interesting thing about this example is that it prints fls, which contains the names of all remote pingservers. Then, after adding a fourth pingserver, I'm unable even to print the names of the registered pingservers.
def _ping((host, firing_location)):
    # Python 2 tuple-parameter unpacking (removed in Python 3); takes
    # a (host, pingserver_name) pair so it can be used with Pool.map.
    # Resolves the pinger via the name server on every call.
    pinger = Pyro4.Proxy("PYRONAME:" + firing_location)
    return pinger.ping(host)
def ping(host):
    """Ping `host` from every registered pingserver in parallel and
    print a summary line per server (Python 2)."""
    ns = Pyro4.locateNS(host = "178.209.52.240", port=9090)
    names = ns.list().keys()
    fls = []
    for name in names:
        if name == 'Pyro.NameServer': continue
        fls.append(name)
    print fls
    # One pool worker per pingserver; each worker resolves and calls
    # its remote pinger.
    p = Pool(len(fls))
    jobs = p.map(_ping, zip([host]*len(fls), fls) )
    for j in jobs:
        # Assumes the ping output's "rtt min/avg/max/mdev = ..." slash
        # layout -- TODO confirm the fields being sliced out here.
        print j.split("/")[-3], "±", j.split("/")[-1][:-1]
    return jobs
I'm struggling with it for two days and have no idea of what's wrong with my code.