I'm writing a simple script that checks for SSH connectivity, and I cannot understand why it hangs on one thread.
import re
import socket
import threading
import Queue

class myThread(threading.Thread):
    def __init__(self, hostname):
        threading.Thread.__init__(self)
        self.hostname = hostname

    def run(self):
        return self.doSSH(self.hostname)

    def doSSH(self, hostname):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((hostname, 22))
        result = s.recv(1024)
        if re.findall(r'^SSH.+?SSH.+', result):
            return "Up"
        else:
            return "Down"

def main():
    q = Queue.Queue()
    completeHostlist = ["host1", "host2", "google.com", "host3"]
    for hostname in completeHostlist:
        thread = myThread(hostname)
        thread.daemon = True
        q.put_nowait(thread.run())
        q.get_nowait()
I don't understand why this script hangs at google.com. I would expect it to spawn a daemon thread and continue with host3, and as soon as it finished host3, to kill the Google thread and return the results. What did I do wrong?
I already figured out the difference between run() and start(). Anyway, it is still not working as expected: after all the host[1-3] threads have started, the script gets stuck on the google.com thread, waiting for it to end. Shouldn't that thread be killed at the end of the script?
Should I be using multiprocessing instead of multithreading, to spawn a separate process for each host?
In your code you do q.put_nowait(thread.run()). That runs the SSH check immediately, on the current thread. To actually start a new thread you need to call thread.start().
Not sure what you're doing with the Queue, though.
Don't call a thread's .run() method directly. As @Sorin said, call thread.start() instead.
You don't need to define a new thread class, a function is enough in this case:
from Queue import Queue
from threading import Thread
import socket

def is_ssh_up(result_queue, hostname, port=22):
    # Try to connect; a plain TCP connect is used here as a minimal
    # stand-in for a full SSH banner check
    try:
        s = socket.create_connection((hostname, port), timeout=5)
        s.close()
        result_queue.put((hostname, True))   # Up
    except socket.error:
        result_queue.put((hostname, False))  # Down

def main():
    q = Queue()
    hosts = ["host1", "host2", "google.com", "host3"]
    for hostname in hosts:  # start worker threads
        t = Thread(target=is_ssh_up, args=[q, hostname])
        t.daemon = True
        t.start()
    for _ in hosts:  # collect results
        hostname, is_up = q.get()
        print("%s is %s" % (hostname, "Up" if is_up else "Down"))
Or you could use a thread pool:
from multiprocessing.pool import ThreadPool
import socket

def is_ssh_up(hostname, port=22):
    # Try to connect; return the result instead of printing it
    try:
        s = socket.create_connection((hostname, port), timeout=5)
        s.close()
        return hostname, True
    except socket.error:
        return hostname, False

hosts = ["host1", "host2", "google.com", "host3"]
pool = ThreadPool(20)  # limit the number of concurrent connections to 20
for hostname, is_up in pool.imap_unordered(is_ssh_up, hosts):
    status = "Up" if is_up else "Down" if is_up is not None else "Unknown"
    print("%s status is %s" % (hostname, status))
Related
I have a function that uses threading to connect to a number of network devices and run commands against them:
#!/usr/bin/python3
# Importing Netmiko modules
from netmiko import Netmiko
from netmiko.ssh_exception import (
    NetMikoAuthenticationException,
    NetMikoTimeoutException,
)
import signal, os, json, re

# Queuing and threading libraries
from queue import Queue
import threading

class MyFunction:
    def __init__(self):
        # Get the password
        self.password = "password"
        # Switch IP addresses from a text file that has one IP per line
        self.hops = ["switch1", "switch2", "switch3"]
        self.hop_info = []
        # Number of worker threads to spin up
        self.num_threads = 8
        # This sets up the queue
        self.enclosure_queue = Queue()
        # Thread lock so that only one thread prints at a time
        self.print_lock = threading.Lock()
        self.command = "show ip route 127.0.0.1 | json"

    def deviceconnector(self, i, q):
        while True:
            # These print statements are for debugging and aren't required
            print("{}: Waiting for IP address...".format(i))
            ip = q.get()
            print("{}: Acquired IP: {}".format(i, ip))
            # k,v pairs passed to net_connect
            device_dict = {
                "host": ip,
                "username": "admin",
                "password": self.password,
                "device_type": "cisco_ios",
            }
            try:
                net_connect = Netmiko(**device_dict)
            except NetMikoTimeoutException:
                with self.print_lock:
                    print("\n{}: ERROR: Connection to {} timed-out.\n".format(i, ip))
                q.task_done()
                continue
            except NetMikoAuthenticationException:
                with self.print_lock:
                    print(
                        "\n{}: ERROR: Authentication failed for {}. Stopping script.\n".format(
                            i, ip
                        )
                    )
                q.task_done()
                continue
            output = net_connect.send_command(self.command)
            with self.print_lock:
                print("{}: Printing output...".format(i))
                print(output)
            # Disconnect from the device
            net_connect.disconnect()
            q.task_done()

    def main(self):
        for i in range(len(self.hops)):
            thread = threading.Thread(
                target=self.deviceconnector, args=(i, self.enclosure_queue)
            )
            # Set the thread as a background daemon
            thread.daemon = True
            # Start the thread
            thread.start()
        for hop in self.hops:
            self.enclosure_queue.put(hop)
        # Wait for all tasks in the queue to be marked as completed (task_done)
        self.enclosure_queue.join()
        print("*** Script complete")

if __name__ == "__main__":
    # Calling the main function
    run = MyFunction()
    run.main()
This prints the output of the commands fine, but instead of printing the output I want the threading to happen in a function, so the returned data can be manipulated and used by another function. I can't seem to get the threading functionality in main() to allow this.
Update
I'm using this in an API endpoint, so I'm taking out the
if __name__ == "__main__":
and calling it from a separate main.py. The problem I'm having is that it hangs and doesn't return, and I can't quite figure out why. It only hangs when run from main.py, not when it has the main guard.
You are already writing the data to your Queue, so you just have to get it back from there:
my_data = q.get()
Did you write the code? If so, use your Queue :)
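For example, a minimal sketch (all names hypothetical) of pushing each device's output onto a results-only queue and draining it once the workers are done:

# Hypothetical sketch: workers put (ip, output) tuples on a results
# queue instead of printing, and the caller drains it afterwards.
from queue import Queue
import threading

results = Queue()

def worker(ip):
    output = "output for %s" % ip  # placeholder for the Netmiko call
    results.put((ip, output))

hops = ["switch1", "switch2", "switch3"]
threads = [threading.Thread(target=worker, args=(h,)) for h in hops]
for t in threads:
    t.start()
for t in threads:
    t.join()

collected = dict()
while not results.empty():
    ip, output = results.get()
    collected[ip] = output  # now available to other functions

Because the workers are joined before the queue is drained, the dictionary is complete when the function returns, and nothing is printed along the way.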
I am trying to implement a Python (2.6.x/2.7.x) thread pool that checks for network connectivity (ping or whatever); all of the pool's threads must be killed/terminated as soon as one check is successful.
So I am thinking of creating a pool of, let's say, 10 worker threads. If any one of them succeeds in pinging, the main thread should terminate all the rest.
How do I implement this?
This is not compilable code; it is just to give you an idea of how to make threads communicate.
Inter-process or inter-thread communication happens through queues, pipes, and a few other mechanisms; here I'm using queues.
It works like this: I send IP addresses into in_queue and add each response to out_queue. My main thread monitors out_queue, and if it gets the desired result, it marks all the threads for termination.
Below is the pinger thread definition:
import threading
from Queue import Queue, Empty

# A thread that pings an IP.
class Pinger(threading.Thread):
    def __init__(self, kwargs=None, name=None):
        threading.Thread.__init__(self, name=name)
        self.kwargs = kwargs
        self.stop_pinging = False

    def run(self):
        ip_queue = self.kwargs.get('in_queue')
        out_queue = self.kwargs.get('out_queue')
        while not self.stop_pinging:
            try:
                data = ip_queue.get(timeout=1)
                # ping() is pseudo code; you have to take care of
                # your own ping implementation
                ping_status = ping(data)
                if ping_status:
                    out_queue.put('success')
                    # you can even break here if you don't want to
                    # continue after one success
                else:
                    out_queue.put('failure')
                if ip_queue.empty():
                    break
            except Empty:
                pass
Here is the main thread block:
# Create the shared queues and launch the thread pool
in_queue = Queue()
out_queue = Queue()

ip_list = ['ip1', 'ip2', '....']
# This adds all the IPs to the queue; you can customize it to feed
# them in through some producer instead.
for ip in ip_list:
    in_queue.put(ip)

pinger_pool = []
for i in xrange(1, 10):
    pinger_worker = Pinger(kwargs={'in_queue': in_queue, 'out_queue': out_queue}, name=str(i))
    pinger_pool.append(pinger_worker)
    pinger_worker.start()

while 1:
    if out_queue.get() == 'success':
        for pinger in pinger_pool:
            pinger.stop_pinging = True
        break
Note: this is pseudocode; you should adapt it into something workable as you like.
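For instance, here is a stand-in ping() you could drop in, under the assumption that a TCP connect to a known open port is an acceptable reachability test (a real ICMP ping needs raw sockets or an external tool):

import socket

def ping(host, port=22, timeout=2):
    # Reachability stand-in: a plain TCP connect, not a real ICMP ping
    try:
        s = socket.create_connection((host, port), timeout=timeout)
        s.close()
        return True
    except socket.error:
        return False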
I'm writing a server that operates with a fixed number of workers, each with different properties (in the snippet below, n is such a property).
Upon getting a request, I would like to put it into a queue, so the first available worker can deal with the task.
Unfortunately, the socket gets closed when it's enqueued.
import threading
from queue import Queue
import socketserver

thread = True
queue = Queue()

class BasicHandler(socketserver.BaseRequestHandler):
    def handle(self):
        while True:
            sock = self.request
            byte = sock.recv(10)
            print(byte)

class ThreadedHandler(socketserver.BaseRequestHandler):
    def handle(self):
        queue.put(self.request)

def worker(n):
    print('Started worker ' + str(n))
    while True:
        sock = queue.get()
        byte = sock.recv(10)
        print(byte)

if thread:
    [threading.Thread(target=worker, args=(n,)).start() for n in range(2)]
    handler = ThreadedHandler
else:
    handler = BasicHandler

socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(("localhost", 9999), handler)
server.serve_forever()
Running the above snippet with thread = False works fine, but when I try to connect to the thread = True version, telnet immediately says:
Connection closed by foreign host.
and the server prints:
Started worker 0
Started worker 1
b''
The request is automatically closed when the method ThreadedHandler.handle finishes. You have to override TCPServer.shutdown_request if you want to keep the socket open.
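A minimal sketch of that override, assuming the worker threads take over responsibility for closing the socket:

import socketserver

class QueuingTCPServer(socketserver.TCPServer):
    def shutdown_request(self, request):
        pass  # don't shut the socket down; a worker thread owns it now

    def close_request(self, request):
        pass  # likewise, skip the close

The server would then be created as QueuingTCPServer(("localhost", 9999), ThreadedHandler), and each worker calls sock.close() itself once it is done with the connection.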
During my exploration of IPC, dealing specifically with threads and sockets in Python 3.4.1, I experienced something that's a bit curious, and I don't quite understand what's going on.
Currently (and successfully, for now) I am using an anonymous pipe created with os.pipe() to send a termination signal to a thread holding a socket connection.
My goal was to terminate the thread in a graceful manner. I tried using a boolean flag at first, but since the select call was blocking, I had to send the termination signal through something that select.select could read (a socket, pipe, stdin, etc.), hence breaking the select call.
Before I discovered how to use a pipe to communicate with the thread and penetrate the select call, I broke off development into a testing branch.
Let me explain my situation.
Basically, this works:
import os
import threading
import select

class MyThread(threading.Thread):
    def __init__(self, pipein):
        threading.Thread.__init__(self)
        # The pipe to listen for terminate signals on
        self.pipein = pipein
        self.stopped = False
        self.inputs = [self.pipein]

    def run(self):
        print("Thread-1 Started")
        while not self.stopped:
            inputready, outputready, errors = select.select(self.inputs, [], [])
            for i in inputready:
                if i == self.pipein:
                    # 64 is an arbitrary length that should be enough in any case
                    signal = os.read(self.pipein, 64)
                    print("The thread received stuff on the pipe: %s" % signal)
                    if signal == b'stop':
                        print("Stop command received.")
                        print("Exiting.")
                        self.stopped = True
                        break

if __name__ == "__main__":
    # Create the communication pipes
    pipe = os.pipe()
    # Start the worker thread
    print("Starting the worker thread...")
    t = MyThread(pipe[0])
    t.start()
    print("Worker thread started.")
    stopped = False
    # Enter the main loop
    while not stopped:
        command = input("Command to send to thread: ")
        os.write(pipe[1], bytes(command, 'UTF-8'))
        stopped = True
and if I type 'stop' in the terminal I get this:
localhost:ipc.git $ python3 pipes.py
Starting the worker thread...
Thread-1 Started
Worker thread started.
Command to send to thread: stop
The thread received stuff on the pipe: b'stop'
Stop command received.
Exiting.
localhost:ipc.git $ clear
and this doesn't:
import os
import threading
import select

class MyThread(threading.Thread):
    def __init__(self, pipein):
        threading.Thread.__init__(self)
        # The pipe to listen for terminate signals on
        self.pipein = pipein
        self.stopped = False
        self.inputs = [self.pipein]

    def run(self):
        print("Thread-1 Started")
        while not self.stopped:
            inputready, outputready, errors = select.select(self.inputs, [], [])
            for i in inputready:
                if i == self.pipein:
                    # 64 is an arbitrary length that should be enough in any case
                    signal = os.read(self.pipein, 64)
                    print("The thread received stuff on the pipe: %s" % signal)
                    if signal == b'stop':
                        print("Stop command received.")
                        print("Exiting.")
                        self.stopped = True
                        break

if __name__ == "__main__":
    # Create the communication pipes
    pipein, pipeout = os.pipe()  # Separate into reader fd and writer fd
    # Start the worker thread
    print("Starting the worker thread...")
    t = MyThread(pipein)  # Give the thread the receiver
    t.start()
    print("Worker thread started.")
    stopped = False
    # Enter the main loop
    while not stopped:
        command = input("Command to send to thread: ")
        # Write on the variable holding pipe[1]: pipeout
        os.write(pipeout, bytes(command, 'UTF-8'))
        stopped = True
The difference is, I get an
OSError: [Errno 9] Bad file descriptor
when trying to read or write from a variable created from the pipe, like:
pipein, pipeout = os.pipe()
or
pipe = os.pipe()
pipein = pipe[0]
pipeout = pipe[1]
However, if I use pipe[0] and pipe[1] directly to read and write respectively with os.read() and os.write(), it works just fine!
So assigning pipe[0] or pipe[1] to any sort of variable does not work, and I get an OSError. The same thing applies if I create a class called Communicator and store pipe[0] and pipe[1] as instance variables.
Could anyone explain why this is the case? Will I never be able to write to a variable holding pipe[1], or is this just because I'm going between threads?
If you know of another way to do inter-thread communication that can be used within, or interrupt, a select call, I'm all ears. I tried an instance of io.StringIO or io.{OtherIOHere}, but they don't support a fileno() call, so they don't work with select.
I would like to create a class to contain my communication pipes for better usability, but until I find out why variables holding the pipe ends don't work, I can't.
Any input or advice is appreciated.
Edit:
Added some debug tests:
import os
import threading
import time
import select

class MyThread(threading.Thread):
    def __init__(self, pipein):
        threading.Thread.__init__(self)
        self.pipein = pipein
        self.stopped = False
        self.inputs = [self.pipein]

    def run(self):
        print("Thread-1 Started")
        while not self.stopped:
            inputready, outputready, errors = select.select(self.inputs, [], [])
            for i in inputready:
                if i == self.pipein:
                    signal = os.read(self.pipein, 64)
                    print("The thread received stuff on the pipe: %s" % signal)
                    if signal == b'stop':
                        print("Stop command received.")
                        print("Exiting.")
                        self.stopped = True
                        break

if __name__ == "__main__":
    # Create the communication pipes
    pipe = os.pipe()
    pipein = pipe[0]
    pipeout = pipe[1]
    # Some print debugs
    print(type(pipein))
    print(type(pipeout))
    print(pipein)
    print(pipeout)
    print(type(pipe))
    print(type(pipe[0]))
    print(type(pipe[1]))
    print(pipe[0])
    print(pipe[1])
    # Start the worker thread
    print("Starting the worker thread...")
    t = MyThread(pipein)
    t.start()
    print("Worker thread started.")
    # Enter the main loop
    stopped = False
    while not stopped:
        command = input("Command to send to thread: ")
        os.write(pipeout, bytes(command, 'UTF-8'))
        stopped = True
Dave, the funny thing is, this works now and I haven't the faintest idea why. I did the same thing in two different projects; in both cases I couldn't write to a variable holding pipe[1].
localhost:ipc.git $ python3 pipes.py
<class 'int'>
<class 'int'>
3
4
<class 'tuple'>
<class 'int'>
<class 'int'>
3
4
Starting the worker thread...
Thread-1 Started
Worker thread started.
Command to send to thread: stop
The thread received stuff on the pipe: b'stop'
Stop command received.
Exiting.
localhost:ipc.git $
Edit 2
OK, I have created the Communicator class to communicate between threads with a pipe. It comes with easy-to-use read() and write() methods, and everything seems to be hunky-dory. I wonder why it didn't work before; it must have been a system-related thing. Perhaps my work with sockets and threads had it on edge.
Here is the complete functional code:
import os
import threading
import select

class MyThread(threading.Thread):
    def __init__(self, comm):
        threading.Thread.__init__(self)
        self.comm = comm
        self.stopped = False
        self.inputs = [self.comm.pipein]

    def run(self):
        print("Thread-1 Started")
        while not self.stopped:
            inputready, outputready, errors = select.select(self.inputs, [], [])
            for i in inputready:
                if i == self.comm.pipein:
                    signal = self.comm.read()
                    print("The thread received stuff on the pipe: %s" % signal)
                    if signal == b'stop':
                        print("Stop command received.")
                        print("Exiting.")
                        self.stopped = True
                        break

class Communicator:
    def __init__(self):
        self.pipe = os.pipe()
        self.pipein = self.pipe[0]
        self.pipeout = self.pipe[1]

    def write(self, msg):
        os.write(self.pipeout, msg)

    def read(self):
        return os.read(self.pipein, 64)

if __name__ == "__main__":
    # Use the Communicator class instead of a bare os.pipe()
    comm = Communicator()
    # Start the worker thread
    print("Starting the worker thread...")
    t = MyThread(comm)
    t.start()
    print("Worker thread started.")
    # Enter the main loop
    stopped = False
    while not stopped:
        command = input("Command to send to thread: ")
        comm.write(bytes(command, 'UTF-8'))
        stopped = True
Thanks for your help guys.
I copied and pasted your two examples of code into two files on my MacBook, ran them with Python 3.4.1 (from MacPorts), entered 'stop', and they both worked.
What operating system are you using?
Edit: Looks like you "fixed" it. Good job. ;)
I am building an algorithmic trading platform using Python. Multiple algorithms monitor the market and execute trades accordingly, daily from 09:30 to 16:00.
What I'm looking for is a way to start and stop algorithms arbitrarily from a client. Therefore I want to have a server script running using multiprocessing, and a client which can start/stop/list algorithms (each of which should run in a separate process) at any given time.
Any examples of how this can be done? The majority of online examples are for queue servers, which do not seem to fit my problem.
EDIT:
I am trying to do this with the multiprocessing package. The idea of using a queue seems wrong to me, as I know an arbitrary number of processes will in fact run for the whole day, or at least until I say stop. I'm not trying to run a short script and let a worker consume the next job from a queue once the previous one is done. Actually I'm thinking of having a server script using a Manager, which will run forever and just start new scripts in separate processes/threads when requested. I would, however, like to be able to send a stop signal to a process to kill it. I do have a feeling that I'm doing this kind of backwards :-) What I have is:
server.py:
import multiprocessing as mp
from multiprocessing import Process
from multiprocessing.managers import BaseManager
from time import strftime

class Server(object):
    def __init__(self, port=50000, authkey=''):
        self.processes = {}
        self._authkey = authkey
        self.port = port
        self.server = None
        self.running = False
        BaseManager.register('get_process', callable=lambda: self)

    def start_server(self):
        manager = BaseManager(address=('', self.port), authkey=self._authkey)
        self.server = manager.get_server()
        try:
            self._logmessage("Server started")
            self.running = True
            self.server.serve_forever()
        except (KeyboardInterrupt, SystemExit):
            self.shutdown()

    def start_process(self, mod, fcn, *args, **kwargs):
        mod = __import__(mod, globals(), locals(), ['object'], -1)
        key = "{0}.{1}".format(mod, fcn)
        assert not key in self.processes, \
            "Process named '%s' already exists" % key
        p = Process(target=getattr(mod, fcn), name=mod, args=(None, ), kwargs=kwargs)
        self._logmessage("Process '%s' started" % key)
        p.start()
        # p.join()
        self.processes[key] = p

    def stop_process(self, key):
        self.processes[key].terminate()
        del self.processes[key]

    def get_processes(self):
        return self.processes.keys()

    def shutdown(self):
        for child in mp.active_children():
            child.terminate()
        self.server.shutdown()
        self.running = False
        print "Shutting down"

    def _logmessage(self, msg):
        print "%s: %s" % (strftime('%Y-%m-%d %H:%M:%S'), msg)

if __name__ == '__main__':
    server = Server(authkey='abc')
    try:
        server.start_server()
    except (KeyboardInterrupt, SystemExit):
        server.shutdown()
client.py:
from multiprocessing.managers import BaseManager
import time

class Client(object):
    def __init__(self, host='', port=50000, authkey=''):
        self.host = host
        self.port = port
        self.manager = None
        self.process = None
        self._type_id = 'get_process'
        self._authkey = authkey
        self.manager = BaseManager(address=(self.host, self.port), authkey=self._authkey)
        BaseManager.register(self._type_id)

    def connect(self):
        try:
            self.manager.connect()
            self._logmessage("Connected to server")
        except:
            self._logmessage("Could not connect to server")
        self.process = getattr(self.manager, self._type_id)()

    def start_process(self, mod, fcn):
        self.process.start_process(mod, fcn)
        self._logmessage("Process '%s' started" % fcn)

    def list_processes(self):
        print self.process.get_processes()

    @property
    def connected(self):
        return self.manager._state.value == self.manager._state.STARTED

    def _logmessage(self, msg):
        print "%s: %s" % (time.strftime('%Y-%m-%d %H:%M:%S'), msg)

def test(data):
    while True:
        print time.time()
        time.sleep(1.)

if __name__ == '__main__':
    from algotrading.server.process_client import Client
    client = Client(authkey='abc')
    client.connect()
    client.start_process("algotrading.server.process_client", "test")
    client.list_processes()
Check out Supervisord, which allows for remote management of processes, plus automatic start/restart configurability.
Depending on your scalability and disaster-recovery needs, you may be thinking about distributing your monitoring/trading processes across multiple servers. While supervisord is really only designed to manage a single machine, you could build a manager app which coordinates multiple servers, each running supervisord, via its XML-RPC interface.
Cron or Celery could be used for your daily start/stop scheduling.
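As a sketch of what that remote control looks like (this assumes supervisord is running with its inet_http_server listening on port 9001, and that a program named my_algorithm is defined in its config):

# Sketch: driving supervisord over its XML-RPC interface
import xmlrpclib  # xmlrpc.client on Python 3

server = xmlrpclib.ServerProxy('http://localhost:9001/RPC2')
print server.supervisor.getState()                 # e.g. {'statename': 'RUNNING', ...}
print server.supervisor.startProcess('my_algorithm')
print server.supervisor.stopProcess('my_algorithm')
print server.supervisor.getAllProcessInfo()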
You could implement a socket server which listens to the clients and launches threads to execute an algorithm.
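For example, a minimal sketch of that idea (the names and the wire protocol are hypothetical): a threading TCP server that starts an algorithm thread whenever a client sends a "start <name>" line:

# Hypothetical sketch: a TCP command server that launches algorithm threads
import SocketServer  # socketserver on Python 3
import threading

def run_algorithm(name):
    pass  # stand-in for a real trading algorithm loop

class CommandHandler(SocketServer.StreamRequestHandler):
    def handle(self):
        command = self.rfile.readline().strip()
        if command.startswith("start "):
            name = command.split(" ", 1)[1]
            threading.Thread(target=run_algorithm, args=(name,)).start()
            self.wfile.write("started %s\n" % name)

server = SocketServer.ThreadingTCPServer(("localhost", 9998), CommandHandler)
server.serve_forever()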
I think RPC would be the simplest solution.
Some inspiration: What is the current choice for doing RPC in Python?
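A minimal sketch of the RPC route (hypothetical names), using the standard library's XML-RPC server to expose start/stop of algorithm processes:

# Hypothetical sketch: start/stop algorithm processes over XML-RPC
from SimpleXMLRPCServer import SimpleXMLRPCServer  # xmlrpc.server on Python 3
from multiprocessing import Process

processes = {}

def run_algorithm(name):
    pass  # stand-in for a trading algorithm loop

def start(name):
    p = Process(target=run_algorithm, args=(name,))
    p.start()
    processes[name] = p
    return "started %s" % name

def stop(name):
    processes.pop(name).terminate()
    return "stopped %s" % name

server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(start)
server.register_function(stop)
server.serve_forever()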