Python Tornado web service for a long-running process

I want to write a web service that processes requests in the background. The service puts each request into a queue and responds to the client immediately.
My problem with the code below is that the while loop in BackgroundThread.run() doesn't behave as an infinite loop: it only iterates once.
Thank you.
Code:
import random
import sys
import threading
import time
import Queue

import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado import gen

class BackgroundThread(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        global queue
        while True:
            item = queue.get()
            if item is not None:
                #long running process
                time.sleep(random.randint(10, 100) / 1000.0)
                print "task", item, "finished"

queue = Queue.Queue()

class MyHandler(tornado.web.RequestHandler):

    @gen.coroutine
    def get(self):
        global queue
        self.write('OK')
        self.finish()
        filePath = self.get_arguments("filePath")
        queue.put(filePath)
        print queue.qsize()

if __name__ == '__main__':
    try:
        BackgroundThread().start()
        BackgroundThread().start()
        app = tornado.web.Application([(r'/', MyHandler)])
        print("server opened on port : 8000")
        server = tornado.httpserver.HTTPServer(app)
        server.bind(8000)
        server.start(4)  # Specify number of subprocesses
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        print '^C received, shutting down the web server'
        sys.exit(1)

I just added a try/except block because when the queue is empty inside the while loop, the get raises an exception and the loop doesn't iterate.
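For reference, Queue.get() only raises Queue.Empty in non-blocking mode; the default blocking get() simply waits for an item. A minimal sketch of the two forms, using Python 2's Queue module as in the code above:

import Queue

q = Queue.Queue()
q.put('task-1')

# Blocking form: waits until an item is available; it never raises Queue.Empty.
item = q.get()
print "got", item

# Non-blocking form: raises Queue.Empty immediately when nothing is queued.
try:
    item = q.get_nowait()  # same as q.get(block=False)
except Queue.Empty:
    print "queue was empty"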
I got the answer, and here is the code:
class BackgroundThread(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        global queue
        print("qwerqwer0")
        while True:
            print("qwerqwer1")
            print("qwerqwer2")
            try:
                item = queue.get()
                queue.task_done()
            except Queue.Empty:
                print("empty")
                continue
            if item is not None:
                print("qwerqwerqwer")
                #long running process
                print "task ", item, " finished"
queue = Queue.Queue()

class MyHandler(tornado.web.RequestHandler):

    @gen.coroutine
    def get(self):
        global queue
        self.write('OK')
        self.finish()
        filePath = self.get_arguments("filePath")
        queue.put(filePath)
        print queue.qsize()

if __name__ == '__main__':
    try:
        #BackgroundThread().start()
        BackgroundThread().start()
        app = tornado.web.Application([(r'/', MyHandler)])
        print("server opened on port : 8000")
        server = tornado.httpserver.HTTPServer(app)
        server.bind(8000)
        server.start(4)  # Specify number of subprocesses
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        print '^C received, shutting down the web server'
        sys.exit(1)
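For a quick test, something like the following should enqueue a task and return immediately (a hypothetical local check; the filePath value is made up):

import urllib2

# Assumes the server above is running locally on port 8000.
resp = urllib2.urlopen('http://localhost:8000/?filePath=/tmp/example.txt')
print resp.read()  # prints 'OK' while the task itself runs in the background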

Related

Python Multithreading Not Working

I am new to learning Python and got an exercise to create a multithreaded script that takes a list of 10 public FTP servers, connects to each anonymously, and just does a directory listing. I have the following code, and it works when I use the FTP connect within the run function; but when I try to create an "ftp" function and use it, it keeps erroring out, and then the terminal gets stuck and I can't kill the program or get out, which I can't figure out either.
#!/usr/bin/python
import threading
import Queue
import time
from ftplib import FTP

sites = ["speedtest.tele2.net", "test.rebex.net", "test.talia.net", "ftp.swfwmd.state.fl.us", "ftp.heanet.ie", "ftp.rediris.es", "ftp.ch.freebsd.org", "ftp.mirror.nl", "ftp.ussg.iu.edu", "ftp.uni-bayreu$

class WorkerThread(threading.Thread):

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    #def ftp(ip):
    #    server = FTP(ip)
    #    server.login()
    #    server.retrlines('LIST')

    def run(self):
        print "In WorkerThread"
        while True:
            counter = self.queue.get()
            print "Connecting to FTP Server %s" % counter
            #self.ftp(counter)
            #print "Ordered to sleep for %d seconds!" % counter
            #time.sleep(counter)
            #print "Finished sleeping for %d seconds" % counter
            server = FTP(counter)
            server.login()
            server.retrlines('LIST')
            self.queue.task_done()

queue = Queue.Queue()
for i in range(10):
    print "Creating WorkerThread : %d" % i
    worker = WorkerThread(queue)
    worker.setDaemon(True)
    worker.start()
    print "WorkerThread %d Created!" % i

for j in sites:
    queue.put(j)

queue.join()
print "All Tasks Over!"
As suggested by Is there any way to kill a Thread in Python?,
you should set a stop condition and make the threads check it. Together with the join, it allows the threads to terminate gracefully. Without getting into other implications, try the code below.
#!/usr/bin/python
import threading
import Queue
import time
from ftplib import FTP

sites = ["speedtest.tele2.net"]

class WorkerThread(threading.Thread):

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self._stop = threading.Event()

    def ftp(self, ip):
        server = FTP(ip)
        server.login()
        server.retrlines('LIST')

    def run(self):
        print "In WorkerThread"
        while not self.stopped():
            counter = self.queue.get()
            print "Connecting to FTP Server %s" % counter
            self.ftp(counter)
            self.queue.task_done()

    def stop(self):
        self._stop.set()

    def stopped(self):
        return self._stop.is_set()

if __name__ == '__main__':
    queue = Queue.Queue()
    for i in range(10):
        print "Creating WorkerThread : %d" % i
        worker = WorkerThread(queue)
        worker.setDaemon(True)
        worker.start()
        worker.stop()
        print "WorkerThread %d Created!" % i

    for j in sites:
        queue.put(j)

    queue.join()
    print "All Tasks Over!"

Sending data from outside a socket object in Python

I made a Client socket object which I instantiate, and it keeps a connection to the server alive, which is working fine. But I'm wondering if there is a way to trigger the socket send from outside the instance. I was about to make a stack for the messages, check the stack in the while loop and, if it's not empty, send the oldest data to the server, which would be just fine for me; but my problem is that the stack only updates after the while loop (I tried breaking out, and then it updated).
So my question would be: is there a way to update the global stack while the loop is running? Or is there any other way to trigger the socket send from outside the object?
import socket
import sys
import select
import threading

SERVER_IP = '192.168.1.4'
PORT = 8686
TIMEOUT = 5
BUF_SIZE = 1024
MESSAGES = ['testdata1', 'testdata2']

class Client(threading.Thread):

    def __init__(self, host=SERVER_IP, port=PORT):
        threading.Thread.__init__(self)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock = socket.create_connection((host, port), 1)
        self.sock.setblocking(0)
        while 1:
            try:
                global MESSAGES
                ready = select.select([self.sock], [], [], TIMEOUT*1000)
                if ready[0]:
                    buf = self.sock.recv(BUF_SIZE)
                    print buf
                    #TODO: do stuff with buf
                print 'messages left:' + str(len(MESSAGES))
                if len(MESSAGES) > 0:
                    self.sock.send(MESSAGES.pop())
            except KeyboardInterrupt:
                self.sock.close()
                sys.exit(1)
            except Exception, e:
                print '\n[ERR] %s' % e
                self.sock.close()
                sys.exit(1)

    def run(self):
        pass

    def sendData(self, data):
        global MESSAGES
        print 'appending data:%s' % data
        MESSAGES.append(data)

def main():
    client = Client()
    client.start()
    client.sendData("test1")
    client.sendData("test2")
    client.sendData("test3")

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        sys.exit(1)
Client.__init__() does not return because it enters an infinite while loop. Hence control is never returned to the main thread, and the Client thread is not actually started.
Instead you should move the while loop into the run() method. Then the __init__() method will return control to the main thread, which can then start the thread, and request that the client send messages via sendData().
class Client(threading.Thread):

    def __init__(self, host=SERVER_IP, port=PORT):
        threading.Thread.__init__(self)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock = socket.create_connection((host, port), 1)
        self.sock.setblocking(0)

    def run(self):
        while 1:
            try:
                global MESSAGES
                ready = select.select([self.sock], [], [], TIMEOUT*1000)
                if ready[0]:
                    buf = self.sock.recv(BUF_SIZE)
                    print buf
                    #TODO: do stuff with buf
                print 'messages left:' + str(len(MESSAGES))
                if len(MESSAGES) > 0:
                    self.sock.send(MESSAGES.pop())
            except KeyboardInterrupt:
                self.sock.close()
                sys.exit(1)
            except Exception, e:
                print '\n[ERR] %s' % e
                self.sock.close()
                sys.exit(1)

    def sendData(self, data):
        global MESSAGES
        print 'appending data:%s' % data
        MESSAGES.append(data)
Instead of using the global MESSAGES list you should probably create a Queue for communicating between the main thread and the worker thread(s), particularly if more than one worker thread is running. Something like this (untested!):
import Queue

class Client(threading.Thread):

    def __init__(self, msg_queue, host=SERVER_IP, port=PORT):
        threading.Thread.__init__(self)
        self.msg_queue = msg_queue
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock = socket.create_connection((host, port), 1)
        self.sock.setblocking(0)

    def run(self):
        while 1:
            try:
                ready = select.select([self.sock], [], [], TIMEOUT*1000)
                if ready[0]:
                    buf = self.sock.recv(BUF_SIZE)
                    print buf
                    #TODO: do stuff with buf
                print 'messages left:' + str(self.msg_queue.qsize())
                try:
                    msg = self.msg_queue.get_nowait()
                    self.sock.send(msg)
                except Queue.Empty:
                    pass
            except KeyboardInterrupt:
                self.sock.close()
                sys.exit(1)
            except Exception, e:
                print '\n[ERR] %s' % e
                self.sock.close()
                sys.exit(1)

def main():
    # create a queue and pass it to the client
    msg_queue = Queue.Queue()
    client = Client(msg_queue)
    client.start()
    msg_queue.put("test1")
    msg_queue.put("test2")
    msg_queue.put("test3")
The thing should work if you move your loop from __init__() into the run() method instead.
Your thread is not really a thread this way; the process blocks at client = Client(...).
Why do you mix select and threads? Is this really necessary? If you want asynchronous sending and receiving without threads, use the asyncore module.
Or remove select from your code. In blocking mode socket.recv() will block until it receives data, but since this runs in a thread, I don't see anything wrong with that. In non-blocking mode, recv() will just return None if there is no data to receive, if I remember correctly. So you don't really need select: just check whether recv() returned anything, and if not, sleep some time before trying again.
The way you did it troubles your OS twice: once to read the socket, and a second time to get the status of the socket, where the timeout is used to simulate sleep() more than anything else. Then the loop makes the select() system call again right after the timeout confirmed that there is nothing to do for that socket.
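A minimal sketch of that select-free approach, using a socket timeout instead of non-blocking mode (in CPython a non-blocking recv() with no data actually raises socket.error rather than returning None, so a timeout is the safer way to poll):

import socket

def recv_loop(sock, handle, bufsize=1024):
    # Wait up to one second per recv() instead of calling select().
    sock.settimeout(1.0)
    while True:
        try:
            buf = sock.recv(bufsize)
        except socket.timeout:
            continue            # nothing arrived yet; poll again
        if not buf:
            break               # empty string: peer closed the connection
        handle(buf)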

Python multiprocessing: Start/stop processes on server

I am building an algorithmic trading platform using Python. Multiple algorithms monitor the market and execute trades accordingly, daily from 09:30 to 16:00.
What I'm looking for is to start and stop algorithms arbitrarily from a client. Therefore I want a server script running using multiprocessing, and a client which can start/stop/list algorithms (each of which should run in a separate process) at any given time.
Any examples of how this can be done? The majority of online examples are for queue servers, which do not seem to fit my problem.
EDIT:
I am trying to do this with the multiprocessing package. The idea of using a queue seems wrong to me, as I know for a fact that an arbitrary number of processes will run for the whole day, or at least until I say stop. I'm not trying to run a short script and let a worker consume the next job from a queue once the previous one is done. Actually I'm thinking of having a server script using a Manager, which will run forever and just start new scripts in separate processes/threads when requested. I would, however, like to be able to send a stop signal to a process to kill it. I do have a feeling that I'm doing this kinda backwards :-) What I have is:
server.py:
import multiprocessing as mp
from multiprocessing import Process
from multiprocessing.managers import BaseManager
from time import strftime

class Server(object):

    def __init__(self, port=50000, authkey=''):
        self.processes = {}
        self._authkey = authkey
        self.port = port
        self.server = None
        self.running = False
        BaseManager.register('get_process', callable=lambda: self)

    def start_server(self):
        manager = BaseManager(address=('', self.port), authkey=self._authkey)
        self.server = manager.get_server()
        try:
            self._logmessage("Server started")
            self.running = True
            self.server.serve_forever()
        except (KeyboardInterrupt, SystemExit):
            self.shutdown()

    def start_process(self, mod, fcn, *args, **kwargs):
        mod = __import__(mod, globals(), locals(), ['object'], -1)
        key = "{0}.{1}".format(mod, fcn)
        assert not key in self.processes, \
            "Process named '%s' already exists" % key
        p = Process(target=getattr(mod, fcn), name=mod, args=(None, ), kwargs=kwargs)
        self._logmessage("Process '%s' started" % key)
        p.start()
        # p.join()
        self.processes[key] = p

    def stop_process(self, key):
        self.processes[key].terminate()
        del self.processes[key]

    def get_processes(self):
        return self.processes.keys()

    def shutdown(self):
        for child in mp.active_children():
            child.terminate()
        self.server.shutdown()
        self.running = False
        print "Shutting down"

    def _logmessage(self, msg):
        print "%s: %s" % (strftime('%Y-%m-%d %H:%M:%S'), msg)

if __name__ == '__main__':
    server = Server(authkey='abc')
    try:
        server.start_server()
    except (KeyboardInterrupt, SystemExit):
        server.shutdown()
client.py:
from multiprocessing.managers import BaseManager
import time

class Client(object):

    def __init__(self, host='', port=50000, authkey=''):
        self.host = host
        self.port = port
        self.manager = None
        self.process = None
        self._type_id = 'get_process'
        self._authkey = authkey
        self.manager = BaseManager(address=(self.host, self.port), authkey=self._authkey)
        BaseManager.register(self._type_id)

    def connect(self):
        try:
            self.manager.connect()
            self._logmessage("Connected to server")
        except:
            self._logmessage("Could not connect to server")
        self.process = getattr(self.manager, self._type_id)()

    def start_process(self, mod, fcn):
        self.process.start_process(mod, fcn)
        self._logmessage("Process '%s' started" % fcn)

    def list_processes(self):
        print self.process.get_processes()

    @property
    def connected(self):
        return self.manager._state.value == self.manager._state.STARTED

    def _logmessage(self, msg):
        print "%s: %s" % (time.strftime('%Y-%m-%d %H:%M:%S'), msg)

def test(data):
    while True:
        print time.time()
        time.sleep(1.)

if __name__ == '__main__':
    from algotrading.server.process_client import Client
    client = Client(authkey='abc')
    client.connect()
    client.start_process("algotrading.server.process_client", "test")
    client.list_processes()
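For reference, the BaseManager proxying pattern the code above relies on can be reduced to a few lines; a minimal sketch, with class and method names invented for illustration:

from multiprocessing.managers import BaseManager

class Registry(object):
    # hypothetical shared object; the server hands out proxies to one instance
    def ping(self):
        return "pong"

registry = Registry()

class Mgr(BaseManager):
    pass

# Server side: register a callable returning the shared object, then serve.
Mgr.register('get_registry', callable=lambda: registry)
server = Mgr(address=('', 50001), authkey='abc').get_server()
#server.serve_forever()   # blocks; run this in the server process

# Client side, in another process: register the name only, connect, call through the proxy.
#Mgr.register('get_registry')
#m = Mgr(address=('localhost', 50001), authkey='abc')
#m.connect()
#print m.get_registry().ping()   # 'pong', executed in the server process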
Check out Supervisord, which allows for remote management of processes, plus automatic start/restart configurability.
Depending on your scalability and disaster-recovery needs, you may be thinking about distributing your "monitoring/trading processes" across multiple servers. While supervisord is really only designed to manage a single machine, you could build a manager app which coordinates multiple servers, each running supervisord, via its XML-RPC interface.
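For example, supervisord's XML-RPC interface can be driven from the standard library; a sketch, assuming a local supervisord with its HTTP interface on the default port 9001 and a configured program named algo1 (both assumptions):

import xmlrpclib

# Connect to a local supervisord instance (default inet_http_server port).
proxy = xmlrpclib.ServerProxy('http://localhost:9001/RPC2')

# Start and stop a configured program, then list everything supervisord manages.
proxy.supervisor.startProcess('algo1')
print proxy.supervisor.getProcessInfo('algo1')['statename']
proxy.supervisor.stopProcess('algo1')
for info in proxy.supervisor.getAllProcessInfo():
    print info['name'], info['statename']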
Cron or Celery could be used for your daily start/stop scheduling.
You could implement a socket server which listens to the clients and launches threads to execute an algorithm.
I think RPC would be the simplest solution.
Some inspiration: What is the current choice for doing RPC in Python?
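A bare-bones sketch of that RPC idea with the standard library's SimpleXMLRPCServer (Python 2); the algorithm registry and all names here are invented for illustration, and the start/stop logic just wraps multiprocessing as in the server.py above:

import time
import SimpleXMLRPCServer
from multiprocessing import Process

def algo1():
    # placeholder algorithm; a real one would monitor the market all day
    while True:
        time.sleep(1)

ALGOS = {'algo1': algo1}   # registry of startable algorithms
processes = {}

def start_algo(name):
    p = Process(target=ALGOS[name], name=name)
    p.start()
    processes[name] = p
    return "started %s" % name

def stop_algo(name):
    processes.pop(name).terminate()
    return "stopped %s" % name

def list_algos():
    return processes.keys()

server = SimpleXMLRPCServer.SimpleXMLRPCServer(('localhost', 9000))
for fn in (start_algo, stop_algo, list_algos):
    server.register_function(fn)
server.serve_forever()   # clients connect with xmlrpclib.ServerProxy('http://localhost:9000')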

Python thread exception causes the whole process to stop

In the code below, if I change one of the URLs to something invalid, the whole process stops, and I can't exit from the terminal using Ctrl+C. So my question is: how should I handle exceptions in my thread's run method so that, if an error happens, it is caught and the thread moves on to the next list element without failing the whole process:
#!/usr/bin/env python
import Queue
import threading
import urllib2
import time

hosts = ["http://yahoo.com", "http://google.com", "http://amazon.com", "http://apple.com"]
queue = Queue.Queue()

class ThreadUrl(threading.Thread):
    """Threaded Url Grab"""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            #grabs host from queue
            host = self.queue.get()
            #grabs urls of hosts and prints first 1024 bytes of page
            url = urllib2.urlopen(host)
            print "connected"
            #signals to queue job is done
            self.queue.task_done()

start = time.time()

def main():
    #spawn a pool of threads, and pass them queue instance
    for i in range(5):
        t = ThreadUrl(queue)
        t.setDaemon(True)
        t.start()

    #populate queue with data
    for host in hosts:
        queue.put(host)

    #wait on the queue until everything has been processed
    queue.join()

main()
print "Elapsed Time: %s" % (time.time() - start)
Use a finally block to make sure the thread always signals the queue, even when there is an error.
def run(self):
    while True:
        #grabs host from queue
        host = self.queue.get()
        #grabs urls of hosts and prints first 1024 bytes of page
        try:
            url = urllib2.urlopen(host)
            print "connected"
        except urllib2.URLError:
            print "couldn't connect to %s" % host
        finally:
            #signals to queue job is done
            self.queue.task_done()
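If a host hangs rather than failing outright, it can also help to pass urlopen's timeout argument (available since Python 2.6), so a dead server can't stall a worker indefinitely; a variant of the fetch above:

try:
    # Fail after 10 seconds instead of blocking forever on a dead host.
    url = urllib2.urlopen(host, timeout=10)
    print "connected"
except urllib2.URLError:
    print "couldn't connect to %s" % host
finally:
    self.queue.task_done()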

Killing multi-threaded SocketServer

I'm trying to figure out why I can't kill my multithreaded SocketServer via Ctrl-C.
Basically I have this:
import SocketServer, threading, os

class TEST(SocketServer.BaseRequestHandler):

    def server_bind(self):
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, SO_REUSEPORT, 1)
        self.socket.bind(self.server_address)
        self.socket.setblocking(0)

    def handle(self):
        request, socket = self.request
        data = request
        if data[0] == "\x01":
            buff = "blablabla"
            socket.sendto(str(buff), self.client_address)

class TEST1(SocketServer.BaseRequestHandler):

    def server_bind(self):
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, SO_REUSEPORT, 1)
        self.socket.bind(self.server_address)
        self.socket.setblocking(0)

    def handle(self):
        request, socket = self.request
        data = request
        if data[0] == "\x01":
            buff = "blablabla"
            socket.sendto(str(buff), self.client_address)

class TEST2(SocketServer.BaseRequestHandler):

    def server_bind(self):
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, SO_REUSEPORT, 1)
        self.socket.bind(self.server_address)
        self.socket.setblocking(0)

    def handle(self):
        request, socket = self.request
        data = request
        if data[0] == "\x01":
            buff = "blablabla"
            socket.sendto(str(buff), self.client_address)

class TEST3(SocketServer.BaseRequestHandler):

    def server_bind(self):
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, SO_REUSEPORT, 1)
        self.socket.bind(self.server_address)
        self.socket.setblocking(0)

    def handle(self):
        request, socket = self.request
        data = request
        if data[0] == "\x01":
            buff = "blablabla"
            socket.sendto(str(buff), self.client_address)

def serve_thread_udp(host, port, handler):
    server = SocketServer.UDPServer((host, port), handler)
    server.serve_forever()

def serve_thread_tcp(host, port, handler):
    server = SocketServer.TCPServer((host, port), handler)
    server.serve_forever()

def main():
    try:
        threading.Thread(target=serve_thread_tcp, args=('', 4045, TEST)).start()
        threading.Thread(target=serve_thread_tcp, args=('', 239, TEST1)).start()
        threading.Thread(target=serve_thread_udp, args=('', 1246, TEST2)).start()
        threading.Thread(target=serve_thread_tcp, args=('', 12342, TEST3)).start()
    except KeyboardInterrupt:
        os._exit(0)

if __name__ == '__main__':
    try:
        main()
    except:
        raise
I'm trying to understand what I've done wrong and what would be the best way to be able to kill the whole script via Ctrl-C.
Any help would be greatly appreciated!
Thanks
Here is a solution:
def main():
    import thread
    try:
        thread.start_new(serve_thread_tcp, ('', 4045, TEST))
        thread.start_new(serve_thread_tcp, ('', 239, TEST1))
        thread.start_new(serve_thread_udp, ('', 1246, TEST2))
        thread.start_new(serve_thread_tcp, ('', 12342, TEST3))
    except KeyboardInterrupt:
        os._exit(0)

if __name__ == '__main__':
    try:
        main()
    except:
        raise
    raw_input()
To close the server you can press Return (so raw_input() returns) or close stdin.
The problem is that the Thread class will not allow the application to exit before all threads have finished.
serve_forever() will not end until you shut down the server it belongs to; another solution is to do that on KeyboardInterrupt.
When creating threads, set them as daemons:
Thread.__init__(self)
self.setDaemon(True)
This way, all the threads will terminate when you have killed the main thread.
Based on the Python documentation:
A thread can be flagged as a “daemon thread”. The significance of this flag is that the entire Python program exits when only daemon threads are left. The initial value is inherited from the creating thread. The flag can be set through the daemon property.
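Combining the two answers, a minimal sketch for this question (TEST and the port are from the original post; serve_thread_tcp is modified here to record each server so the main thread can stop it, and the daemon flag plus an explicit shutdown() make Ctrl-C effective):

import SocketServer
import threading

servers = []

def serve_thread_tcp(host, port, handler):
    server = SocketServer.TCPServer((host, port), handler)
    servers.append(server)      # keep a handle so the main thread can stop it
    server.serve_forever()

t = threading.Thread(target=serve_thread_tcp, args=('', 4045, TEST))
t.setDaemon(True)               # daemon: don't block interpreter exit
t.start()

try:
    while t.isAlive():
        t.join(1)               # short joins keep the main thread interruptible
except KeyboardInterrupt:
    for server in servers:
        server.shutdown()       # unblocks serve_forever() in the worker thread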
