Hi all, I have this main in Tornado:
def main():
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    periodic = tornado.ioloop.PeriodicCallback(check_commands_response, 5000)
    periodic.start()
    tornado.ioloop.IOLoop.instance().start()
Now, as you can see, I've put a periodic callback in the main that calls a function.
In this function I would like to use some methods that live inside some classes... so I cannot move check_commands_response out of the class. But I also can't keep the function inside the class (it's a BaseHandler), because when the main starts the function isn't defined yet...
How can I do this?
EDIT
What is wrong if I write this:
class CheckCommandResponse(BaseHandler):
    @tornado.web.authenticated
    @tornado.web.asynchronous
    @tornado.gen.engine
    @staticmethod
    def check_commands_response(self):
        self.lock_tables("read", ['networks'])
        nets = self.db.query("SELECT DISTINCT netid from networks")
        self.unlock_tables()
        for net in nets:
            got_commands_response(net)

    @staticmethod
    def got_commands_response(netid):
        como_url = "".join("http://xx.xx.xx.xx:44444/ztc_config?netid=" \
            + netid + "&opcode_group=0&opcode=0&start=-10s&end=-1s")
        http_client = AsyncHTTPClient()
        # asynchronous alternative to time.sleep
        yield tornado.gen.Task(tornado.ioloop.IOLoop.instance().add_timeout, time.time() + 5)
        response = yield tornado.gen.Task(http_client.fetch, como_url)
        print response
################################################################################
# Application Entry Point
################################################################################
def main():
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    periodic = tornado.ioloop.PeriodicCallback(
        CheckCommandResponse.check_commands_response, 5000)
    periodic.start()
    tornado.ioloop.IOLoop.instance().start()
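What goes wrong here is that PeriodicCallback is handed CheckCommandResponse.check_commands_response, an unbound method: no handler instance exists when the callback fires (Tornado creates RequestHandlers per request), so self.db and the lock helpers are never available. A minimal sketch of one way around this, assuming Application exposes the same db handle the handlers use (an assumption, not from the original post): keep the periodic job in a plain class you instantiate yourself, and hand its bound method to PeriodicCallback.

import time
import tornado.gen
import tornado.ioloop
from tornado.httpclient import AsyncHTTPClient

class CommandChecker(object):
    def __init__(self, db):
        self.db = db  # assumed: the same db object the handlers query

    @tornado.gen.engine
    def check_commands_response(self):
        nets = self.db.query("SELECT DISTINCT netid from networks")
        for net in nets:
            # assumes torndb-style rows with attribute access
            como_url = ("http://xx.xx.xx.xx:44444/ztc_config?netid=" + str(net.netid)
                        + "&opcode_group=0&opcode=0&start=-10s&end=-1s")
            response = yield tornado.gen.Task(AsyncHTTPClient().fetch, como_url)
            print response

def main():
    tornado.options.parse_command_line()
    application = Application()
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    checker = CommandChecker(application.db)  # assumption: Application keeps its db here
    periodic = tornado.ioloop.PeriodicCallback(
        checker.check_commands_response, 5000)
    periodic.start()
    tornado.ioloop.IOLoop.instance().start()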
I'm having trouble making Python asyncio NATS.io run sequentially. I have two classes: Account and Bridge.
Account holds the application logic, and it communicates with an external service over NATS.io through Bridge.
Main file:
loop = asyncio.get_event_loop()
account = Account(loop, options)
asyncio.async(account.start())
loop.run_forever()
Account class:
class Account:
    bridge = Bridge()

    def connect(self):
        result = self.bridge.connect(self.id)
        return result
Bridge class:
def connect(self, account_id):
    data = None
    try:
        response = yield from self.nc.timed_request("bank.account.connect",
                                                    BankRequest(
                                                        method="connect",
                                                        data={...}
                                                    ), 10)
        data = json.loads(response.data.decode())
    except ErrTimeout:
        status = Messages.REQUEST_TIMED_OUT
    return Result(data=data)
I need to call account.connect() from anywhere inside the Account class and get the result of the connection (sequentially). Right now I'm getting a generator object.
Your connect() methods should probably be coroutines:
class Account:
    bridge = Bridge()  # you probably want to put this in `def __init__(self)`!

    @asyncio.coroutine
    def connect(self):
        result = yield from self.bridge.connect(self.id)
        return result

class Bridge:
    @asyncio.coroutine
    def connect(self, account_id):
        data = None
        try:
            response = yield from self.nc.timed_request("bank.account.connect",
                                                        BankRequest(
                                                            method="connect",
                                                            data={...}
                                                        ), 10)
            data = json.loads(response.data.decode())
        except ErrTimeout:
            status = Messages.REQUEST_TIMED_OUT
        return Result(data=data)
and:
resp = yield from account.connect()
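Note that yield from is only legal inside another coroutine; from plain synchronous code you hand the coroutine to the event loop instead. A small sketch of both, reusing Account and options from the question:

import asyncio

@asyncio.coroutine
def do_work(account):
    # inside another coroutine, yield from waits for connect() to finish
    result = yield from account.connect()
    print(result)

# from plain synchronous code, let the loop drive the coroutine to completion:
loop = asyncio.get_event_loop()
account = Account(loop, options)
resp = loop.run_until_complete(account.connect())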
I'm trying to add a delay between requests in an asynchronous way.
When I use Tornado's gen.sleep(x), my function (launch) doesn't get executed.
If I remove the yield from yield gen.sleep(1.0), the function is called, but no delay is added.
How can I add a delay between requests in my for loop? I need to control the requests per second to an external API.
If I use time.sleep, the responses are delayed until after all requests are completed.
I tried adding the @gen.engine decorator to the launch function, with no results.
Code:
import collections
import tornado.httpclient

class BacklogClient(object):
    MAX_CONCURRENT_REQUESTS = 20

    def __init__(self, ioloop):
        self.ioloop = ioloop
        self.client = tornado.httpclient.AsyncHTTPClient(max_clients=self.MAX_CONCURRENT_REQUESTS)
        self.client.configure(None, defaults=dict(connect_timeout=20, request_timeout=30))
        self.backlog = collections.deque()
        self.concurrent_requests = 0

    def __get_callback(self, function):
        def wrapped(*args, **kwargs):
            self.concurrent_requests -= 1
            self.try_run_request()
            return function(*args, **kwargs)
        return wrapped

    def try_run_request(self):
        while self.backlog and self.concurrent_requests < self.MAX_CONCURRENT_REQUESTS:
            request, callback = self.backlog.popleft()
            self.client.fetch(request, callback=callback)
            self.concurrent_requests += 1

    def fetch(self, request, callback=None):
        wrapped = self.__get_callback(callback)
        self.backlog.append((request, wrapped))
        self.try_run_request()
import time
from tornado import ioloop, httpclient, gen

class TornadoBacklog:
    def __init__(self):
        self.queue = 0
        self.debug = 1
        self.toProcess = [
            'http://google.com',
            'http://yahoo.com',
            'http://nytimes.com',
            'http://msn.com',
            'http://cnn.com',
            'http://twitter.com',
            'http://facebook.com',
        ]

    def handle_request(self, response):
        print response.code
        if not self.backlog.backlog and self.backlog.concurrent_requests == 0:
            ioloop.IOLoop.instance().stop()

    def launch(self):
        self.ioloop = ioloop.IOLoop.current()
        self.backlog = BacklogClient(self.ioloop)
        for item in self.toProcess:
            yield gen.sleep(1.0)
            print item
            self.backlog.fetch(
                httpclient.HTTPRequest(
                    item,
                    method='GET',
                    headers=None,
                ),
                self.handle_request
            )
        self.ioloop.start()

def main():
    start_time = time.time()
    scraper = TornadoBacklog()
    scraper.launch()
    elapsed_time = time.time() - start_time
    print('Process took %f seconds processed %d items.' % (elapsed_time, len(scraper.toProcess)))

if __name__ == "__main__":
    main()
Reference: https://github.com/tornadoweb/tornado/issues/1400
Tornado coroutines have two components:
They contain "yield" statements
They are decorated with "gen.coroutine"
Use the "coroutine" decorator on your "launch" function:
@gen.coroutine
def launch(self):
Run a Tornado coroutine from start to finish like this:
tornado.ioloop.IOLoop.current().run_sync(launch)
Remove the call to "ioloop.start" from your "launch" function: the loop runs the "launch" function, not vice-versa.
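Putting those pieces together, a minimal self-contained sketch of the pattern (simplified to fetch sequentially with AsyncHTTPClient directly rather than through the question's BacklogClient; the urls list is my own, and gen.sleep needs Tornado 4.1+):

from tornado import gen, httpclient, ioloop

urls = ['http://google.com', 'http://yahoo.com', 'http://nytimes.com']

@gen.coroutine
def launch():
    client = httpclient.AsyncHTTPClient()
    for url in urls:
        yield gen.sleep(1.0)                # non-blocking one-second delay
        response = yield client.fetch(url)  # also yields, keeping launch async
        print response.code

# run_sync starts the IOLoop, runs launch to completion, then stops the loop
ioloop.IOLoop.current().run_sync(launch)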
I'm developing a simple client-server application in python. I'm using a manager to set up shared queues, but I can't figure out how to pass an arbitrary object from the server to the client. I suspect it has something to do with the manager.register function, but it's not very well explained in the multiprocessing documentation. The only example there uses Queues and nothing else.
Here's my code:
# manager_demo.py
from multiprocessing import Process, Queue, managers
from multiprocessing.managers import SyncManager
import time

class MyObject():
    def __init__( self, p, f ):
        self.parameter = p
        self.processor_function = f

class MyServer():
    def __init__(self, server_info, obj):
        print '=== Launching Server ... ====='
        (ip, port, pw) = server_info
        self.object = obj  # Parameters for task processing
        # Define queues
        self._process_queue = Queue()  # Queue of tasks to be processed
        self._results_queue = Queue()  # Queue of processed tasks to be stored
        # Set up IS_Manager class and register server functions
        class IS_Manager(managers.BaseManager): pass
        IS_Manager.register('get_processQ', callable=self.get_process_queue)
        IS_Manager.register('get_resultsQ', callable=self.get_results_queue)
        IS_Manager.register('get_object', callable=self.get_object)
        # Initialize manager and server
        self.manager = IS_Manager(address=(ip, port), authkey=pw)
        self.server = self.manager.get_server()
        self.server_process = Process( target=self.server.serve_forever )
        self.server_process.start()

    def get_process_queue(self): return self._process_queue
    def get_results_queue(self): return self._results_queue
    def get_object(self): return self.object

    def runUntilDone(self, task_list):
        # Fill the initial queue
        for t in task_list:
            self._process_queue.put(t)
        # Main loop
        total_tasks = len(task_list)
        while not self._results_queue.qsize()==total_tasks:
            time.sleep(.5)
            print self._process_queue.qsize(), '\t', self._results_queue.qsize()
            if not self._results_queue.empty():
                print '\t', self._results_queue.get()
        # Do stuff
        pass

class MyClient():
    def __init__(self, server_info):
        (ip, port, pw) = server_info
        print '=== Launching Client ... ====='
        class IS_Manager(managers.BaseManager): pass
        IS_Manager.register('get_processQ')
        IS_Manager.register('get_resultsQ')
        IS_Manager.register('get_object')
        # Set up manager, pool
        print '\tConnecting to server...'
        manager = IS_Manager(address=(ip, port), authkey=pw)
        manager.connect()
        self._process_queue = manager.get_processQ()
        self._results_queue = manager.get_resultsQ()
        self.object = manager.get_object()
        print '\tConnected.'

    def runUntilDone(self):  # , parameters):
        print 'Starting client main loop...'
        # Main loop
        while 1:
            if self._process_queue.empty():
                print 'I\'m bored here!'
                time.sleep(.5)
            else:
                task = self._process_queue.get()
                print task, '\t', self.object.processor_function( task, self.object.parameter )
        print 'Client process is quitting. Bye!'
        self._clients_queue.get()
And a simple server...
from manager_demo import *

def myProcessor( x, parameter ):
    return x + parameter

if __name__ == '__main__':
    my_object = MyObject( 100, myProcessor )
    my_task_list = range(1,20)
    my_server_info = ('127.0.0.1', 8081, 'my_pw')
    my_crawl_server = MyServer( my_server_info, my_object )
    my_crawl_server.runUntilDone( my_task_list )
And a simple client...
from manager_demo import *

if __name__ == '__main__':
    my_server_info = ('127.0.0.1', 8081, 'my_pw')
    my_client = MyClient( my_server_info )
    my_client.runUntilDone()
When I run this it crashes on:
erin@Erin:~/Desktop$ python client.py
=== Launching Client ... =====
    Connecting to server...
    Connected.
Starting client main loop...
2 Traceback (most recent call last):
  File "client.py", line 5, in <module>
    my_client.runUntilDone()
  File "/home/erin/Desktop/manager_demo.py", line 84, in runUntilDone
    print task, '\t', self.object.processor_function( task, self.object.parameter )
AttributeError: 'AutoProxy[get_object]' object has no attribute 'parameter'
Why does python have no trouble with Queues or the processor_function, but choke on the object parameter? Thanks!
You're encountering this issue because the parameter attribute on your MyObject() class is not a callable.
The documentation states that _exposed_ is used to specify a sequence of method names which proxies for this typeid should expose. When no exposed list is specified, all "public methods" of the shared object will be accessible. (Here a "public method" means any attribute which has a __call__() method and whose name does not begin with '_'.)
So you will need to manually expose the parameter attribute on MyObject, presumably as a method, by changing MyObject():
class MyObject():
    def __init__(self, p, f):
        self._parameter = p
        self.processor_function = f

    def parameter(self):
        return self._parameter
Also, you will need to change your task to:
self.object.processor_function(task, self.object.parameter())
HTH.
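To see the rule in isolation, here is a minimal standalone sketch (my own, not the question's code) showing that an AutoProxy forwards public methods but not plain attributes:

from multiprocessing.managers import BaseManager

class Shared(object):
    def __init__(self):
        self.value = 42       # plain attribute: NOT reachable through a proxy
    def get_value(self):      # public method: exposed automatically
        return self.value

class Mgr(BaseManager): pass
Mgr.register('get_shared', callable=Shared)

if __name__ == '__main__':
    manager = Mgr()
    manager.start()                 # runs a manager server in a child process
    proxy = manager.get_shared()
    print proxy.get_value()         # works: prints 42
    # print proxy.value             # AttributeError, same failure as the question
    manager.shutdown()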
I tried to get support on this but I am TOTALLY confused.
Here's my code:
from twisted.internet import reactor
from twisted.web.client import getPage
from twisted.web.error import Error
from twisted.internet.defer import DeferredList
from sys import argv

class GrabPage:
    def __init__(self, page):
        self.page = page

    def start(self, *args):
        if args == ():
            # We apparently don't need authentication for this
            d1 = getPage(self.page)
        else:
            if len(args) == 2:
                # We have our login information
                d1 = getPage(self.page, headers={"Authorization": " ".join(args)})
            else:
                raise Exception('Missing parameters')
        d1.addCallback(self.pageCallback)
        dl = DeferredList([d1])
        d1.addErrback(self.errorHandler)
        dl.addCallback(self.listCallback)

    def errorHandler(self, result):
        # Bad thingy!
        pass

    def pageCallback(self, result):
        return result

    def listCallback(self, result):
        print result

a = GrabPage('http://www.google.com')
data = a.start() # Not the HTML
I want to get the HTML that is handed to pageCallback when start() is called. This has been a pain for me. Thanks! And sorry for my sloppy coding.
You're missing the basics of how Twisted operates. It all revolves around the reactor, which you're never even running. Think of the reactor like this:
[reactor event-loop diagram (source: krondo.com)]
Until you start the reactor, setting up deferreds only chains callbacks together; there are no events to fire them.
I recommend you give the Twisted Intro by Dave Peticolas a read. It's quick and it really gives you all the missing information that the Twisted documentation doesn't.
Anyway, here is about the most basic usage example of getPage possible:
from twisted.web.client import getPage
from twisted.internet import reactor

url = 'http://aol.com'

def print_and_stop(output):
    print output
    if reactor.running:
        reactor.stop()

if __name__ == '__main__':
    print 'fetching', url
    d = getPage(url)
    d.addCallback(print_and_stop)
    reactor.run()
Since getPage returns a deferred, I'm adding the callback print_and_stop to the deferred chain. After that, I start the reactor. The reactor fires getPage, which then fires print_and_stop which prints the data from aol.com and then stops the reactor.
Edit to show a working example of OP's code:
class GrabPage:
    def __init__(self, page):
        self.page = page
        ########### I added this:
        self.data = None

    def start(self, *args):
        if args == ():
            # We apparently don't need authentication for this
            d1 = getPage(self.page)
        else:
            if len(args) == 2:
                # We have our login information
                d1 = getPage(self.page, headers={"Authorization": " ".join(args)})
            else:
                raise Exception('Missing parameters')
        d1.addCallback(self.pageCallback)
        dl = DeferredList([d1])
        d1.addErrback(self.errorHandler)
        dl.addCallback(self.listCallback)

    def errorHandler(self, result):
        # Bad thingy!
        pass

    def pageCallback(self, result):
        ########### I added this, to hold the data:
        self.data = result
        return result

    def listCallback(self, result):
        print result
        # Added for effect:
        if reactor.running:
            reactor.stop()

a = GrabPage('http://google.com')
########### Just call it without assigning to data
#data = a.start() # Not the HTML
a.start()
########### I added this:
if not reactor.running:
    reactor.run()
########### Reference the data attribute from the class
data = a.data
print '------REACTOR STOPPED------'
print
########### First 100 characters of a.data:
print '------a.data[:100]------'
print data[:100]
I want to create multiple threads, and every one of them should create a Flask app.
I am not sure how to do it, but here's what I have:
app = Flask(__name__)
app.url_map.strict_slashes = False

@app.route('/api/v1/something/<string:FirstArgument>/<string:SecondArgument>/', methods=['POST'])
def do_it(FirstArgument, SecondArgument):
    request_str = request.get_data().decode('utf-8').strip()
    response = somefunction(mydata.state, request_str)
    return response, 200

def run_app(this_port, mydata):
    currentThread = threading.current_thread()
    mydata.state = some_function_that_returns_6GB_of_data()
    app.run(host='0.0.0.0', port=this_port)

if __name__ == '__main__':
    mydata = threading.local()
    thread1 = Thread(target=run_app, args=(4100, mydata,))
    #thread2 = Thread(target=run_app, args=(4101, mydata,))
    thread1.start()
    #thread2.start()
For now I want to test only one thread. And I don't know how to pass mydata.state to do_it. If I add a new argument (def do_it(FirstArgument, SecondArgument, mydata.state)), Flask complains that it expects the extra variable to come from the app.route pattern. How can I pass this data to the do_it function?
And one more question: will this program pass N instances of state to N threads on N ports?
Or should I do something like this:
def do_it(FirstArgument, SecondArgument):
    request_str = request.get_data().decode('utf-8').strip()
    response = somefunction(mydata.state[threading.get_ident()], request_str)
    return response, 200

def run_app(this_port, mydata):
    currentThread = threading.current_thread()
    mydata.state[threading.get_ident()] = some_function_that_returns_6GB_of_data()
    app.run(host='0.0.0.0', port=this_port)
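One common way to get there (a sketch of my own, not from the thread; somefunction and some_function_that_returns_6GB_of_data are the question's placeholders) is an app factory: each thread builds its own Flask app, and the big state is captured in a closure rather than passed through threading.local:

from threading import Thread
from flask import Flask, request

def make_app(state):
    app = Flask(__name__)
    app.url_map.strict_slashes = False

    @app.route('/api/v1/something/<string:FirstArgument>/<string:SecondArgument>/',
               methods=['POST'])
    def do_it(FirstArgument, SecondArgument):
        request_str = request.get_data().decode('utf-8').strip()
        return somefunction(state, request_str), 200  # state comes from the closure

    return app

def run_app(this_port):
    state = some_function_that_returns_6GB_of_data()  # one copy per thread
    make_app(state).run(host='0.0.0.0', port=this_port)

if __name__ == '__main__':
    Thread(target=run_app, args=(4100,)).start()
    Thread(target=run_app, args=(4101,)).start()

Built this way, each of the N threads owns a separate app and a separate copy of the state, so there is no need to index anything by threading.get_ident().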