New to Tornado, and to Redis.
I found someone with this same problem here: tornado-redis: RPOP works but BRPOP doesn't? But I still do not understand why, or how to resolve it.
The code below works fine:
#coding:utf-8
import random
import time
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.options
from uuid import uuid4
# import redis
from tornado.escape import json_encode
import tornado.gen
import tornadoredis

class noticePush(tornado.web.RequestHandler):
    def initialize(self):
        print 'initialize'

    @tornado.web.asynchronous
    @tornado.gen.engine
    def get(self):
        print 'go here'
        try:
            uid = self.get_argument('uid')
            # key = u'test_comet%s'%uid
            key = 'test_comet1'
            c = tornadoredis.Client(host='127.0.0.1', port=6379, password='psw')
            print key
            res = yield tornado.gen.Task(c.blpop, key, 0)
            print res
            if res:
                self.finish(json_encode(res))
            else:
                self.finish('None')
        except Exception, e:
            print e

class Application(tornado.web.Application):
    def __init__(self):
        handlers = [
            (r'/', noticePush)
        ]
        settings = {
            'template_path': 'templates',
            'static_path': 'static',
            'debug': True
        }
        tornado.web.Application.__init__(self, handlers, **settings)

if __name__ == '__main__':
    tornado.options.parse_command_line()
    app = Application()
    server = tornado.httpserver.HTTPServer(app)
    server.listen(8000)
    tornado.ioloop.IOLoop.instance().start()
But when I try to use get_argument to build the key, blpop never returns any data:
uid = self.get_argument('uid')
key = 'test_comet' + uid
c = tornadoredis.Client(host='127.0.0.1', port=6379, password='psw')
print key
res = yield tornado.gen.Task(c.blpop, key, 0)
print res
if res:
    self.finish(json_encode(res))
else:
    self.finish('None')
I tried to read the tornadoredis source, found the blpop definition, and discovered the reason:
def blpop(self, keys, timeout=0, callback=None):
    tokens = to_list(keys)
    tokens.append(timeout)
    self.execute_command('BLPOP', *tokens, callback=callback)

def to_list(source):
    if isinstance(source, str):
        return [source]
    else:
        return list(source)
The important part:

str_key = 'test_comet'             # type(key) -> str
unicode_key = 'test_comet' + uid   # type(key) -> unicode, because get_argument returns unicode

A unicode key fails the isinstance(source, str) check, so to_list falls through to list(source), which splits the key into a list of single characters; BLPOP then blocks on those one-character keys, which are never filled. When I encode the key with unicode_key.encode('utf-8') it becomes a str again, and the code works!
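So the fix is a one-line change in the handler (a minimal sketch; c is the same client created above):

uid = self.get_argument('uid')                 # get_argument returns a unicode object
key = ('test_comet' + uid).encode('utf-8')     # force str so to_list() wraps it as [key]
res = yield tornado.gen.Task(c.blpop, key, 0)  # BLPOP now waits on the intended key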
I'm trying to edit this project in Python to make an HP iLO exporter for Prometheus. So far I have read a few articles here on Stack Overflow and tried to implement some of the functionality, and I eventually came up with a partially working script, but the hostname does not change after the first request. Is there a way to dump the collector?
I have tried it with try/except, but it just does not work.
The goal is to use curl like this:
curl localhost:9116/metrics?hostname=ip
And what will happen if there are 10 requests at the same time with different hostnames? Should it create some kind of a queue?
Can someone help me? Thanks
Original project: https://github.com/JackWindows/ilo-exporter
My code:
#!/usr/bin/env python
import collections
import os
import traceback

import redfish
import waitress
from flask import Flask, Response, request
from prometheus_client import make_wsgi_app
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.wsgi import ClosingIterator


class AfterResponse:
    def __init__(self, app=None):
        self.callbacks = []
        if app:
            self.init_app(app)

    def __call__(self, callback):
        self.callbacks.append(callback)
        return callback

    def init_app(self, app):
        # install extension
        app.after_response = self
        # install middleware
        app.wsgi_app = AfterResponseMiddleware(app.wsgi_app, self)

    def flush(self):
        for fn in self.callbacks:
            try:
                fn()
            except Exception:
                traceback.print_exc()


class AfterResponseMiddleware:
    def __init__(self, application, after_response_ext):
        self.application = application
        self.after_response_ext = after_response_ext

    def __call__(self, environ, start_response):
        iterator = self.application(environ, start_response)
        try:
            return ClosingIterator(iterator, [self.after_response_ext.flush])
        except Exception:
            traceback.print_exc()
            return iterator


class ILOCollector(object):
    def __init__(self, hostname: str, port: int = 443, user: str = 'admin', password: str = 'password') -> None:
        self.ilo = redfish.LegacyRestClient(base_url=hostname, username=user, password=password)
        self.ilo.login()

        system = self.ilo.get('/redfish/v1/Systems/1/').obj
        self.label_names = ('hostname', 'product_name', 'sn')
        self.label_values = (hostname, system.Model, system.SerialNumber.strip())

    def collect(self):
        embedded_media = self.ilo.get('/redfish/v1/Managers/1/EmbeddedMedia/').obj
        smart_storage = self.ilo.get('/redfish/v1/Systems/1/SmartStorage/').obj
        thermal = self.ilo.get('/redfish/v1/Chassis/1/Thermal/').obj
        power = self.ilo.get('/redfish/v1/Chassis/1/Power/').obj

        g = GaugeMetricFamily('hpilo_health',
                              'iLO health status, -1: Unknown, 0: OK, 1: Degraded, 2: Failed.',
                              labels=self.label_names + ('component',))

        def status_to_code(status: str) -> int:
            status = status.lower()
            ret = -1
            if status == 'ok':
                ret = 0
            elif status == 'warning':
                ret = 1
            elif status == 'failed':
                ret = 2
            return ret

        g.add_metric(self.label_values + ('embedded_media',), status_to_code(embedded_media.Controller.Status.Health))
        g.add_metric(self.label_values + ('smart_storage',), status_to_code(smart_storage.Status.Health))
        for fan in thermal.Fans:
            g.add_metric(self.label_values + (fan.FanName,), status_to_code(fan.Status.Health))
        yield g

        g = GaugeMetricFamily('hpilo_fan_speed', 'Fan speed in percentage.',
                              labels=self.label_names + ('fan',), unit='percentage')
        for fan in thermal.Fans:
            g.add_metric(self.label_values + (fan.FanName,), fan.CurrentReading)
        yield g

        sensors_by_unit = collections.defaultdict(list)
        for sensor in thermal.Temperatures:
            if sensor.Status.State.lower() != 'enabled':
                continue
            reading = sensor.CurrentReading
            unit = sensor.Units
            sensors_by_unit[unit].append((sensor.Name, reading))
        for unit in sensors_by_unit:
            g = GaugeMetricFamily('hpilo_temperature', 'Temperature sensors reading.',
                                  labels=self.label_names + ('sensor',), unit=unit.lower())
            for sensor_name, sensor_reading in sensors_by_unit[unit]:
                g.add_metric(self.label_values + (sensor_name,), sensor_reading)
            yield g

        g = GaugeMetricFamily('hpilo_power_current', 'Current power consumption in Watts.',
                              labels=self.label_names, unit='watts')
        g.add_metric(self.label_values, power.PowerConsumedWatts)
        yield g

        label_values = self.label_values + (str(power.PowerMetrics.IntervalInMin),)
        g = GaugeMetricFamily('hpilo_power_average', 'Average power consumption in Watts.',
                              labels=self.label_names + ('IntervalInMin',), unit='watts')
        g.add_metric(label_values, power.PowerMetrics.AverageConsumedWatts)
        yield g

        g = GaugeMetricFamily('hpilo_power_min', 'Min power consumption in Watts.',
                              labels=self.label_names + ('IntervalInMin',), unit='watts')
        g.add_metric(label_values, power.PowerMetrics.MinConsumedWatts)
        yield g

        g = GaugeMetricFamily('hpilo_power_max', 'Max power consumption in Watts.',
                              labels=self.label_names + ('IntervalInMin',), unit='watts')
        g.add_metric(label_values, power.PowerMetrics.MaxConsumedWatts)
        yield g


# Create Flask app
app = Flask('iLO Exporter')


@app.route('/')
def root():
    return '''<html>
<head><title>iLO Exporter</title></head>
<body>
<h1>iLO Exporter</h1>
<p><a href='/metrics'>Metrics</a></p>
</body>
</html>'''


AfterResponse(app)


@app.after_response
def say_hi():
    print("hi")


@app.route("/metrics")
def home():
    try:
        REGISTRY.unregister(collector)
    except:
        print("An exception occurred")
        pass

    port = int(os.getenv('ILO_PORT', 443))
    user = os.getenv('ILO_USER', 'admin')
    password = os.getenv('ILO_PASSWORD', 'password')
    hostname = request.args.get('hostname')

    app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
        '/metrics': make_wsgi_app()
    })

    collector = ILOCollector(hostname, port, user, password)
    REGISTRY.register(collector)


if __name__ == '__main__':
    exporter_port = int(os.getenv('LISTEN_PORT', 9116))
    waitress.serve(app, host='0.0.0.0', port=exporter_port)
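One way to sidestep the shared-collector problem, sketched here only as an idea and not taken from the original project: render a throwaway CollectorRegistry per request instead of mutating the global REGISTRY, so concurrent requests with different ?hostname= values never share collector state. This would replace the home() handler and the DispatcherMiddleware above:

from prometheus_client import CollectorRegistry, generate_latest

@app.route('/metrics')
def metrics():
    # a fresh registry for this request only
    registry = CollectorRegistry()
    registry.register(ILOCollector(request.args.get('hostname'),
                                   int(os.getenv('ILO_PORT', 443)),
                                   os.getenv('ILO_USER', 'admin'),
                                   os.getenv('ILO_PASSWORD', 'password')))
    return Response(generate_latest(registry), mimetype='text/plain')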
I am trying to develop a websocket server with Python and Tornado. This websocket server streams a large database result to the client for some visualization.
The problem I am facing is that no client can connect until the long process (send_data) is finished. It is as if only one client can connect at a time.
Are websockets already asynchronous, or do I need to implement the asynchronous behaviour myself?
The following is my code:
import time
import random
import json
import datetime
import os
import sys
import cx_Oracle
import string
import re
import subprocess
import asyncio
from tornado import websocket, web, ioloop, escape
from datetime import timedelta
from random import randint
from pprint import pprint
from tornado.web import RequestHandler

os.environ['ORACLE_HOME'] = 'pathToOracleHome'
os.environ['LD_LIBRARY_PATH'] = "$ORACLE_HOME/lib"

def is_hex(a):
    printable = set(string.printable) - set("\x0b\x0c")
    return any(c not in printable for c in a)

def json_print(d):
    print(json.dumps(d, indent=4))

def printf(format, *args):
    sys.stdout.write(format % args)

def db(database_name='localhost/database'):
    return cx_Oracle.connect('user', 'pwd', database_name)

def query_db(query, args=(), one=False):
    cur = db().cursor()
    cur.arraysize = 1500
    cur.execute(query, args)
    return cur

class SummaryWebSocketHandler(websocket.WebSocketHandler):
    clients = []

    def check_origin(self, origin):
        return True

    def on_message(self, message):
        print('message received')

    def closeDbConn(self, cur):
        cur.connection.close()

    def query(self, sql):
        cursor = query_db(sql)
        self.send_data(cursor)

    ### THIS IS THE LONG PROCESS ###
    def send_data(self, cur):
        results = {}
        columns = [column[0] for column in cur.description]
        total = 0
        while True:
            Res = []
            rows = cur.fetchmany()
            if rows == []:
                print('no more rows')
                break
            for row in rows:
                results = {}
                for i, value in enumerate(row):
                    if value == None:
                        value = '-'
                    results[cur.description[i][0]] = value
                Res.append(results)
            self.write_message(json.dumps(Res))
            total = total + len(rows)
        print('total rows send', total)
        self.write_message("finished sending all data")
        self.on_close(cur)

    def open(self, table):
        print('Connection established. \n')
        print('Query string ' + table + '\n')
        p = re.compile(r'fields=')
        m = p.match(table)
        matches = table.split("&")
        print(matches)
        param_string = ''
        params = []
        if matches:
            for m in matches:
                print('m', m)
                param = ''
                items = m.split('=')
                if items[1] != '':
                    param = '--' + items[0] + ' ' + items[1]
                params.append(param)
            param_string = " ".join(params)
        script = "php getStmt.php " + param_string
        print(script)
        proc = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE)
        sql = proc.stdout.read()
        print(sql)
        self.query(sql)

    def on_close(self, cursor):
        print('Connection closed.')
        cursor.close()

settings = {'auto_reload': True, 'debug': True}

if __name__ == "__main__":
    print("Starting websocket server program. Awaiting client requests to open websocket ...")
    application = web.Application([(r"/\/table\/(.*)", SummaryWebSocketHandler)],
                                  **settings)
    application.listen(3001)
    ioloop.IOLoop.instance().start()
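Tornado handlers are only non-blocking if their callbacks return quickly: a long synchronous loop like send_data stalls the single IOLoop thread, so no other client can connect until it finishes. A minimal sketch of moving the blocking database work onto a thread pool (assuming Tornado 5.1+ on Python 3, where open() may be a coroutine; build_sql is a hypothetical stand-in for the getStmt.php step above):

from concurrent.futures import ThreadPoolExecutor
from tornado.ioloop import IOLoop

executor = ThreadPoolExecutor(max_workers=4)   # pool for the blocking cx_Oracle calls

class SummaryWebSocketHandler(websocket.WebSocketHandler):
    async def open(self, table):
        sql = build_sql(table)  # hypothetical helper wrapping the getStmt.php step
        cursor = await IOLoop.current().run_in_executor(executor, query_db, sql)
        columns = [c[0] for c in cursor.description]
        while True:
            # fetchmany() blocks, so run it on the pool, not on the IOLoop thread
            rows = await IOLoop.current().run_in_executor(executor, cursor.fetchmany)
            if not rows:
                break
            # write_message runs back on the IOLoop thread after each await
            self.write_message(json.dumps([dict(zip(columns, row)) for row in rows]))
        self.write_message("finished sending all data")
        cursor.close()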
I am implementing a STOMP consumer as a library. By calling this library in another application, I should be able to get the data from ActiveMQ. I am implementing it as below, but I have a problem returning frame.body: I am not able to retrieve the data from outside the class.
from twisted.internet import defer
from stompest.async import Stomp
from stompest.async.listener import SubscriptionListener
from stompest.config import StompConfig
from stompest.error import StompConnectionError, StompProtocolError  # needed for the except clauses below
from stompest.protocol import StompSpec  # needed for the header constants below
from socket import gethostname
from uuid import uuid1
import json

class Consumer(object):
    def __init__(self, amq_uri):
        self.amq_uri = amq_uri
        self.hostname = gethostname()
        self.config = StompConfig(uri=self.amq_uri)

    @defer.inlineCallbacks
    def run(self, in_queue):
        client = yield Stomp(self.config)
        headers = {
            StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL,
            StompSpec.ID_HEADER: self.hostname,
            'activemq.prefetchSize': '1000',
        }
        yield client.connect(headers=self._return_client_id())
        client.subscribe(
            in_queue,
            headers,
            listener=SubscriptionListener(self.consume)
        )
        try:
            client = yield client.disconnected
        except StompConnectionError:
            yield client.connect(headers=self._return_client_id())
            client.subscribe(
                in_queue,
                headers,
                listener=SubscriptionListener(self.consume)
            )
        while True:
            try:
                yield client.disconnected
            except StompProtocolError:
                pass
            except StompConnectionError:
                yield client.connect(headers=self._return_client_id())
                client.subscribe(
                    in_queue,
                    headers,
                    listener=SubscriptionListener(self.consume)
                )

    def _return_client_id(self):
        client_id = {}
        client_id['client-id'] = gethostname() + '-' + str(uuid1())
        return client_id

    def consume(self, client, frame):
        data = json.loads(frame.body)
        print 'Received Message Type {}'.format(type(data))
        print 'Received Message {}'.format(data)
        # I want to return data here. I am able to print the frame.body here.

# Call from another application
import Queue
from twisted.internet import reactor

amq_uri = 'tcp://localhost:61613'
in_queue = '/queue/test_queue'

c = Consumer(amq_uri)
c.run(in_queue)
print "data is from outside function", data  # Should be able to get the data which is returned by consume here
reactor.run()
Can someone please let me know how I can achieve this? Thanks.
I found a solution to my problem. Instead of using the async stomp library, I used the sync stomp library and implemented it as below:
from socket import gethostname
import json

from stompest.config import StompConfig
from stompest.protocol import StompSpec
from stompest.sync import Stomp  # the sync client, instead of stompest.async

class Consumer(object):
    def __init__(self, amq_uri):
        self.amq_uri = amq_uri
        self.hostname = gethostname()
        self.config = StompConfig(uri=self.amq_uri)

    def run(self, in_queue, return_dict):
        client = Stomp(self.config)
        headers = {
            StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL,
            StompSpec.ID_HEADER: self.hostname
        }
        client.connect()
        client.subscribe(in_queue, headers)
        try:
            frame = client.receiveFrame()
            data = json.dumps(frame.body)
        except Exception as exc:
            print exc
        client.ack(frame)
        client.disconnect()
        return_dict['data'] = data
        return data
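Called from the other application it looks like this (a sketch; return_dict is just a mutable result carrier, so a plain dict works here, as would a multiprocessing.Manager().dict() if run were executed in another process):

amq_uri = 'tcp://localhost:61613'
in_queue = '/queue/test_queue'

return_dict = {}
c = Consumer(amq_uri)
data = c.run(in_queue, return_dict)  # blocks until one frame is consumed
print "data is from outside function", return_dict['data']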
#!/usr/bin/python27
import tornado
import tornado.websocket
import tornado.wsgi
import motor
import json
import jsonpickle
import uuid
from py2neo import neo4j, cypher
from tornado.ioloop import IOLoop

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        db = self.settings['db']

class ChatWebSocket(tornado.websocket.WebSocketHandler):
    ids = {}
    frd = {}

    def my_callback(result, error):
        print 'result', repr(result)
        IOLoop.instance().stop()

    def on_message(self, message):
        def get_current_user(self):
            print(self)
        to = db.today
        data = json.loads(message)
        print(data)
        s = str(data['who'])
        if (data['st'] == 'me'):
            if ''+s+'' in ChatWebSocket.ids:
                del ChatWebSocket.ids[''+s+'']
                ChatWebSocket.ids[''+s+''] = self
                print("sf")
            else:
                ChatWebSocket.ids[''+s+''] = self
            print(ChatWebSocket.ids)
        elif (data['st'] == 'st'):
            graph_db = neo4j.GraphDatabaseService()
            query = "START a=node:node_auto_index(uid='" + data['who'] + "') MATCH a-[:frd]-q RETURN q.uid as id"
            data1, metadata = cypher.execute(graph_db, query)
            print(data1)
            print(data)
            for y in data1:
                for j in y:
                    if ''+str(j)+'' in ChatWebSocket.ids:
                        ChatWebSocket.ids[''+str(j)+''].write_message({"s": "st", "id": data['id'], "con": data['con'], "time": data['time'], "on": data['on']})
                    else:
                        print("shit")
        elif (data['st'] == 'com'):
            graph_db = neo4j.GraphDatabaseService()
            query = "START a=node:node_auto_index(uid='" + data['who'] + "') MATCH a-[:frd]-q RETURN q.uid as id"
            data1, metadata = cypher.execute(graph_db, query)
            print(data1)
            print(data)
            for y in data1:
                for j in y:
                    if ''+str(j)+'' in ChatWebSocket.ids:
                        ChatWebSocket.ids[''+str(j)+''].write_message({"con": data['con'], "id": data['id'], "s": "com"})
                    else:
                        print("shit")
        elif (data['st'] == 'cl'):
            print("cl")
            if ''+s+'' in ChatWebSocket.ids:
                del ChatWebSocket.ids[''+s+'']
                print(ChatWebSocket.ids)
                print("closed")
        elif (data['st'] == 'ty'):
            s = str(data['whom'])
            if ChatWebSocket.ids[''+s+''].write_message({"g": str(data['who']), "s": "ty"}):
                print("ok")
        else:
            doc = {'who': data['who'], 'whom': data['whom'], 'mes': data['mes'], 'id': data['who'] + data['whom']}
            to.insert(doc)
            s = str(data['whom'])
            if ChatWebSocket.ids[''+s+''].write_message({"g": str(data['who']), "mes": data['mes'], "s": "m"}):
                print("ok")

db = motor.MotorClient().open_sync().chat

tornado_app = tornado.web.Application([(r'/websocket', ChatWebSocket),
                                       (r'.*', tornado.web.FallbackHandler),
                                       (r'/', MainHandler)], db=db)
tornado_app.listen(5528)
tornado.ioloop.IOLoop.instance().start()
Here is what I am doing: when the websocket opens, the client sends its unique id, which Python receives in on_message and stores in a dict as {uniqueid: websocket object}. When somebody sends a message, it includes the unique id, Python looks up the websocket object in the dict and sends the message to the appropriate clients. But sometimes it does not work and raises a NoneType attribute error, even though I have saved the websocket object in the dict.
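One hardening step that may explain the intermittent NoneType errors: look the socket up with .get() and check it is still open before writing. A sketch (send_to is a hypothetical helper, and ws_connection being set to None when the socket closes is Tornado behaviour assumed here):

def send_to(uid, payload):
    sock = ChatWebSocket.ids.get(str(uid))   # .get() avoids KeyError for unknown ids
    if sock is None or sock.ws_connection is None:
        return False                         # never registered, or already closed
    sock.write_message(payload)
    return True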
I am getting the following error after a few hours of successful running.
Traceback (most recent call last):
File "/usr/lib/python2.6/threading.py", line 484, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/lib/python2.6/dist-packages/twisted/python/threadpool.py", line 210, in _worker
result = context.call(ctx, function, *args, **kwargs)
File "/usr/lib/python2.6/dist-packages/twisted/python/context.py", line 59, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/usr/lib/python2.6/dist-packages/twisted/python/context.py", line 37, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/usr/lib/python2.6/dist-packages/twisted/enterprise/adbapi.py", line 436, in _runInteraction
conn.rollback()
File "/usr/lib/python2.6/dist-packages/twisted/enterprise/adbapi.py", line 52, in rollback
self._connection.rollback()
_mysql_exceptions.OperationalError: (2006, 'MySQL server has gone away')
My code is something like this...
from twisted.internet import reactor, defer, threads
from twisted.enterprise import adbapi

dbpool = adbapi.ConnectionPool("MySQLdb", '192.168.1.102', 'test', 'test', 'test')

class Scanner:
    def _execQuery(self, txn):
        sql = "SELECT tool_id,tool_name FROM tool_master"
        txn.execute(sql)
        result = txn.fetchall()
        return result

    def objCursor(self):
        return dbpool.runInteraction(self._execQuery)

    def printResult(self, result):
        print "resssssssssssssssssss", result
        reactor.callLater(3, self.deferExecute)

    def deferExecute(self):
        self.objCursor().addCallback(self.printResult)

Scanner()

class MyApp(object):
    reactor.callInThread(Scanner().deferExecute)
    reactor.run()

MyApp()
Can anyone tell me why I am getting this error?
can anyone tell me why I am getting this error?
Because you're doing it wrong:
runInteraction runs the supplied function in a thread, passing it a cursor to a transaction. You shouldn't be calling reactor.callInThread(Scanner().deferExecute).
It's better to use a twisted.internet.task.LoopingCall; it will make sure that each call completes before the next one is fired.
You're only running a query in your example, so you could use ConnectionPool.runQuery instead of ConnectionPool.runInteraction.
Use errback functions to report on exceptions.
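For example, the whole interaction collapses to something like this (a sketch; runQuery returns a Deferred that fires with the fetched rows):

def printResult(result):
    print "Got Result: %r" % result

def printError(failure):
    failure.printTraceback()

d = dbpool.runQuery("SELECT tool_id, tool_name FROM tool_master")
d.addCallbacks(printResult, printError)  # the errback reports exceptions instead of losing them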
Attempting to correct for your badly formatted code, I think you've got this:
from twisted.internet import reactor, defer, threads
from twisted.enterprise import adbapi

dbpool = adbapi.ConnectionPool("MySQLdb", '192.168.1.102', 'test', 'test', 'test')

class Scanner:
    def _execQuery(self, txn):
        sql = "SELECT tool_id,tool_name FROM tool_master"
        txn.execute(sql)
        result = txn.fetchall()
        return result

    def objCursor(self):
        return dbpool.runInteraction(self._execQuery)

    def printResult(self, result):
        print "resssssssssssssssssss", result
        reactor.callLater(3, self.deferExecute)

    def deferExecute(self):
        self.objCursor().addCallback(self.printResult)

Scanner()

class MyApp(object):
    reactor.callInThread(Scanner().deferExecute)
    reactor.run()

MyApp()
when you probably need something like the following instead. If you're planning on writing a twisted Application, it will be easy to modify this Scanner class to inherit from twisted.application.service.Service.
from twisted.internet import reactor, defer, task
from twisted.enterprise import adbapi

class Scanner(object):
    def __init__(self, dbpool=None):
        self.dbpool = dbpool
        self.loopCall = task.LoopingCall(self.myQuery)

    def start(self):
        print "Started scanner"
        self.loopCall.start(3)

    def stop(self):
        print "Stopping scanner"
        self.loopCall.stop()

    def myQuery(self):
        def interact(txn):
            sql = "SELECT tool_id,tool_name FROM tool_master"
            txn.execute(sql)
            return txn.fetchall()
        d = self.dbpool.runInteraction(interact)
        d.addCallbacks(self.printResult, self.printError)

    def printResult(self, result):
        print "Got Result: %r" % result

    def printError(self, error):
        print "Got Error: %r" % error
        error.printTraceback()

if __name__ == '__main__':
    from twisted.internet import reactor
    dbpool = adbapi.ConnectionPool("MySQLdb", '192.168.1.102', 'test', 'test', 'test')
    s = Scanner(dbpool)
    reactor.callWhenRunning(s.start)
    reactor.addSystemEventTrigger('before', 'shutdown', s.stop)
    reactor.run()
After all the suggestions and help from Matt, I have the following code, which runs successfully:
#!usr/bin/python
# Using the "dbmodule" from the previous example, create a ConnectionPool
from twisted.internet import reactor
from twisted.enterprise import adbapi
from twisted.internet import reactor, defer, threads
from twisted.python.threadpool import ThreadPool
import itertools
from twisted.internet.threads import deferToThread
from twisted.internet import reactor, defer, task
from tools.printTime import *
from tools.getVersion import *
from sh_log import *

concurrent = 30
finished = itertools.count(1)
reactor.suggestThreadPoolSize(concurrent)

# Creating global instance variables
path = "tools"
lo = Log()

class ToolsBuilder:
    def build(self, txn, tool, asset_id):
        if tool:
            print "\n"
            try:
                sql = "select tool_filename from tool_master where tool_id = %s" % (tool,)
                sql_asset = "select asset_url from asset_master where asset_id = %s" % (asset_id,)
                txn.execute(sql_asset)
                asset_url = txn.fetchall()
                log_date = lo.log_date()
                txn.execute(sql)
                result = txn.fetchall()
                log = '\n' + log_date + "::" + str(result[0][0]) + " tool object is created......\n"
                lo.wfile(log)
                temp = (path + '/' + str(result[0][0]))
                if result:
                    if temp:
                        f = open(temp).read()
                        obj_tool = compile(f, 'a_filename', 'exec')
                        return obj_tool
            except:
                lo.wfile("Error in creating executable tool object......")

tb = ToolsBuilder()

class ToolsVectorGenerator:
    def generate(self, txn, res_set={}):
        v1 = []
        for asset_id in res_set.iterkeys():
            try:
                obj_tools = []
                if asset_id:
                    print "asset_id..............................", asset_id
                    log_date = lo.log_date()
                    log = log_date + "::" + " \nVector generation for the asset number...:" + str(asset_id)
                    lo.wfile(log)
                    vector = []
                    tools_arr = []
                    obj_tools = []
                    for tool in res_set[asset_id]:
                        if tool:
                            print "tool..............", tool
                            temp_tool = tb.build(txn, tool, asset_id)
                            print "temp_tool..........", temp_tool
                            # fetch data of tool setting.....
                            sql_tool_setting = "select * from tool_asset_settings where tool_id =%s" % (tool,)
                            txn.execute(sql_tool_setting)
                            result_tool_setting = txn.fetchall()
                            tool_id = result_tool_setting[0][1]
                            t_id = int(tool_id)
                            tool_id_arr = []
                            tool_id_arr.append(t_id)
                            tool_id_arr.append(result_tool_setting)
                            tool_id_arr.append(temp_tool)
                            tools_arr.append(tool_id_arr)
                    # fetch data from asset master
                    sql_asset = "select asset_name from asset_master where asset_id=%s" % (asset_id,)
                    txn.execute(sql_asset)
                    result_asset = txn.fetchall()
                    vector.append(result_asset)
                    vector.append(tools_arr)
            except:
                lo.wfile("\nError in getting asset,please check your database or network connection......")
            tvm.executeVector(vector)

tvg = ToolsVectorGenerator()

class Tool:
    def exectool(self, tool):
        exec tool
        return

    def getResult(self, tool):
        return deferToThread(self.exectool, tool)

to = Tool()

class StateMachine:
    def setPriority(self, txn, tup):
        temp = []
        arr = []
        for li in tup:
            sql2 = "select tool_dependency from tool_asset_settings where tool_id =%s" % (li[1],)
            txn.execute(sql2)
            result12 = txn.fetchall()
            arr = []
            if result12[0][0] != None:
                tup12 = result12[0][0]
                arr = (li[0], tup12)
                # print "arr.........", arr
                if arr in tup:
                    print "This element is already exist......."
                else:
                    temp.append(arr)
        temp.extend(tup)
        return tuple(temp)

st = StateMachine()

class ToolsVectorExecutionManager(object):
    def executeVector(self, toolsvector):
        print "toolsvector================>", toolsvector
        if toolsvector:
            for tools in toolsvector[1]:
                if tools[2] != None:
                    to.getResult(tools[2])

tvm = ToolsVectorExecutionManager()

class ToolsToExecuteAnalyzer:
    def __init__(self, dbpool=None):
        self.dbpool = dbpool
        self.loopCall = task.LoopingCall(self.myQuery)

    def start(self):
        print "Started scanner"
        self.loopCall.start(3)

    def stop(self):
        print "Stopping scanner"
        self.loopCall.stop()

    def myQuery(self):
        def interact(txn):
            sql = "SELECT tool_asset_id,tool_execute_id FROM tool_to_execute where status='0'"
            txn.execute(sql)
            result = txn.fetchall()
            if result:
                tool_asset_id = tuple([int(e[0]) for e in result])
                tool_execute_id = tuple([int(e[1]) for e in result])
                if len(tool_asset_id) > 1:
                    sql1 = "SELECT asset_id,tool_id FROM tool_in_assets WHERE tool_asset_id IN %s" % (tool_asset_id,)
                else:
                    sql1 = "SELECT asset_id,tool_id FROM tool_in_assets WHERE tool_asset_id = (%s)" % (tool_asset_id)
                txn.execute(sql1)
                tup = txn.fetchall()
                # dependency check for the selected tool
                asset_tool = st.setPriority(txn, tup)
                log_date = lo.log_date()
                log = log_date + "::priority have been set for the tools......\n"
                lo.wfile(log)
                # creating group of asset with their tools
                res = {}
                for element in asset_tool:
                    if element[0] in res:
                        res[element[0]].append(int(element[1]))
                    else:
                        res[int(element[0])] = [int(element[1])]
                # record deletion from tool_to_execute table
                if res != None and res.keys() != []:
                    for asset_id in res.iterkeys():
                        if len(tool_execute_id) > 1:
                            sql_del = "delete from tool_to_execute where tool_execute_id in %s " % (tool_execute_id,)
                        else:
                            sql_del = "delete from tool_to_execute where tool_execute_id = %s" % (tool_execute_id)
                        txn.execute(sql_del)
                # new addition of vector
                tvg.generate(txn, res)
                # return res
        d = self.dbpool.runInteraction(interact)
        d.addCallbacks(self.printResult, self.printError)

    def printResult(self, res):
        print "In printResult after generate...."

    def printError(self, error):
        print "Got Error: %r" % error
        error.printTraceback()

ToolsToExecuteAnalyzer()

if __name__ == '__main__':
    from twisted.internet import reactor
    dbpool = adbapi.ConnectionPool("MySQLdb", 'localhost', 'test', 'test', 'test')
    s = ToolsToExecuteAnalyzer(dbpool)
    reactor.callWhenRunning(s.start)
    reactor.addSystemEventTrigger('before', 'shutdown', s.stop)
    reactor.run()
This is my whole code. I just wanted to know how many threads are running; is a new thread spawned for each tool?
Anyway, thanks Matt for your help. :)
You may also want to take a look at this snippet, which provides a ConnectionPool subclass that reconnects on "MySQL server has gone away":
http://www.gelens.org/2009/09/13/twisted-connectionpool-revisited/
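In outline, the snippet there subclasses ConnectionPool so that an interaction failing with MySQL error 2006 ("server has gone away") or 2013 ("lost connection") drops the stale connection and retries once. A condensed sketch; see the post for the full version:

import MySQLdb
from twisted.enterprise import adbapi

class ReconnectingConnectionPool(adbapi.ConnectionPool):
    def _runInteraction(self, interaction, *args, **kw):
        try:
            return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw)
        except MySQLdb.OperationalError, e:
            if e.args[0] not in (2006, 2013):
                raise
            # drop this pool thread's dead connection, then retry the interaction once
            conn = self.connections.get(self.threadID())
            self.disconnect(conn)
            return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw)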