string.lower() in contents.lower() not working correctly - python

My function looks up WHOIS requests using sockets and proxies.
Sometimes there is an issue with a proxy and it returns no data so I check to see if the data contains the initial domain request and if it does, return the result.
However sometimes this returns TRUE even when there's nothing in the string data.
I have also tried to do len(data) > 25, etc but for some reason it can still return true.
# Excerpt from whois_tcp (full code below): accept the WHOIS reply only
# when it echoes the queried domain, then pull out the fields of interest.
if domain.lower() in data.lower():
# NOTE(review): WhoisEntry is constructed directly here; the later working
# version calls WhoisEntry.load(domain, data) instead -- confirm which is
# the correct API.
obj = WhoisEntry(domain, data)
logger.debug('WHOIS success ' + domain + ': ' + data)
return {
'expiration_date': str(obj.expiration_date),
'status': str(obj.status),
'registrar': str(obj.registrar)
}
Full code
def whois_tcp(domain):
    """Look up WHOIS data for *domain* over TCP port 43 through a random
    SOCKS5 proxy, retrying (new random server and proxy each time) up to
    15 times.

    Returns a dict with 'expiration_date', 'status' and 'registrar' on
    success, or None when every attempt failed.
    """
    whois_servers = [ 'whois.verisign-grs.com', 'whois.internic.net', 'whois.crsnic.net' ]
    for attempt in range(1, 16):
        logger.debug('WHOIS attempt '+domain+': '+str(attempt))
        whois_host = random.choice(whois_servers)
        proxy = random.choice(proxies) # global variable from config.py
        proxy = proxy.split(':')
        try:
            s = socks.socksocket()
            try:
                s.setproxy(socks.PROXY_TYPE_SOCKS5, proxy[0], int(proxy[1]))
                s.connect((whois_host, 43))
                s.send(domain + '\n\r\n')
                # Read until the server closes the connection.
                data = ''
                buf = s.recv(1024)
                while len(buf):
                    data += buf
                    buf = s.recv(1024)
            finally:
                # FIX: the socket was leaked whenever connect/recv raised.
                s.close()
            # A broken proxy can return empty or unrelated data; only trust
            # a reply that echoes the queried domain.
            if domain.lower() in data.lower():
                obj = WhoisEntry(domain, data)
                logger.debug('WHOIS success ' + domain + ': ' + data)
                return {
                    'expiration_date': str(obj.expiration_date),
                    'status': str(obj.status),
                    'registrar': str(obj.registrar)
                }
        except Exception as e:  # FIX: "except Exception, e" is Python-2-only syntax
            logger.error('WHOIS Lookup Failed: '+str(e))
    return None
What am I doing wrong?

1.Try:
if str(domain).lower() in str(data).lower():
...
2.Check whether the value of domain variable is None or ''.

I added code to write each whois response to file and noticed that it was in fact including whois results
# Persist the raw WHOIS reply for offline inspection.  Use a context
# manager so the handle is closed even on error, and avoid shadowing the
# builtin name "file".
with open('./whois_records/' + domain, 'w') as whois_record:
    whois_record.write(data)
Not sure why this line of code wasn't outputting the data string in the log file:
logger.debug('WHOIS success ' + domain + ': ' + str(data))
I have found that different whois servers are giving me different results for lookups.
For example: bpi-group.com
1. whois.verisign-grs.com: **FREE**
2. whois.gandi.net: **TAKEN**
Additionally I need to use different whois server for different TLDs (com/net/org/info/biz)
Currently now using:
- tld.whois-servers.net
Code is still not perfect but is working better and original problem has been solved:
def whois_tcp(domain):
    """Look up WHOIS data for *domain*, choosing the WHOIS server by TLD.

    Different registries answer authoritatively for different TLDs, so
    com/net/org/info/biz go to the matching tld.whois-servers.net host;
    anything else falls back to the Verisign pool.  The query runs over a
    random SOCKS5 proxy, retrying up to 15 times.  Returns a dict with
    'expiration_date', 'status' and 'registrar', or None on failure.
    """
    ext = tldextract.extract(domain)
    servers_by_tld = {
        'org':  [ 'org.whois-servers.net' ],
        'biz':  [ 'biz.whois-servers.net' ],
        'info': [ 'info.whois-servers.net' ],
        'com':  [ 'com.whois-servers.net' ],
        'net':  [ 'net.whois-servers.net' ],
    }
    whois_servers = servers_by_tld.get(
        ext.suffix,
        [ 'whois.verisign-grs.com', 'whois.internic.net', 'whois.crsnic.net' ])
    for attempt in range(1, 16):
        logger.debug('WHOIS attempt '+domain+': '+str(attempt))
        whois_host = random.choice(whois_servers)
        proxy = random.choice(proxies) # global variable from config.py
        proxy = proxy.split(':')
        try:
            s = socks.socksocket()
            try:
                s.setproxy(socks.PROXY_TYPE_SOCKS5, proxy[0], int(proxy[1]))
                s.connect((whois_host, 43))
                s.send(domain + '\n\r\n')
                # Read until the server closes the connection.
                data = ''
                buf = s.recv(1024)
                while len(buf):
                    data += buf
                    buf = s.recv(1024)
            finally:
                # FIX: the socket was leaked whenever connect/recv raised.
                s.close()
            # Keep a copy of the raw reply for debugging.  FIX: the original
            # used "file = open(...)" (shadows the builtin, leaked on error).
            with open('./whois_records/' + domain, 'w') as whois_record:
                whois_record.write(data)
            # Only trust replies that echo the queried domain; proxy errors
            # can produce empty or unrelated data.
            if str(domain).lower() in str(data).lower():
                obj = WhoisEntry.load(domain, data)
                logger.debug('WHOIS success ' + domain + ': ' + str(data))
                return {
                    'expiration_date': str(obj.expiration_date),
                    'status': str(obj.status),
                    'registrar': str(obj.registrar)
                }
        except Exception as e:  # FIX: "except Exception, e" is Python-2-only syntax
            logger.error('WHOIS Lookup Failed: '+str(e))
    return None

Related

Python 3.6 Lambda code error for os.environ variable

# Excerpt from lambda_handler: authenticate against Salesforce.
# NOTE(review): per the traceback below, sfdc_password comes out of the
# KMS decrypt as bytes while simple_salesforce's login path expects str
# (html.escape fails) -- it needs .decode('utf-8'); see the fix at the
# end of this section.
try :
sf = Salesforce(username = sfdc_username,
password = sfdc_password,
security_token = sfdc_security_token,
instance_url = sfdc_salesforce_instance_url,
domain = sfdc_sandbox)
print('salesforce login good')
# Catch only the documented simple_salesforce error types; anything else
# is an unexpected failure and should surface.
except (SalesforceGeneralError,
SalesforceMoreThanOneRecord,
SalesforceMalformedRequest,
SalesforceExpiredSession,
SalesforceRefusedRequest,
SalesforceResourceNotFound) as e :
print(e.content[0]['message'])
sys.exit(1)
this portion of code on lambda is failing with the error:
a bytes-like object is required, not 'str': TypeError
Traceback (most recent call last):
File "/var/task/sfdc_etl/bin/sfdc_etl.py", line 80, in lambda_handler
domain = sfdc_sandbox)
File "/var/task/sfdc_etl/lib/python3.6/site-packages/simple_salesforce/api.py", line 146, in __init__
domain=self.domain)
File "/var/task/sfdc_etl/lib/python3.6/site-packages/simple_salesforce/login.py", line 80, in SalesforceLogin
username = escape(username)
File "/var/lang/lib/python3.6/html/__init__.py", line 19, in escape
s = s.replace("&", "&") # Must be done first!
TypeError: a bytes-like object is required, not 'str'
When I move this code to my test environment on an EC2 amazon linux and set the sfdc_sandox to 'test' in line, it works with no issues. I tried using os.environb["L_SFDC_SANDBOX"] and os.environ["L_SFDC_SANDBOX"].encode('utf8'), but that also did not help as it gave the same error. How do I fix the type error when I pull in this variable in Lambda?
Here is the entire script, maybe the error isn't because of that specific piece of code even though it seems like it is.
# Module setup: extend sys.path so the bundled virtualenv site-packages
# and the package parent directory are importable inside the Lambda zip.
import os
import sys
# this adds the parent directory of bin so we can find the module
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parent_dir)
# This adds the venv lib/python<major>.<minor>/site-packages/ to the search path
mod_path = os.path.abspath(parent_dir+"/lib/python"+str(sys.version_info[0])+"."+str(sys.version_info[1])+"/site-packages/")
sys.path.append(mod_path)
from awsmgr.awsmgr import S3Helper
from base64 import b64decode
import boto3
from collections import OrderedDict
import datetime
from dateutil.parser import parse
import logging
import json
import math
import pandas as pd
from simple_salesforce import Salesforce, SalesforceLogin
from simple_salesforce.exceptions import SalesforceGeneralError, SalesforceMoreThanOneRecord, SalesforceMalformedRequest, SalesforceExpiredSession, SalesforceRefusedRequest, SalesforceResourceNotFound
from sqlalchemy import create_engine
from sqlalchemy import exc
current_path = os.path.dirname(os.path.realpath(__file__))
# Use this one for the parent directory
ENV_ROOT = os.path.abspath(os.path.join(current_path, os.path.pardir))
# Use this one for the current directory
#ENV_ROOT = os.path.abspath(os.path.join(current_path))
sys.path.append(ENV_ROOT)
def lambda_handler(event, context):
# Lambda entry point: extract one Salesforce object's records to S3 as
# JSONL, recreate the matching Redshift table, load it, re-apply group
# grants, then advance the object pointer for the next invocation.
###############################
# Global Variable Definitions #
###############################
d_parse = parse
TMP_PATH = '/tmp'
igersUser = 'admin'
# Redshift credentials; the password is KMS-encrypted in the environment.
igersPwd = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["RS_PASSWORD"]))['Plaintext']
igersHost = os.environ["RS_HOST"]
igers = create_engine('postgres://{}:{}#{}/ibdrs'.format(igersUser, igersPwd, igersHost), encoding="utf-8")
# NOTE(review): the '#' between password and host in the URL above looks
# like it should be '@' -- confirm against a working configuration.
igersSchema = os.environ["RS_SCHEMA"]
s3 = S3Helper(debug=os.environ["DEBUG"])
# Control file holding the name of the next Salesforce object to process.
nextObjFile = s3.get_s3_file('s3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/objects/next_object.txt',os.path.abspath(os.path.join(TMP_PATH,'next_object.txt')))
s3Destination = 's3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/json/'
s3Path = '{}_json'
s3NextObjDestination = 's3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/objects/{}'
fileCount = 1
# Salesforce credentials from the environment (KMS-encrypted where noted).
sfdc_username = os.environ["L_USERNAME"].encode('utf8')
sfdc_salesforce_instance_url = os.environ["L_SALESFORCE_INSTANCE_URL"].encode('utf8')
sfdc_password = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["L_PASSWORD"]))['Plaintext']
sfdc_security_token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["L_SECURITY_TOKEN"]))['Plaintext']
sfdc_sandbox = os.environ["L_SFDC_SANDBOX"].encode('utf8')
# NOTE(review): .encode('utf8') and the KMS 'Plaintext' values yield
# bytes, but Salesforce() expects str -- this is the TypeError discussed
# above; the accepted fix decodes sfdc_password before use.
print(type(sfdc_username), type(sfdc_password), type(sfdc_security_token), type(sfdc_salesforce_instance_url), type(sfdc_sandbox))
try :
sf = Salesforce(username = sfdc_username,
password = sfdc_password,
security_token = sfdc_security_token,
instance_url = sfdc_salesforce_instance_url,
domain = sfdc_sandbox)
print('salesforce login good')
except (SalesforceGeneralError,
SalesforceMoreThanOneRecord,
SalesforceMalformedRequest,
SalesforceExpiredSession,
SalesforceRefusedRequest,
SalesforceResourceNotFound) as e :
print(e.content[0]['message'])
sys.exit(1)
# get nextobj from s3
with open(nextObjFile, 'r') as f :
nextObjItem = f.read().strip().lower()
nextObj = nextObjItem.lower()
print('Processing {}'.format(nextObj))
######################################################
# get rs table group permissions, store in dataframe #
######################################################
def rsGetGroupPerms(igers, nextObj) :
# Fetch the Redshift group grants currently held on table *nextObj* in
# the 'salesforce' schema so they can be re-applied after the table is
# dropped and recreated.  The pandas DataFrame is returned AND stored in
# the global groupPerms (read later by rs_operations).
global groupPerms
groupPerms = {}
# NOTE(review): nextObj is interpolated straight into the SQL below --
# acceptable only because it comes from a trusted S3 control file.
existingGroupPerms = '''
SELECT
namespace, item, type, groname
FROM
(
SELECT
use.usename AS subject,
nsp.nspname AS NAMESPACE,
cls.relname AS item,
cls.relkind AS TYPE,
use2.usename AS OWNER,
cls.relacl
FROM
pg_user use
CROSS JOIN pg_class cls
LEFT JOIN pg_namespace nsp ON cls.relnamespace = nsp.oid
LEFT JOIN pg_user use2 ON cls.relowner = use2.usesysid
WHERE
cls.relowner = use.usesysid
AND nsp.nspname NOT IN ( 'pg_catalog', 'pg_toast', 'information_schema' )
AND nsp.nspname IN ( 'salesforce' )
AND relacl IS NOT NULL
ORDER BY
subject,
NAMESPACE,
item
)
JOIN pg_group pu ON array_to_string( relacl, '|' ) LIKE'%%' || pu.groname || '%%'
WHERE item = '{}'
'''.format(nextObj)
groupPerms = pd.read_sql(existingGroupPerms, igers)
print('got the group permissions')
return groupPerms
#####################################################
# get rs table user permissions, store in dataframe #
# NOT CURRENTLY IN USE #
#####################################################
#def rsGetUseerPerms(igers, nextObj) :
# existingUserPerms = '''
# SELECT *
# FROM
# (
# SELECT
# schemaname
# ,objectname
# ,usename
# ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'select') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS sel
# ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'insert') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS ins
# ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'update') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS upd
# ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'delete') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS del
# ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'references') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS ref
# FROM
# (
# SELECT schemaname, 't' AS obj_type, tablename AS objectname, schemaname + '.' + tablename AS fullobj FROM pg_tables
# UNION
# SELECT schemaname, 'v' AS obj_type, viewname AS objectname, schemaname + '.' + viewname AS fullobj FROM pg_views
# ) AS objs
# ,(SELECT * FROM pg_user) AS usrs
# ORDER BY fullobj
# )
# WHERE (sel = true or ins = true or upd = true or del = true or ref = true)
# and objectname = '{}'
# '''.format(nextObj)
#
# userPerms = pd.read_sql_query(existingUserPerms, igers)
# return userPerms
####################################################
# Connect to Salesforce, Query JSON, and Copy to S3#
####################################################
def sfToS3(fileCount, sf, nextObj) :
    """Pull every record of Salesforce object *nextObj* and ship them to S3
    as JSONL files, following 'nextRecordsUrl' pagination.

    If the object cannot be queried (e.g. it is empty), advance the object
    pointer and end the run via sys.exit(1).
    """
    # nextRecordsUrl values still to be fetched.
    pulls = []
    # Describe the object to learn its field list, then select all fields.
    sfobject = sf.restful('sobjects/{}/describe/'.format(nextObj), params=None)
    fields_list = [record['name'] for record in sfobject['fields']]
    initialQuery = sf.query("SELECT {} FROM {}".format(','.join(fields_list),nextObj))
    # Send a single file or the first file to S3.
    data = initialQuery['records']
    try :
        send_temp_jsonl_to_s3(data, nextObj, s3, s3Destination, fileCount, s3Path)
        # Collect the chain of nextRecordsUrl pages, if any.
        if 'nextRecordsUrl' in initialQuery :
            pulls.append(initialQuery['nextRecordsUrl'])
            nextChunk = initialQuery['nextRecordsUrl']
            nextQuery = sf.query_more(nextChunk,True)
            if 'nextRecordsUrl' in nextQuery :
                pulls.append(nextQuery['nextRecordsUrl'])
            fileCount = 2
            # FIX: replaces the "x = True / while x == True" flag loop.
            while True:
                try:
                    # Re-query until a response no longer carries a
                    # 'nextRecordsUrl' (the lookup then raises and ends
                    # the pagination).
                    nextQuery = sf.query_more(nextQuery['nextRecordsUrl'],True)
                    pulls.append(nextQuery['nextRecordsUrl'])
                except Exception:  # FIX: was a bare "except:"; kept broad so
                    break          # any query failure still just ends paging
        # If there were follow-on pages, fetch each and push to S3.
        if len(pulls) >= 1 :
            for i in range(len(pulls)) :
                data = sf.query_more(str(pulls[i].split('/')[5]))['records']
                send_temp_jsonl_to_s3(data, nextObj, s3, s3Destination, fileCount, s3Path)
                fileCount += 1
        print('completed sending JSON files to S3')
    except Exception :
        # FIX: was a bare "except:".  An unqueryable/empty object lands
        # here: move the pointer to the next object and stop this run.
        print('Salesforce Object Empty, ending execution')
        updateNextObj(nextObj, s3NextObjDestination)
        sys.exit(1)
####################
# JSONL to S3 #
####################
def send_temp_jsonl_to_s3(data, nextObj, s3, s3Destination, fileCount, s3Path) :
    """Write *data* (a list of Salesforce record dicts) to a local JSONL
    file and upload it to S3 under a dated path, removing the local copy.

    Keys are lowercased; values whose key contains 'date' or 'stamp' are
    re-formatted as '%Y-%m-%d %I:%M:%S'.
    """
    fileName = '{}_file{}.json'
    localFilePath ='/tmp/'
    # BUG FIX: the original looped "for element in data" while calling
    # data.pop() inside the loop; shrinking the list during iteration makes
    # the loop stop halfway, silently dropping about half of the records
    # (and writing the rest in reverse order).
    for item in data :
        item.pop('attributes', None)  # drop Salesforce record metadata
        tempdict = OrderedDict({})
        for k,v in item.items() :
            if 'date' in k.lower() or 'stamp' in k.lower() :
                if not v is None :
                    d = d_parse(v)
                    v = d.strftime('%Y-%m-%d %I:%M:%S')
                tempdict[k.lower()] = v
            else :
                tempdict[k.lower()] = v
        with open(localFilePath+fileName.format(nextObj,fileCount), 'a') as outfile :
            outfile.write(json.dumps(tempdict))
            outfile.write('\n')
    s3.put_s3_file_datedpath(localFilePath+fileName.format(nextObj,fileCount),s3Destination+s3Path.format(nextObj))
    os.remove(localFilePath+fileName.format(nextObj,fileCount))
#################################################
# maps SFDC type to SQL type - used for ddl #
#################################################
def map_data_type(sfdc_type, length):
    """
    Map a Salesforce field datatype to a Redshift column type.

    :param sfdc_type: Salesforce datatype name (e.g. 'picklist', 'double')
    :param length: Salesforce-reported field length; sentinel values are
                   normalized before use (0 -> 1092, 4095 -> 15000,
                   >65535 -> 65534)
    :return: Redshift type string, e.g. u'varchar(255)'
    """
    __MULTIPLIER = 1.3  # varchar headroom factor; may not be Zero!
    # Normalize sentinel/overflow lengths reported by Salesforce.
    if length == 0:
        length = 1092
    if length == 4095:
        length = 15000
    if length > 65535:
        length = 65534  # Redshift varchar upper bound

    # Types whose mapping ignores the field length.
    fixed = {
        u'boolean': u'varchar(5)',
        u'date': u'timestamp',
        u'datetime': u'timestamp',
        u'currency': u'decimal(38,6)',
        u'double': u'decimal(38,6)',
        u'int': u'numeric(10)',
        u'percent': u'numeric(38,6)',
        u'time': u'varchar(255)',
    }
    if sfdc_type in fixed:
        return fixed[sfdc_type]

    # Types mapped to a plain varchar of the reported length.
    if sfdc_type in (u'picklist', u'id', u'reference', u'email', u'phone',
                     u'url', u'multipicklist', u'combobox', u'base64'):
        return u'varchar({})'.format(length)

    if sfdc_type == u'textarea':
        # NOTE(review): this comparison (65535 / length * MULTIPLIER) looks
        # suspicious -- preserved verbatim from the original; it probably
        # meant length * MULTIPLIER >= 65535.  TODO confirm before changing.
        if length >= (65535/length*__MULTIPLIER):
            return u'varchar({})'.format(65534)
        return u'varchar({})'.format( math.ceil(length*__MULTIPLIER))

    if sfdc_type in (u'anyType', u'string'):
        # Pad by the multiplier but never exceed the Redshift maximum.
        if length >= 65535:
            return u'varchar({})'.format(65534)
        return u'varchar({})'.format(math.ceil(length*__MULTIPLIER))

    # Unknown Salesforce type: fall back to the widest varchar.
    return u'varchar(65535)'
####################################
# Turn SFDC metadata into SQL #
####################################
def get_ddl(sf, nextObj, igersSchema, col_remove=None):
    """Build the CREATE TABLE DDL for Salesforce object *nextObj*.

    :param sf: authenticated simple_salesforce client
    :param nextObj: Salesforce object (and target table) name
    :param igersSchema: target Redshift schema
    :param col_remove: optional list of column names to skip (case-insensitive)
    :return: the DDL string (also stored in the module-global ddl_str,
             which rs_operations reads later)
    """
    md = sf.restful("sobjects/{}/describe/".format(nextObj), params=None)
    target_table = nextObj
    global ddl_str
    # Normalize the skip list once instead of per field.
    skip = [element.lower() for element in col_remove] if col_remove else []
    # FIX: build the column list and join once, instead of repeated string
    # concatenation with a trailing-comma strip.
    columns = []
    for x in md["fields"]:
        if x["name"].lower() in skip:
            print("Skipping: {}".format(x["name"]))
            continue
        column = x["name"] + ' ' + map_data_type(x["type"], x["length"])
        if x["name"] == 'Id':
            column += ' NOT NULL DISTKEY'  # Id is the distribution key
        columns.append(column)
    ddl_str = 'CREATE TABLE ' + igersSchema + "." + target_table + ' (' + ",".join(columns) + ')'
    logging.info('DDL Successfully created...')
    return ddl_str
#########################
# Create Table from DDL, execute the copy query and update permissions #
#########################
def rs_operations(ddl_str, groupPerms, igersSchema, nextObj, s3Destination, s3Path, igers) :
# Recreate and load the Redshift table for *nextObj*: drop, create from
# ddl_str, COPY the dated JSON files from S3, then re-apply the group
# grants captured earlier by rsGetGroupPerms.  Statement order matters.
today = datetime.date.today()
dated_path = today.strftime('%Y/%m/%d')
perms_statement = ''
drop_table = '''
DROP TABLE IF EXISTS {}.{} CASCADE
'''.format(igersSchema, nextObj)
loadQuery = '''
COPY {}.{}
FROM '{}{}/{}/'
iam_role 'arn:aws:iam::087024238921:role/LambdaFullAccessRole'
TRUNCATECOLUMNS
FORMAT AS JSON 'auto'
'''.format(igersSchema, nextObj, s3Destination, s3Path.format(nextObj), dated_path)
grantPerms = '''
GRANT SELECT ON {}.{} TO GROUP {}
'''
with igers.connect() as conn:
try :
conn.execute(drop_table)
print('completed drop table')
conn.execute(ddl_str)
print('completed create table')
conn.execute(loadQuery)
print('completed load query')
# One GRANT per row of the saved permissions DataFrame.
for row in range(len(groupPerms)) :
perms_statement = grantPerms.format(groupPerms['namespace'].iloc[row],groupPerms['item'].iloc[row],groupPerms['groname'].iloc[row])
conn.execute(perms_statement)
print('completed grant group permissions')
# NOTE(review): conn.close() inside the "with" block is redundant --
# the context manager already closes the connection.
conn.close()
except exc.SQLAlchemyError as e :
print(e)
######################################
# Update Next Object and Write to S3 #
######################################
def updateNextObj(nextObj, s3NextObjDestination) :
# Advance the object pointer: look up *nextObj* in the master object list
# on S3 and write the FOLLOWING entry back as next_object.txt.
objectsList = []
objectsFile = s3.get_s3_file('s3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/objects/sfdc_etl_objects.txt',os.path.abspath(os.path.join(TMP_PATH,'sfdc_etl_objects.txt')))
localNobjTempFile = os.path.abspath(os.path.join(TMP_PATH,'next_object.txt'))
nextObjText = ''
with open (objectsFile, 'r') as objs :
for line in objs :
objectsList.append(line.strip("\n"))
# NOTE(review): when nextObj is the LAST entry (or not found) the loop
# never matches, so an empty string is written -- there is no wraparound
# to the first object.  Confirm this is intended.
for i in range(len(objectsList)-1) :
if objectsList[i].lower() == nextObj :
nextObjText = objectsList[i+1]
print(nextObjText)
with open (localNobjTempFile, 'w') as f :
f.write(nextObjText)
s3.put_s3_file(localNobjTempFile,s3NextObjDestination.format('next_object.txt'))
print('completed Updating the next object')
################################################
# Test if the object exists and execute #
################################################
# Probe the object first; if Salesforce rejects it, skip to the next
# object and end this invocation.
try :
getattr(sf,nextObj).describe()
except (SalesforceGeneralError,
SalesforceMoreThanOneRecord,
SalesforceMalformedRequest,
SalesforceExpiredSession,
SalesforceRefusedRequest,
SalesforceResourceNotFound) as e :
print(e.content[0]['message'] +', writing next object and ending')
updateNextObj(nextObj, s3NextObjDestination)
sys.exit(1)
# Main pipeline: capture grants, extract to S3, rebuild + load the table
# (get_ddl stores its result in the global ddl_str), then advance pointer.
rsGetGroupPerms(igers, nextObj)
sfToS3(fileCount, sf, nextObj)
get_ddl(sf, nextObj, igersSchema, col_remove=None)
rs_operations(ddl_str, groupPerms, igersSchema, nextObj, s3Destination, s3Path, igers)
updateNextObj(nextObj, s3NextObjDestination)
The problem is that the following line
sfdc_password = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["L_PASSWORD"]))['Plaintext']
is returning a bytes object, while the Salesforce class, on the other hand, expects a string.
Solution:
# Fixed login: sfdc_password (bytes from the KMS decrypt) is decoded to
# str before being handed to Salesforce, which resolves the
# "a bytes-like object is required, not 'str'" TypeError above.
try :
sf = Salesforce(username=sfdc_username,
password=sfdc_password.decode("utf-8"),
security_token=sfdc_security_token,
instance_url=sfdc_salesforce_instance_url,
domain=sfdc_sandbox)
print('salesforce login good')
Note that the sfdc_password variable is being converted to string using the .decode bytes method

Python tnsnames.ora parser

I need a dict containing all database connections from tnsnames.ora file.
I need to go from this :
(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=mydbserver.mydomain.com)(PORT=1521)))(CONNECT_DATA=(SID=CATAL)(SERVER=DEDICATED)(SERVICE_NAME=mydb.mydomain.com)))
to this :
{'DESCRIPTION': [{'ADDRESS_LIST': [{'ADDRESS': [{'PROTOCOL': 'TCP'},
{'HOST': 'mydbserver.mydomain.com'},
{'PORT': '1521'}
]
}]
},
{'CONNECT_DATA': [{'SID': 'CATAL'},
{'SERVER': 'DEDICATED'},
{'SERVICE_NAME': 'mydb.mydomain.com'}
]
}
]
}
So far, my code is :
def get_param(param_string):
# Parse one "(KEY=VALUE)" group into {KEY: VALUE}; when VALUE itself
# contains nested groups, delegate to get_value_list (mutual recursion).
print("get_param input:", param_string)
if param_string.count("(") != param_string.count(")"):
raise Exception("Number of '(' is not egal to number of ')' : " + str(param_string.count("(")) + " and " + str(param_string.count(")")))
else:
# Strip the assumed outer parentheses, then split on the first '='.
param_string = param_string[1:-1]
splitted = param_string.split("=")
keywork = splitted[0]
if len(splitted) == 2:
# Simple leaf: (KEY=VALUE)
return {keywork: splitted[1]}
else:
# Nested value: rejoin everything after the key and recurse.
splitted.remove(keywork)
values = "=".join(splitted)
return {keywork: get_value_list(values)}
def get_value_list(value_string):
# Split a run of sibling "(...)(...)" groups into a list of parsed dicts.
print("get_value_list input:", value_string)
to_return = list()
if "=" not in value_string and "(" not in value_string and ")" not in value_string:
# Bare literal value.
to_return.append(value_string)
elif value_string[0] != "(":
raise Exception("[ERROR] Format error '(' is not the first char: " + repr(value_string))
else:
# Scan to the matching close paren of the first group, parse it,
# then recurse on whatever follows.
parenth_count = 0
strlen = len(value_string)
current_value = ""
for i in range(0,strlen):
current_char = value_string[i]
current_value += current_char
if current_char == "(":
parenth_count += 1
elif current_char == ")":
parenth_count += -1
if parenth_count == 0:
to_return.append(get_param(current_value))
if i != (strlen - 1):
if value_string[i+1] == "(":
to_return += get_value_list(value_string[i+1:])
else:
raise Exception("Format error - Next char should be a '('. value_string[i+1]:" + repr(value_string[i+1]) )
break
print("get_value_list return:", to_return)
# NOTE(review): collapsing a single-element list to its element here is
# the likely cause of the flattened/incorrect output shown below -- the
# expected structure keeps one-element lists as lists.  TODO confirm.
if len(to_return) == 0:
to_return = ""
elif len(to_return) == 1:
to_return = to_return[0]
return to_return
connection_infos = "(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=mydbserver.mydomain.com)(PORT=1521)))(CONNECT_DATA=(SID=CATAL)(SERVER=DEDICATED)(SERVICE_NAME=mydb.mydomain.com)))"
current_connection = get_param(connection_infos)
print("current_connection:", current_connection)
# NOTE(review): pprint is used here without a visible import in this
# snippet ("from pprint import pprint" appears only in the later test code).
pprint(current_connection)
And I got this :
{'DESCRIPTION': [{'ADDRESS_LIST': {'ADDRESS': [{'PROTOCOL': 'TCP'},
{'HOST': 'mydbserver.mydomain.com'},
'PORT']
}
},
'CONNECT_DATA'
]
}
So I'm doing something wrong, and I feel I'm making this too complicated. Could anyone point out the mistake I made, or help me find a simpler way to do this?
I have a working code now, but I'm not really satisfied with it. It's too long, not flexible, and will not work with some other tnsnames.ora possible formats :
class Tnsnames():
# Character-level parser for a tnsnames.ora file.  After construction:
#   self.connections - {name: {"ADDRESS_LIST": ..., "CONNECT_DATA": ...,
#                              "LAST_TEST_TS": None}}
#   self.aliases     - {alias: registered name}
#   self.duplicates  - aliases seen more than once (skipped)
def __init__(self, file_path, file_name='tnsnames.ora'):
self.file_path = file_path
self.file_name = file_name
self.load_file()
def load_file(self):
# Read and parse the file in one pass.  The parse is a hand-rolled
# state machine over single characters; current_depth tracks paren
# nesting (0 = between entries, 1 = DESCRIPTION, 2/3/4 = nested).
try:
fhd = open(os.path.join(self.file_path, self.file_name), 'rt', encoding='utf-8')
except:
raise
else:
#Oracle doc : https://docs.oracle.com/cd/B28359_01/network.111/b28317/tnsnames.htm#NETRF007
# Collapse whitespace and drop comment lines, then join the whole
# file into one string for the character scan.
# NOTE(review): the "#" test drops any line CONTAINING '#', not
# just lines starting with it -- confirm that is acceptable.
file_content = list()
for l in fhd:
l = " ".join(l.split()).strip(" \n")
if len(l) > 0:
if "#" not in l:
file_content.append(l)
fhd.close()
file_content = " ".join(file_content)
# Parser state.
connections_list = dict()
current_depth = 0
current_word = ""
current_keyword = ""
name_to_register = ""
is_in_add_list = False
current_addr = dict()
connections_aliases = dict()
stop_registering = False
connections_duplicates = list()
for c in file_content:
if c == " ":
pass
elif c == "=":
# Word before '=' becomes the active keyword.
current_keyword = str(current_word)
current_word = ""
if current_keyword == "ADDRESS_LIST":
is_in_add_list = True
elif c == "(":
if current_depth == 0:
# Opening a new entry: the keyword so far is the
# (comma-separated) net service name list.
current_keyword = current_keyword.upper()
names_list = current_keyword.replace(" ","").split(",")
if len(names_list) == 1:
name_to_register = names_list[0]
else:
name_to_register = None
# We use either the first name with at least
# a dot in it, or the longest one.
for n in names_list:
if "." in n:
name_to_register = n
break
else:
name_to_register = max(names_list, key=len)
names_list.remove(name_to_register)
# Remaining names become aliases of the registered one.
for n in names_list:
if n in connections_aliases.keys():
print("[ERROR] already registered alias:", n,
". Registered to:", connections_aliases[n],
". New:", name_to_register,
". This possible duplicate will not be registered.")
connections_duplicates.append(n)
stop_registering = True
else:
connections_aliases[n] = name_to_register
if not stop_registering:
connections_list[name_to_register] = {"ADDRESS_LIST": list(),
"CONNECT_DATA": dict(),
"LAST_TEST_TS": None}
current_depth += 1
elif current_depth in [1,2,3]:
current_depth += 1
else:
print("[ERROR] Incorrect depth:", repr(current_depth), ". Current connection will not be registered" )
del connections_list[name_to_register]
stop_registering = True
elif c == ")":
if current_depth == 1:
# End of an entry: validate it before moving on.
if stop_registering:
stop_registering = False
else:
# Before moving to next connection,
# we check that current connection
# have at least a HOST, and a SID or
# SERVICE_NAME
connection_is_valid = True
if isinstance(connections_list[name_to_register]["ADDRESS_LIST"], dict):
if "HOST" not in connections_list[name_to_register]["ADDRESS_LIST"].keys():
print("[ERROR] Only one address defined, and no HOST defined. Current connection will not be registered:", name_to_register)
connection_is_valid = False
elif isinstance(connections_list[name_to_register]["ADDRESS_LIST"], list):
for current_address in connections_list[name_to_register]["ADDRESS_LIST"]:
if "HOST" in current_address.keys():
break
else:
print("[ERROR] Multiple addresses but none with HOST. Current connection will not be registered:", name_to_register)
connection_is_valid = False
else:
print("[ERROR] Incorrect address format:", connections_list[name_to_register]["ADDRESS_LIST"], " Connection:", name_to_register)
connection_is_valid = False
if not connection_is_valid:
del connections_list[name_to_register]
else:
if "SERVICE_NAME" not in connections_list[name_to_register]["CONNECT_DATA"].keys() and \
"SID" not in connections_list[name_to_register]["CONNECT_DATA"].keys():
print("[ERROR] Missing SERVICE_NAME / SID for connection:", name_to_register)
del connections_list[name_to_register]
elif current_depth == 2:
# End of ADDRESS_LIST: collapse a single address to a dict.
if is_in_add_list:
is_in_add_list = False
if not stop_registering:
if len(connections_list[name_to_register]["ADDRESS_LIST"]) == 1:
connections_list[name_to_register]["ADDRESS_LIST"] = connections_list[name_to_register]["ADDRESS_LIST"][0]
elif current_depth == 3:
# End of one ADDRESS, or of a CONNECT_DATA leaf value.
if is_in_add_list:
if not stop_registering:
connections_list[name_to_register]["ADDRESS_LIST"].append(current_addr)
current_addr = dict()
elif current_keyword.upper() in ["SID", "SERVER", "SERVICE_NAME"]:
if not stop_registering:
connections_list[name_to_register]["CONNECT_DATA"][current_keyword.upper()] = current_word.upper()
elif current_depth == 4:
# Leaf inside an ADDRESS, e.g. HOST/PORT/PROTOCOL.
if is_in_add_list:
if not stop_registering:
current_addr[current_keyword.upper()] = current_word.upper()
current_keyword = ""
current_word = ""
current_depth += -1
else:
current_word += c
self.connections = connections_list
self.aliases = connections_aliases
self.duplicates = connections_duplicates
Test tnsnames.ora :
########################################
# This is a sample tnsnames.ora #
########################################
###################################################
# PRODDB
###################################################
proddb.mydbs.domain.com, PRODDB =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = proddb1.mydbs.domain.com)(PORT = 1522))
(ADDRESS = (PROTOCOL = TCP)(HOST = proddb2.mydbs.domain.com)(PORT = 1522))
(ADDRESS = (PROTOCOL = TCP)(HOST = proddb3.mydbs.domain.com)(PORT = 1522))
(ADDRESS = (PROTOCOL = TCP)(HOST = proddb4.mydbs.domain.com)(PORT = 1522))
)
(CONNECT_DATA =
(SID = PRODDB)
(SERVER = DEDICATED)
(SERVICE_NAME = proddb.mydbs.domain.com)
)
)
###################################################
# DEVDBA : Test database for DBA usage
###################################################
devdba.mydbs.domain.com, DEVDBA =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = devdba.mydbs.domain.com)(PORT = 1521))
)
(CONNECT_DATA =
(SID = DEVDBA)
)
)
Test code :
from pprint import pprint
from lib_database import Tnsnames
# Parse the sample tnsnames.ora from the Oracle client admin directory
# and dump everything the parser extracted.
tnsnnames = Tnsnames('/usr/lib/oracle/12.2/client64/network/admin')
print('Connexions:')
pprint(tnsnnames.connections)
print('Aliases:')
pprint(tnsnnames.aliases)
print('Duplicates:')
pprint(tnsnnames.duplicates)
Output :
Connexions:
{'DEVDBA.MYDBS.DOMAIN.COM': {'ADDRESS_LIST': {'HOST': 'DEVDBA.MYDBS.DOMAIN.COM',
'PORT': '1521',
'PROTOCOL': 'TCP'},
'CONNECT_DATA': {'SID': 'DEVDBA'},
'PRODDB.MYDBS.DOMAIN.COM': {'ADDRESS_LIST': [{'HOST': 'PRODDB1.MYDBS.DOMAIN.COM',
'PORT': '1522',
'PROTOCOL': 'TCP'},
{'HOST': 'PRODDB2.MYDBS.DOMAIN.COM',
'PORT': '1522',
'PROTOCOL': 'TCP'},
{'HOST': 'PRODDB3.MYDBS.DOMAIN.COM',
'PORT': '1522',
'PROTOCOL': 'TCP'},
{'HOST': 'PRODDB4.MYDBS.DOMAIN.COM',
'PORT': '1522',
'PROTOCOL': 'TCP'}],
'CONNECT_DATA': {'SERVER': 'DEDICATED',
'SERVICE_NAME': 'PRODDB.MYDBS.DOMAIN.COM',
'SID': 'PRODDB'}}
Aliases:
{'DEVDBA': 'DEVDBA.MYDBS.DOMAIN.COM', 'PRODDB': 'PRODDB.MYDBS.DOMAIN.COM'}
Duplicates:
[]
I could not find any other Python parser for tnsnames.ora files. If you know of one, please point me to it.
You can do this with pyparsing:
# pyparsing (third-party) grammar for tnsnames.ora entries.  ASSIGNMENT is
# recursive via Forward(), so values may nest arbitrarily deep.
import pyparsing as pp
# 1. Literals
VAR = pp.Word(pp.alphas + "_", pp.alphanums + "_").setName('variable')
SPACE = pp.Suppress(pp.Optional(pp.White()))
EQUALS = SPACE + pp.Suppress('=') + SPACE
OPEN = pp.Suppress('(') + SPACE
CLOSE = pp.Suppress(')') + SPACE
INTEGER = pp.Optional('-') + pp.Word(pp.nums) + ~pp.Char(".")
INTEGER.setParseAction(lambda t: int(t[0]))
FLOAT = pp.Optional('-') + pp.Word(pp.nums) + pp.Char('.') + pp.Optional(pp.Word(pp.nums))
FLOAT.setParseAction(lambda t: float(t[0]))
STRING = pp.Word(pp.alphanums + r'_.-')
# 2. Literal assignment expressions: (IDENTIFIER = VALUE)
INTEGER_ASSIGNMENT = pp.Group(OPEN + VAR + EQUALS + INTEGER + CLOSE)
FLOAT_ASSIGNMENT = pp.Group(OPEN + VAR + EQUALS + FLOAT + CLOSE)
STRING_ASSIGNMENT = pp.Group(OPEN + VAR + EQUALS + STRING + CLOSE)
# 3. Nested object assignment (recursive: values may contain assignments)
ASSIGNMENT = pp.Forward()
NESTED_ASSIGNMENT = pp.Group(OPEN + VAR + EQUALS + ASSIGNMENT + CLOSE)
ASSIGNMENT << pp.OneOrMore(INTEGER_ASSIGNMENT |
FLOAT_ASSIGNMENT |
STRING_ASSIGNMENT |
NESTED_ASSIGNMENT)
# 4. Net service name(s): NAME(.DOMAIN)[, NAME(.DOMAIN)...]
NET_SERVICE_NAME = pp.OneOrMore(pp.Word(pp.alphas + '_' + '.', pp.alphanums + '_' + '.')
+ pp.Optional(pp.Suppress(',')))
# 5. Full TNS entry
TNS_ENTRY = NET_SERVICE_NAME + EQUALS + ASSIGNMENT
Here is some example data:
# Sample tnsnames.ora content used by the examples below (one simple entry
# and one multi-name entry with an alias).
TNS_NAMES_ORA = """
MYDB =
(DESCRIPTION =
(ADDRESS_LIST =
(ADDRESS = (PROTOCOL = TCP)(HOST = server01)(PORT = 25881))
)
(CONNECT_DATA =
(SID = MYDB01)
)
)
OTHERDB.DOMAIN, ALIAS_FOR_OTHERDB.DOMAIN =
(DESCRIPTION_LIST =
(DESCRIPTION =
(ADDRESS_LIST = (ADDRESS = (PROTOCOL = TCP)
(HOST = server02)
(PORT = 25881)
))
(CONNECT_DATA = (SID = MYDB02))
)
)
"""
This part is a little one-off-ish, but here's an example of putting some additional parsing on top of that in order to extract all the data:
def _parse_addresses(tns_entry, addresses):
"""
Parse ADDRESS keywords from the a TNS entry
:param definition: Unparsed part of the TNS entry
:param addresses: List of addresses parsed
"""
keyword = tns_entry[0]
# Base Case: We found an ADDRESS, so extract the data
# and do not recurse into it
if keyword.upper() == 'ADDRESS':
port = None
host = None
for k, v in tns_entry[1:]:
if k == 'PORT':
port = v
elif k == 'HOST':
host = v
if port is None:
print('WARNING: Ignoring ADDRESS due to missing PORT')
elif host is None:
print('WARNING: Ignoring ADDRESS due to missing HOST')
addresses.append({'host': host, 'port': port})
# Else recursively descend through the definition
for d in tns_entry[1:]:
# Only parse sub-lists, not literals
if isinstance(d, list):
_parse_addresses(d, addresses)
def _parse_connect_data(tns_entry, sids):
    """
    Collect SIDs from CONNECT_DATA entries in a parsed TNS definition.

    :param tns_entry: Parsed portion of a TNS entry (list of key/sublists)
    :param sids: Output list of Oracle SID strings found
    """
    keyword = tns_entry[0]
    # Base Case: We found a CONNECT_DATA, so extract the data
    # and do not recurse into it
    if keyword.upper() == 'CONNECT_DATA':
        sid = None
        for k, v in tns_entry[1:]:
            if k == 'SID':
                sid = v
        # BUG FIX: the original appended even when no SID was present,
        # leaving a useless None in the output list despite the warning.
        if sid is None:
            print('WARNING: Ignoring CONNECT_DATA due to missing SID')
        else:
            sids.append(sid)
        return
    # Else recursively descend through sub-lists only, never literals.
    for d in tns_entry[1:]:
        if isinstance(d, list):
            _parse_connect_data(d, sids)
def get_connection_info(net_service_name: str, tns_string: str):
    """
    Generator over all simple connections inferred from a TNS entry.

    Yields one dict per (ADDRESS, SID) combination found in the matching
    entry, shaped {'sid': ..., 'host': ..., 'port': ...}.

    :param net_service_name: Net service name to return connection info for
    :param tns_string: tnsnames.ora file contents
    :raises KeyError: if no TNS entry matches net_service_name
    """
    # Parse the TNS entries and keep the requested definition
    definition = None
    for tokens, _start, _end in TNS_ENTRY.scanString(tns_string):
        # NOTE(review): substring containment against the first parsed name
        # only -- 'MYDB' would also match e.g. 'MYDB2', and aliases beyond
        # tokens[0] are not consulted.  Confirm this is intended.
        if net_service_name in tokens.asList()[0]:
            definition = tokens.asList()[1]
            break
    # Check if we found a definition
    if definition is None:
        raise KeyError(f'No net service named {net_service_name}')
    # Look for all the ADDRESS keywords
    addresses = []
    _parse_addresses(definition, addresses)
    # Look for all CONNECT_DATA keywords
    sids = []
    _parse_connect_data(definition, sids)
    # Emit all combinations (cross product of addresses and SIDs)
    for address in addresses:
        for sid in sids:
            yield {'sid': sid, **address}
# Demo: print every simple connection derived from the sample MYDB entry.
for conn in get_connection_info('MYDB', TNS_NAMES_ORA):
    print(conn)
I wrote a blog post about it here for "fun":
https://unparameterized.blogspot.com/2021/02/parsing-oracle-tns-files-in-python.html
import re
def find_match(tns_regex, y):
    """
    Match *y* against *tns_regex* (multiline, case-insensitive, dot-all).

    :param tns_regex: pattern with at least one capturing group
    :param y: text to match from its start
    :return: first captured group with surrounding newlines stripped,
             or None when there is no match
    """
    # BUG FIX: flags are combined with | (the canonical form) instead of +,
    # and group(1) replaces groups(1)[0] -- in the original, the ``1`` was
    # groups()' *default* value for non-participating groups, not an index.
    m = re.match(tns_regex, y, re.M | re.I | re.S)
    if m is None:
        return None
    return m.group(1).strip('\n')
# ---------------------------------------------------------------------------
# Strip comment lines from tnsnames.ora, then pull one easy-connect dict per
# TNS entry using a handful of regexes and find_match() above.
# ---------------------------------------------------------------------------
# Removing commented text
with open("C:\\Oracle\\product\\11.2.0\\client_1\\network\\admin\\tnsnames.ora") as tns_file:
    with open("test_tns.ora", 'w+') as output:
        for line in tns_file.readlines():
            if not line.startswith('#'):
                output.write(line)
with open('test_tns.ora') as tns_file:
    tnsnames = tns_file.read()
# Entries are separated by a run of 3+ closing parens followed by a blank line.
tnsnames1 = re.split(r"\){3,}\n\n", tnsnames)
# Regex matches.  BUG FIX: raw strings -- the originals contained invalid
# escape sequences such as '\s', which raise SyntaxWarning on modern Python.
tns_name = r'^(.+?)\s?\=\s+\(DESCRIPTION.*'
tns_host = r'.*?HOST\s?=\s?(.+?)\)'
tns_port = r'.*?PORT\s?=\s?(\d+?)\)'
tns_sname = r'.*?SERVICE_NAME\s?=\s?(.+?)\)'
tns_sid = r'.*?SID\s?=\s?(.+?)\)'
easy_connects = []
for y in tnsnames1:
    # Re-append the parens consumed by the split so the regexes can anchor on ')'.
    y = '%s))' % y
    # 'fields' replaces the original's ambiguous name 'l'.
    fields = [find_match(x, y) for x in [tns_name, tns_host, tns_port, tns_sname, tns_sid]]
    d = {
        'name': fields[0],
        'host': fields[1],
        'port': fields[2],
        'service_name': fields[3],
        'sid': fields[4]
    }
    easy_connects.append(d)
print(easy_connects)
I have written this small code. It parses tnsnames.ora. It is fast and works great.
def parse_ora_tns_file(fpath,tnskey=None,return_all_keys=False,view_file=False,logger=None)->str:
    """
    Parse an Oracle tnsnames.ora file.

    :param fpath: full path to the file, e.g. r'somedir\tnsnames.ora'
    :param tnskey: TNS key whose entry should be returned, e.g. tnskey='ABC.WORLD'
    :param return_all_keys: if True, return all TNS key names as a
        comma-separated string instead of a single entry
    :param view_file: if True, return the comment-stripped file with each
        entry collapsed onto a single line
    :param logger: optional logger for progress messages
    :return: the requested entry text, the key list, the cleaned file,
        or None when tnskey is not found
    """
    clean_tns_file=''
    if logger:
        logger.info('Reading tnsnames ora file at {} ...'.format(fpath))
    # Drop comment lines; keep everything else verbatim (newlines included).
    with open(fpath,mode='r') as tns_file:
        lines =tns_file.readlines()
        for line in lines:
            if not line.startswith('#'):
                clean_tns_file=clean_tns_file+line
    #clean file
    clean_str = clean_tns_file.replace('\n','')
    clean_str = clean_str.replace('\t','')
    #replace = with ' ' so later I can split with ' '
    #with below it becomes like ABC.WORLD = (DESCRIPTION) to ABC.WORLD ' ' (DESCRIPTION)
    clean_str = clean_str.replace('=',' ')
    #Below one output to ['ABC.WORLD','',' (DESCRIPTION)']
    lstresult= clean_str.split(" ")
    #Below code remove extra space from char list like it becomes ['ABC.WORLD','','(DESCRIPTION)']
    lstresult = [txt.strip() for txt in lstresult]
    #Below code to replace () chars to '' from list so output as ['ABC.WORLD','','DESCRIPTION']
    removetable = str.maketrans('', '', '()')
    out_list = [s.translate(removetable) for s in lstresult]
    #Below code remove any empty list items so output as ['ABC.WORLD','DESCRIPTION']
    out_list = list(filter(None, out_list))
    #find index of DESCRIPTION words
    indices = [i for i, x in enumerate(out_list ) if x.upper() == "DESCRIPTION"]
    tns_keys= ""
    for i in indices:
        #use index of DESCRIPTION to tns keys which is required for regx pattern below.
        # The token immediately before each DESCRIPTION is taken to be a key.
        tns_keys=tns_keys+out_list[i-1]+"|"
    if return_all_keys:
        # '|' -> ',' then drop the trailing separator.
        return tns_keys.replace('|',',')[:-1]
    if logger:
        logger.info('Keys found in tnsnames ora: {}'.format(tns_keys))
    # Remove whitespace that is not at the start of a line beginning with a
    # key, collapsing each entry onto one line.
    # NOTE(review): tns_keys ends with a trailing '|', which leaves an empty
    # alternative in the group -- confirm the lookahead behaves as intended.
    regex = r"\s+(?!^({}))".format(tns_keys)
    result = re.sub(regex, '', clean_tns_file, 0, re.MULTILINE)
    if view_file:
        return result
    if result:
        for match in re.finditer(r'^((?:{}))(.*)'.format(tns_keys), result, re.MULTILINE):
            if match.group(1) == tnskey:
                #removing = sign from start of entry
                if logger:
                    logger.info('Found tns entry: {} {}'.format(match.group(1),match.group(2)))
                return match.group(2)[1:]
    if logger:
        logger.info('No tns entry found for {}'.format(tnskey))
    return None

Sending file over UDP divided into fragments

I have been dealing with sending a file which is divided into fragments of a size set by the user on input. The problem is that I am getting this error:
rec_list[fragIndex - 1] = data
IndexError: list assignment index out of range
I am also sending single string messages like in a chat and it works normally.
I can't find a bug in my code but maybe you can.
Btw, there are variables which might help with math which doesn't fit probably.
fragSize = 3
fragIndex = 216
fragCount = 215
Problem is, total number of fragments should be 215 (precalculated before sending - IS OK), index shouldn't be more than count! That is the problem. And it doesn't happen with strings. Only here with file.
Sending:
# Interactive sender loop: '-sf' changes the fragment size; other prefixes
# (e.g. '-f' for files) are handled further down.
fragSize = int(input('Fragment size: ')) #max size of fragment
while True:
    message = input('Enter message: ')
    fragIndex=0 #reset fragment indexing
    #asking for fragment size
    if(message[:3] == '-sf'):
        fragSize = int(input('Fragment size: '))
And here is sending function for files:
if (message[:2] == '-f'):
    mType = 3
    if message.startswith('-f'):
        message = message[3:]
    file_name = message
    # Read the whole file as bytes; 'with' closes the handle (the original
    # leaked it).
    with open(file_name, "rb") as f:
        contents = f.read()
    fragCount = math.ceil(len(contents) / fragSize)
    # BUG FIX: the original condition was ``contents != ''``.  ``contents``
    # is bytes, and in Python 3 b'' != '' is always True, so the loop never
    # terminated and kept sending empty fragments past fragCount (hence the
    # receiver's IndexError at fragIndex == fragCount + 1).  An empty bytes
    # object is falsy, so testing truthiness terminates correctly.
    while contents:
        data = bytearray()
        data.extend(contents[:fragSize])
        fragIndex += 1
        crc = crc32(data)
        header = struct.pack('!hIIII', mType, fragSize, fragIndex, fragCount, crc)
        self.sock.sendto(header + bytearray(data), (self.host, self.port))
        contents = contents[fragSize:]
Receiving:
while True:
    received_chunks = 0
    rec_list = []
    while True:
        data, addr = sock.recvfrom(65535)
        # Header is '!hIIII' = 2 + 4*4 = 18 bytes; the rest is payload.
        header = data[:18]
        data = data[18:]
        (mType, fragSize, fragIndex, fragCount, crc) = struct.unpack('!hIIII', header)
        print(
            '\nTyp: ' + str(mType) +
            '\nFragSize: ' + str(fragSize) +
            '\nFragIndex: ' + str(fragIndex) +
            '\nFragCount: ' + str(fragCount) +
            '\nCRC: ' + str(crc)
        )
        # Grow the reassembly buffer to fragCount slots.  BUG FIX: the
        # placeholders are bytes (b''), not str, so a later b''.join(rec_list)
        # cannot raise TypeError if a fragment never arrives.
        if len(rec_list) < fragCount:
            need_to_add = fragCount - len(rec_list)
            rec_list.extend([b''] * need_to_add)
        # BUG FIX: guard the index -- a stray or miscounted fragment (the
        # fragIndex == fragCount + 1 case seen in practice) would otherwise
        # raise IndexError and kill the receiver.
        if 1 <= fragIndex <= fragCount:
            rec_list[fragIndex - 1] = data
            received_chunks += 1
        if received_chunks == fragCount:
            break  # all fragments received; leave the inner loop
This is only if I want to receive message of type file: (because it is divided into more message types)
if mType == 3:
    # Reassemble the fragments and persist them.  BUG FIX: 'with' guarantees
    # the handle is flushed and closed (the original leaked it).
    content = b''.join(rec_list)
    with open('filename.py', 'wb') as f:
        f.write(content)
You tried to compare apples to oranges. Well, bytes to str but wikipedia doesn't say anything about that.
while contents!='':
...
contents is a bytes object and '' is a str object. In python 3, those two things can never be equal. Firing up the shell we see that
>>> b''==''
False
>>>
>>> contents = b"I am the very model"
>>> while contents != '':
... if not contents:
... print("The while didn't catch it!")
... break
... contents = contents[3:]
...
The while didn't catch it!
Since all objects have a truthiness (that is, bool(some_object) is meaningful) and bytes objects turn False when they are empty, you can just do
while contents:
....
UPDATE
Not part of the original answer but a question was raised about sending retries back to the client. The server side is sketched in here
while True:
    received_chunks = 0
    fragCount = -1
    rec_list = []
    addr = None  # remembered so the retry request can be addressed
    while True:
        # wait forever for next conversation
        if fragCount == -1:
            sock.settimeout(None)
        try:
            data, addr = sock.recvfrom(65535)
        except socket.timeout:
            # Scan for blank slots in rec_list and ask the sender to resend
            # just those fragments.
            # BUG FIXES vs. the sketch: enumerate() (iterating the raw list
            # cannot unpack (i, data) pairs), len(retries) + 2 (the original
            # len(retries + 2) is a TypeError), and the sendto call was
            # missing both its closing paren and its destination address.
            retries = [i for i, chunk in enumerate(rec_list) if not chunk]
            # Packet layout: mType, numFrags, *fragList  (13 == retry mType)
            sock.sendto(struct.pack("!{}h".format(len(retries) + 2), 13,
                                    len(retries), *retries), addr)
            continue
        # our first packet: switch to a short timeout so gaps trigger retries
        if fragCount == -1:
            sock.settimeout(2)
        header = data[:18]
        data = data[18:]
        (mType, fragSize, fragIndex, fragCount, crc) = struct.unpack('!hIIII', header)
        print(
            '\nTyp: ' + str(mType) +
            '\nFragSize: ' + str(fragSize) +
            '\nFragIndex: ' + str(fragIndex) +
            '\nFragCount: ' + str(fragCount) +
            '\nCRC: ' + str(crc)
        )
        if len(rec_list) < fragCount:
            need_to_add = fragCount - len(rec_list)
            # bytes placeholders so empty slots are detectable and joinable
            rec_list.extend([b''] * need_to_add)
        rec_list[fragIndex - 1] = data
        received_chunks += 1
        if received_chunks == fragCount:
            break  # conversation complete

Json response error: response null?

I have a problem.
I have a page where I send commands to a sensor network.
When I click on this part of code
<a href='javascript:void(send_command_to_sensor("{{net.id}}", "{{sens.id}}", "identify"));'></a>
I call a js function, this:
function send_command_to_sensor(net, sens, command) {
$.ajax({url: "/networks/" + net + "/sensors/" + sens + "/send?command=" + command,
type: "GET",
async: true,
dataType: "json",
success: function(json_response) {
var err = json_response['error'];
if (err) {
show_alert('error', err);
return;
}
var success = json_response['success'];
if (success) {
show_alert('success', success);
return;
}
show_alert('alert', "This should not happen!");
}
});
}
This function build a url that recall an handler in the Tornado web server written in python. The handler is this:
################################################################################
# SensorSend handler.
# Sends a packet to the sensor by writing in a file (Should work through
# MySQL database) read by Quantaserv Daemon
################################################################################
class SensorSendHandler(BaseHandler):
    # Requires authentication
    #tornado.web.authenticated
    # NOTE(review): this method uses ``yield tornado.gen.Task(...)`` below,
    # so it must be decorated with @tornado.web.asynchronous and
    # @tornado.gen.engine (plus @tornado.web.authenticated); without them the
    # generator body never runs and the client receives a null response.
    def get(self, nid, sid):
        """Build a ZTC command packet for sensor *sid* on network *nid*,
        persist it (DB + /tmp/iztc spool file), then query the como service
        for the sensor's reply and write a JSON result to the client."""
        command = self.get_argument('command').upper();
        print command
        # Retrieve the current user
        usr = self.get_current_user()
        usr_id = usr['id']
        # Fetch this user's permission row for the network.
        # NOTE(review): perm may be None if no row exists; perm['perm'] below
        # would then raise -- confirm upstream guarantees.
        self.lock_tables("read", ['nets_permissions as n'])
        perm = self.db.get("SELECT n.perm FROM nets_permissions as n \
            WHERE n.network_id=%s AND n.user_id=%s", nid, int(usr_id))
        self.unlock_tables()
        # Check whether the user has access to the network
        perms = self.check_network_access(nid, perm['perm'])
        #Check wether the sensor exists in this network
        self.check_sensor_in_network(sid, nid)
        # The dictionary to return
        ret = {}
        ############################################################################
        # The following Code is valid for ZTC networks only
        # Must be generalized
        # TODO: - get the network type:
        #       - check wether the command is allowed
        ############################################################################
        if command not in ['ON', 'OFF', 'TOGGLE', 'IDENTIFY']:
            raise tornado.web.HTTPError(404, "Unknown command: " + str(command))
        #Command OnOffCmd_SetState
        if command in ['ON', 'OFF', 'TOGGLE']:
            op_group = "70"
            op_code = "50"
            packet_meta = "%s%s%s%s02%s%s600080000%s"
            pkt_len = hextransform(16, 2)
            netid = hextransform(int(nid), 16)
            # High 16 bits of sid = sensor id, low 16 bits = cluster id.
            sens_id = hextransform(int(sid) >> 16, 4)
            sens_id_little = invert2bytes(sens_id,0)
            cluster_id = hextransform(int(sid) & 0x00FFFF, 4)
            end_point = "08"
            if command == 'ON':
                cmd_data = "01"
            elif command == 'OFF':
                cmd_data = "00"
            elif command == 'TOGGLE':
                cmd_data = "02"
            packet = packet_meta % (netid, op_group, op_code, pkt_len, sens_id, end_point, cmd_data)
            packet = packet.upper()
            op_group_hex=0x70
            op_code_hex=0x50
            print command
        #Command ZDP-IEEE_addr.Request
        elif command == 'IDENTIFY':
            op_group = "A2"
            op_code = "01"
            packet_meta = "%s%s%s%s"
            pkt_len = hextransform(2, 2)
            sens_id = hextransform(int(sid) >> 16, 4)
            sens_id_little = invert2bytes(sens_id,0)
            packet = packet_meta % (op_group, op_code, pkt_len, sens_id)
            packet = packet.upper()
            op_group_hex=0xA2
            op_code_hex=0x01
        #Command ZDP-Active_EP_req.Request
        # NOTE(review): unreachable -- 'HOWMANY' is rejected by the
        # allowed-command check above; confirm whether it should be added to
        # that list.
        elif command == 'HOWMANY':
            op_group = "A2"
            op_code = "05"
            packet_meta = "%s%s%s%s%s"
            pkt_len = hextransform(4, 2)
            netid = hextransform(int(nid), 16)
            sens_id = hextransform(int(sid) >> 16, 4)
            sens_id_little = invert2bytes(sens_id,0)
            packet = packet_meta % (op_group, op_code, pkt_len, sens_id, netid)
            packet = packet.upper()
            op_group_hex=0xA2
            op_code_hex=0x05
        mynet_type ="ztc"
        cmdjson = packet2json(op_group_hex,op_code_hex, packet)
        print("\t\t " + packet + "\n")
        #
        #
        #TODO : -write command into db
        ts = datetime.datetime.now().isoformat()
        self.lock_tables("write", ['confcommands'])
        self.db.execute("INSERT INTO confcommands (network_id, ntype, timestamp, command) \
            VALUES (%s,%s,%s,%s)", nid, mynet_type, ts, cmdjson)
        self.unlock_tables();
        ############### ELISA ##########################################
        # TODO: - open the /tmp/iztc file in append mode
        cmdfile = open('/tmp/iztc', 'a')
        # - acquire a lock "only for the DB case, it's easier"
        # - write the packet
        cmdfile.write(nid + "\t"+ mynet_type + "\t"+ ts + "\t"+ cmdjson +"\n");
        # - release the lock "only for the DB case, it's easier"
        # - close the file
        cmdfile.close()
        # Opcode group/code of the expected *reply* for each query command.
        # NOTE(review): opcodegroupr/opcoder stay unbound for ON/OFF/TOGGLE,
        # so the como_url join below would raise NameError -- confirm.
        if command == 'HOWMANY':
            opcodegroupr = "A0"
            opcoder = "85"
        elif command == 'IDENTIFY':
            opcodegroupr = "A0"
            opcoder = "81"
        print command
        #Code for retrieving the MAC address of the node
        # NOTE(review): 'netid' is only bound on the ON/OFF/TOGGLE and
        # HOWMANY branches; for IDENTIFY this would raise NameError.
        como_url = "".join(['http://', options.como_address, ':', options.como_port,
                            '/', ztc_config, '?netid=', netid,
                            '&opcode_group=', opcodegroupr,
                            '&opcode=', opcoder, '&start=-5m&end=-1s'])
        http_client = AsyncHTTPClient()
        response = yield tornado.gen.Task(http_client.fetch, como_url)
        ret = {}
        if response.error:
            ret['error'] = 'Error while retrieving unregistered sensors'
        else:
            # Keep the 7th space-separated field of the last non-empty line.
            for line in response.body.split("\n"):
                if line != "":
                    value = int(line.split(" ")[6])
                    ret['response'] = value
        self.write(tornado.escape.json_encode(ret))
        # NOTE(review): below, 'value' is unbound when response.error was set
        # or the body was empty (NameError), and it is an int here, so the
        # slicing value[0], value[1:2], ... would raise TypeError -- this
        # post-processing appears to expect a string; confirm.
        if command == 'HOWMANY':
            status = value[0]
            NwkAddress = value[1:2]
            ActiveEPCount = value[3]
            ActiveEPList = value[len(ActiveEPCount)]
            if status == "0":
                ret['success'] = "The %s command has been succesfully sent!" % (command.upper())
                self.write(tornado.escape.json_encode(ret))
            elif status == "80":
                ret['error'] = "Invalid Request Type"
                self.write(tornado.escape.json_encode(ret))
            elif status == "89":
                ret['error'] = "No Descriptor"
                self.write(tornado.escape.json_encode(ret))
            else:
                ret['error'] = "Device not found!"
                self.write(tornado.escape.json_encode(ret))
        if command == 'IDENTIFY':
            status = value[0]
            IEEEAddrRemoteDev = value[1:8]
            NWKAddrRemoteDev = value[9:2]
            NumOfAssociatedDevice = value[11:1]
            StartIndex = value[12:1]
            ListOfShortAddress = value[13:2*NumOfAssociatedDevice]
            if status == "0":
                ret['success'] = "Command succesfully sent! The IEEE address is: %s" % (IEEEAddrRemoteDev)
                self.write(tornado.escape.json_encode(ret))
            elif status == "80":
                ret['success'] = "Invalid Request Type"
                self.write(tornado.escape.json_encode(ret))
            else:
                ret['error'] = "Device Not Found"
                self.write(tornado.escape.json_encode(ret))
The error I receive in the developer consolle is this:
Uncaught TypeError: Cannot read property 'error' of null
$.ajax.successwsn.js:26 jQuery.Callbacks.firejquery-1.7.2.js:1075
jQuery.Callbacks.self.fireWithjquery-1.7.2.js:1193
donejquery-1.7.2.js:7538 jQuery.ajaxTransport.send.callback
in the js function. Why doesn't the function know 'error' or 'success'? Where is the problem? I think the program never enters the handler, but gets blocked afterwards, in the js function.
Thank you very much for the help! It's a long post but it's simple to read! Please!
The quick read is the function isn't decorated correctly.
The line
response = yield tornado.gen.Task(http_client.fetch, como_url)
Means that you should declare the function thusly:
#tornado.web.asynchronous
#tornado.gen.engine
#tornado.web.authenticated
def get(self):
....
Note the addition of the two additional decorators.

Python IRC Client

When I run the script:
import socket
from time import strftime
time = strftime("%H:%M:%S")
irc = 'irc.tormented-box.net'
port = 6667
channel = '#tormented'
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.connect((irc, port))
print sck.recv(4096)
sck.send('NICK supaBOT\r\n')
sck.send('USER supaBOT supaBOT supaBOT :supaBOT Script\r\n')
sck.send('JOIN ' + channel + '\r\n')
sck.send('PRIVMSG #tormented :supaBOT\r\n')
while True:
data = sck.recv(4096)
if data.find('PING') != -1:
sck.send('PONG ' + data.split() [1] + '\r\n')
elif data.find ( 'PRIVMSG' ) != -1:
nick = data.split ( '!' ) [ 0 ].replace ( ':', '' )
message = ':'.join ( data.split ( ':' ) [ 2: ] )
destination = ''.join ( data.split ( ':' ) [ :2 ] ).split ( ' ' ) [ -2 ]
if destination == 'supaBOT':
destination = 'PRIVATE'
print '(', destination, ')', nick + ':', message
get = message.split(' ') [1]
if get == 'hi':
try:
args = message.split(' ') [2:]
sck.send('PRIVMSG ' + destination + ' :' + nick + ': ' + 'hello' + '\r\n')
except:
pass
I get this is the error:
get = message.split(' ')[1]
IndexError: list index out of range
How can I fix it?
This means that message has no spaces in it, so when it's split by a space, you get a list containing a single element - you are trying to access the second element of this list. You should insert a check for this case.
EDIT: In reply to your comment: how you add the check depends on the logic of your program. The simplest solution would be something like:
# BUG FIX: the snippet tested ``msg`` but the surrounding code names the
# variable ``message``; use the same name in both places.
if ' ' in message:
    get = message.split(' ')[1]
else:
    get = message
Try
get = message.split(" ",1)[-1]
Example
>>> "abcd".split(" ",1)[-1]
'abcd'
>>> "abcd efgh".split(" ",1)[-1]
'efgh'

Categories

Resources