JSON response error: response null? - Python

I have a problem.
I have a page from which I send commands to a sensor network.
When I click on this link:
<a href='javascript:void(send_command_to_sensor("{{net.id}}", "{{sens.id}}", "identify"));'></a>
it calls this JavaScript function:
function send_command_to_sensor(net, sens, command) {
    $.ajax({
        url: "/networks/" + net + "/sensors/" + sens + "/send?command=" + command,
        type: "GET",
        async: true,
        dataType: "json",
        success: function(json_response) {
            var err = json_response['error'];
            if (err) {
                show_alert('error', err);
                return;
            }
            var success = json_response['success'];
            if (success) {
                show_alert('success', success);
                return;
            }
            show_alert('alert', "This should not happen!");
        }
    });
}
This function builds a URL that calls a handler in the Tornado web server, which is written in Python. The handler is this:
################################################################################
# SensorSend handler.
# Sends a packet to the sensor by writing to a file (should work through a
# MySQL database) read by the Quantaserv daemon
################################################################################
class SensorSendHandler(BaseHandler):

    # Requires authentication
    @tornado.web.authenticated
    def get(self, nid, sid):
        command = self.get_argument('command').upper()
        print command
        # Retrieve the current user
        usr = self.get_current_user()
        usr_id = usr['id']
        self.lock_tables("read", ['nets_permissions as n'])
        perm = self.db.get("SELECT n.perm FROM nets_permissions as n \
                            WHERE n.network_id=%s AND n.user_id=%s", nid, int(usr_id))
        self.unlock_tables()
        # Check whether the user has access to the network
        perms = self.check_network_access(nid, perm['perm'])
        # Check whether the sensor exists in this network
        self.check_sensor_in_network(sid, nid)
        # The dictionary to return
        ret = {}

        ############################################################################
        # The following code is valid for ZTC networks only and must be generalized.
        # TODO: - get the network type
        #       - check whether the command is allowed
        ############################################################################
        if command not in ['ON', 'OFF', 'TOGGLE', 'IDENTIFY']:
            raise tornado.web.HTTPError(404, "Unknown command: " + str(command))

        # Command OnOffCmd_SetState
        if command in ['ON', 'OFF', 'TOGGLE']:
            op_group = "70"
            op_code = "50"
            packet_meta = "%s%s%s%s02%s%s600080000%s"
            pkt_len = hextransform(16, 2)
            netid = hextransform(int(nid), 16)
            sens_id = hextransform(int(sid) >> 16, 4)
            sens_id_little = invert2bytes(sens_id, 0)
            cluster_id = hextransform(int(sid) & 0x00FFFF, 4)
            end_point = "08"
            if command == 'ON':
                cmd_data = "01"
            elif command == 'OFF':
                cmd_data = "00"
            elif command == 'TOGGLE':
                cmd_data = "02"
            packet = packet_meta % (netid, op_group, op_code, pkt_len, sens_id, end_point, cmd_data)
            packet = packet.upper()
            op_group_hex = 0x70
            op_code_hex = 0x50
            print command
        # Command ZDP-IEEE_addr.Request
        elif command == 'IDENTIFY':
            op_group = "A2"
            op_code = "01"
            packet_meta = "%s%s%s%s"
            pkt_len = hextransform(2, 2)
            sens_id = hextransform(int(sid) >> 16, 4)
            sens_id_little = invert2bytes(sens_id, 0)
            packet = packet_meta % (op_group, op_code, pkt_len, sens_id)
            packet = packet.upper()
            op_group_hex = 0xA2
            op_code_hex = 0x01
        # Command ZDP-Active_EP_req.Request
        elif command == 'HOWMANY':
            op_group = "A2"
            op_code = "05"
            packet_meta = "%s%s%s%s%s"
            pkt_len = hextransform(4, 2)
            netid = hextransform(int(nid), 16)
            sens_id = hextransform(int(sid) >> 16, 4)
            sens_id_little = invert2bytes(sens_id, 0)
            packet = packet_meta % (op_group, op_code, pkt_len, sens_id, netid)
            packet = packet.upper()
            op_group_hex = 0xA2
            op_code_hex = 0x05

        mynet_type = "ztc"
        cmdjson = packet2json(op_group_hex, op_code_hex, packet)
        print("\t\t " + packet + "\n")

        # TODO: - write command into db
        ts = datetime.datetime.now().isoformat()
        self.lock_tables("write", ['confcommands'])
        self.db.execute("INSERT INTO confcommands (network_id, ntype, timestamp, command) \
                         VALUES (%s,%s,%s,%s)", nid, mynet_type, ts, cmdjson)
        self.unlock_tables()

        ############### ELISA ##########################################
        # TODO: - open the /tmp/iztc file in append mode
        cmdfile = open('/tmp/iztc', 'a')
        # - acquire a lock "only for the DB case, it's easier"
        # - write the packet
        cmdfile.write(nid + "\t" + mynet_type + "\t" + ts + "\t" + cmdjson + "\n")
        # - release the lock "only for the DB case, it's easier"
        # - close the file
        cmdfile.close()

        if command == 'HOWMANY':
            opcodegroupr = "A0"
            opcoder = "85"
        elif command == 'IDENTIFY':
            opcodegroupr = "A0"
            opcoder = "81"
        print command

        # Code for retrieving the MAC address of the node
        como_url = "".join(['http://', options.como_address, ':', options.como_port,
                            '/', ztc_config, '?netid=', netid,
                            '&opcode_group=', opcodegroupr,
                            '&opcode=', opcoder, '&start=-5m&end=-1s'])
        http_client = AsyncHTTPClient()
        response = yield tornado.gen.Task(http_client.fetch, como_url)
        ret = {}
        if response.error:
            ret['error'] = 'Error while retrieving unregistered sensors'
        else:
            for line in response.body.split("\n"):
                if line != "":
                    value = int(line.split(" ")[6])
                    ret['response'] = value
        self.write(tornado.escape.json_encode(ret))

        if command == 'HOWMANY':
            status = value[0]
            NwkAddress = value[1:2]
            ActiveEPCount = value[3]
            ActiveEPList = value[len(ActiveEPCount)]
            if status == "0":
                ret['success'] = "The %s command has been successfully sent!" % (command.upper())
                self.write(tornado.escape.json_encode(ret))
            elif status == "80":
                ret['error'] = "Invalid Request Type"
                self.write(tornado.escape.json_encode(ret))
            elif status == "89":
                ret['error'] = "No Descriptor"
                self.write(tornado.escape.json_encode(ret))
            else:
                ret['error'] = "Device not found!"
                self.write(tornado.escape.json_encode(ret))

        if command == 'IDENTIFY':
            status = value[0]
            IEEEAddrRemoteDev = value[1:8]
            NWKAddrRemoteDev = value[9:2]
            NumOfAssociatedDevice = value[11:1]
            StartIndex = value[12:1]
            ListOfShortAddress = value[13:2*NumOfAssociatedDevice]
            if status == "0":
                ret['success'] = "Command successfully sent! The IEEE address is: %s" % (IEEEAddrRemoteDev)
                self.write(tornado.escape.json_encode(ret))
            elif status == "80":
                ret['success'] = "Invalid Request Type"
                self.write(tornado.escape.json_encode(ret))
            else:
                ret['error'] = "Device Not Found"
                self.write(tornado.escape.json_encode(ret))
The error I get in the developer console is this:
Uncaught TypeError: Cannot read property 'error' of null
    $.ajax.success                        wsn.js:26
    jQuery.Callbacks.fire                 jquery-1.7.2.js:1075
    jQuery.Callbacks.self.fireWith        jquery-1.7.2.js:1193
    done                                  jquery-1.7.2.js:7538
    jQuery.ajaxTransport.send.callback
It happens in the JS function. Why doesn't the function see 'error' or 'success'? Where is the problem? I think the program never even enters the handler, but gets stuck right after, in the JS function.
Thank you very much for the help! It's a long post, but it's simple to read!

The quick read is that the function isn't decorated correctly.
The line
response = yield tornado.gen.Task(http_client.fetch, como_url)
means that you should declare the function like this:
@tornado.web.asynchronous
@tornado.gen.engine
@tornado.web.authenticated
def get(self):
    ....
Note the addition of the two extra decorators.
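For context, here is a minimal sketch (my addition, not part of the original answer) of what the decorated, coroutine-style handler looks like with the old Tornado 2.x API the question uses. BaseHandler, the packet building and the real CoMo URL are left out to keep it self-contained, the URL below is just a placeholder, and @tornado.web.authenticated is omitted for the same reason:

import tornado.web
import tornado.gen
import tornado.escape
from tornado.httpclient import AsyncHTTPClient

class SensorSendHandler(tornado.web.RequestHandler):

    @tornado.web.asynchronous
    @tornado.gen.engine
    def get(self, nid, sid):
        ret = {}
        http_client = AsyncHTTPClient()
        # In the real handler this would be the como_url built from nid/sid;
        # a placeholder keeps the sketch self-contained.
        response = yield tornado.gen.Task(http_client.fetch,
                                          "http://localhost:44444/ztc_config")
        if response.error:
            ret['error'] = 'Error while retrieving unregistered sensors'
        else:
            ret['success'] = 'Command successfully sent!'
        self.write(tornado.escape.json_encode(ret))
        self.finish()  # required: @tornado.web.asynchronous disables auto-finish

Without the decorators, calling get() only builds a generator that is never run, so Tornado finishes the request with an empty body; jQuery 1.7 turns an empty body into null when dataType is "json", which is exactly the null json_response the console is complaining about.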

Related

Multicast Network Trigger In Python Script

I'm trying to add a multicast network trigger for a camera setup. Currently I am using a conf file called multicast-trigger.conf:
[multicast-trigger]
address = 224.1.1.1
port = 600
payload = 0x05AA9544
and a Python script called app_ext_multicast_trigger.py, which takes the information from the conf file and sends a multicast network trigger.
import sys, os, logging, json, ConfigParser, socket
sys.path.append('/home/root/ss-web')
from camconstants import *

class AppExt(object):
    pkg_name = "Multicast network trigger "
    pkg_version = "v1.0"
    mt_config = None

    # =============================================================================
    # Package and URL registration logic
    # =============================================================================
    def __init__(self, _app, _cam, _ci, register_url_callback):
        """
        Register new URLs with camera's webserver.
        """
        urls = [
            ( '/trigger2', self.trigger2, "Sends a multicast network trigger packet to trigger all the cameras on the same local network, including trigging this camera." ),
            ( '/get_multicast_configuration', self.get_multicast_configuration, "Returns a JSON encoded dictionary of the camera's /etc/multicast-trigger.conf file contents." ),
        ]
        register_url_callback(self.pkg_name, self.pkg_version, urls)
        self.mt_config = self._mt_read_multicast_config_file('/etc/multicast-trigger.conf')
        logging.debug("Multicast trigger URL added: %s" % repr(self.mt_config))

    # =============================================================================
    # Multicast trigger helper methods
    # =============================================================================
    def _mt_read_multicast_config_file(self, fn):
        """
        Returns dictionary with multicast trigger configuration information read from file fn.
        """
        if not os.path.exists(fn):
            logging.error("Missing file: %s" % fn)
            return None
        config = {}
        try:
            config_parser = ConfigParser.SafeConfigParser()
            config_parser.read(fn)
            c = config_parser.items('multicast-trigger')
            for (key, value) in c:
                if key == 'address':
                    config[key] = value
                elif key in ('port', 'payload'):
                    config[key] = int(value, 0)
        except Exception, e:
            logging.error("Bad file format: %s" % fn)
            logging.error("Ignoring multicast-trigger parameters due to exception - %s" % str(e))
            return None
        return config

    # =============================================================================
    # Exposed URLs - URL matches method name
    # =============================================================================
    def trigger2(self):
        """
        Sends a multicast network packet to trigger all cameras on the same local network.
        """
        if self.mt_config == None:
            logging.error("Missing file: %s" % fn)
            ret = CAMAPI_STATUS_INVALID_PARAMETER
        else:
            try:
                logging.debug("Triggering cameras by sending multicast packet: address %s port %d with payload 0x%x" % (self.mt_config['address'], self.mt_config['port'], self.mt_config['payload']))
                sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
                sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
                data = ""
                v = self.mt_config['payload']
                for i in range(4):
                    data = chr(v & 0xFF) + data
                    v = v >> 8
                sock.sendto(data, (self.mt_config['address'], self.mt_config['port']))
                ret = CAMAPI_STATUS_OKAY
            except Exception, e:
                logging.error("Multi-camera trigger error due to exception - %s" % str(e))
                ret = CAMAPI_STATUS_INVALID_PARAMETER
        return json.dumps(ret)

    def get_multicast_configuration(self):
        """
        Returns a JSON encoded dictionary of the camera's /etc/multicast-trigger.conf file.
        """
        logging.debug("Returning multicast trigger configuration: %s" % repr(self.mt_config))
        return json.dumps(self.mt_config)
This works well: when I go to the camera's IP address at /trigger2, it sends a packet that triggers the other camera on the same network. I also have a different piece of equipment that triggers the camera via a Python script called app_ext_b1; when run from that device, it triggers the camera and saves the footage to the device.
import sys, os, logging, json, time, threading, flask, glob, datetime, subprocess, math
from urllib import unquote
from camconstants import *
import tarfile
import sys, os, time, json, datetime, math
class CameraException(Exception):
def __init__(self, code, message):
self.Code = code
self.Message = message
super(CameraException, self).__init__(message)
def AsObject(self):
return { "Code" : self.Code, "Message" : self.Message }
class ExecutionContext:
def __init__(self, id, path):
self.Id = id
self.Error = None
self.path = path
if not self.path.endswith("/"):
self.path = "/" + self.path
self.Key = None
self.Logs = []
self.Results = {}
self._from = time.time()
self.add_log("Id: " + self.Id)
def get_path(self, fname):
return self.path + fname
def execute(self, key, method):
self.Key = key
self.add_log("Executing " + key)
try:
method(self)
except CameraException as camException:
self.add_log("Failed task " + key)
self._add_error(key, camException)
return False
except Exception as error:
self.add_log("Failed task " + key)
self._add_error(key, CameraException(-1, str(error)))
return False
self.add_log("Finished " + key)
return True
def _add_error(self, key, error):
self.Error = error
self.add_log("Error: " + str(error) )
def add_local(self, key, result):
if not self.Key in self.Results:
self.Results[self.Key] = {}
local_result = self.Results[self.Key]
local_result[key] = result
def add_log(self, message):
delta = str(int(1000*(time.time() - self._from))).rjust(6)
fullmsg = str(delta) + ": " + str(self.Key) + " " + message
self.Logs.append(fullmsg)
def add_state(self, save_state, camStatus):
state = {
"save_state": save_state,
"time": int(time.time() * 1000),
"camerastatus": camStatus,
}
stateStr = json.dumps(state)
self.add_log("Setting state to: " + stateStr)
progress_file = open(self.get_path(self.Id + ".state"), "w")
progress_file.write(stateStr)
progress_file.close()
def get_state(self):
try:
stateJson = open(self.get_path(self.Id + ".state"), "r").read()
self.add_log("State: " + stateJson)
state = json.loads(stateJson)
return state
except:
return None
def save_logs(self):
try:
self.Logs.append("Results:\n" + json.dumps(self.Results))
progress_file = open(self.get_path(self.Id + ".log"), "w")
progress_file.write("\n".join(self.Logs))
progress_file.close()
except:
pass
class TriggerInfo:
def __init__(self, triggerTime, releaseTime, fps):
self.TriggerTime = triggerTime
if releaseTime <=0:
releaseTime = triggerTime + releaseTime
self.ReleaseTime = releaseTime
self.Fps = fps
def getFrameForDeltaReleaseMs(self, deltaReleaseMs):
return self.convertMsToFrames(((self.ReleaseTime + float(deltaReleaseMs) / 1000.0) - self.TriggerTime)* 1000)
def convertMsToFrames(self, ms):
return int(self.Fps * float(ms)/ 1000.0)
class FrameSaver:
def __init__(self, cam, ec, params):
self.Cam = cam
self.Tar = None
self.Ec = ec
self.LastImageModified = None
self.id = params["id"]
frameTo = params["frameTo"]
curFrame = params["frameFrom"]
self.Frames = [curFrame]
interval = params["frameInterval"]
curFrame += interval
while curFrame < frameTo:
self.Frames.append(curFrame)
curFrame += interval
self.Frames.append(frameTo)
if not frameTo in self.Frames:
self.Frames.append(frameTo)
def save_frames(self):
for frameNumber in self.Frames:
self._remove_image()
self.Ec.add_log("reviewing frame")
self.Cam.review_frame(1, frameNumber)
self._save_image(frameNumber)
self.Tar.close()
def _remove_image(self):
os.system("rm /home/root/ss-web/static/images/image.jpg")
def _save_image(self, frameNumber):
self.Ec.add_log("save image to tar")
# http://10.11.12.13/static/images/image.jpg
path = "/home/root/ss-web/static/images/image.jpg"
tar = self.create_archive_if_not_exists()
start_time = time.time()
while not os.path.exists(path):
time.sleep(0.050)
if (time.time() - start_time) > 1:
raise CameraException(-1, "Fullball flight: failed to save image for framenumber " + str(frameNumber))
tar.add(path, str(frameNumber) + ".jpg")
def create_archive_if_not_exists(self):
if self.Tar == None:
self.Tar = tarfile.open("/mnt/sdcard/DCIM/" + self.id + ".Frames.tar", "w")
return self.Tar
class CameraCaptureFlow:
def __init__(self, id, camera, options):
self.Camera = camera
self.Options = options
self._state_key = "Unknown"
self.Id = id
self.Fps = 1
self.TriggerInfo = None
self.ReleaseFrame = None
self.SaveFps = None
self.StartFrame = None
self.EndFrame = None
self.PretriggerFillLevel = 100
self.ExecutionContext = ExecutionContext(id, "/mnt/sdcard/DCIM/")
self.ExecutionContext.add_log("Options:\n" + json.dumps(options))
def run(self):
# A trigger flow has already been initiated this id - return - note, it can be None if we are generating the file on top of the old one
if self.ExecutionContext.get_state() != None:
return
self._add_state("Started")
if not self._execute("Info", self.info):
return
if not self._execute("trigger", self.trigger):
self._execute("reinit", self.reinit)
return
if not self._execute("saveselected", self.save_selected):
self._execute("reinit", self.reinit)
return
if not self._execute("remaining ball flight", self.save_remaning_ballflight):
self._execute("reinit", self.reinit)
return
if not self._execute("reinit", self.reinit):
return
self._add_state("Completed")
return
def info(self, context):
self.CurrentConfig = self.Camera.get_current_settings()
self.PretriggerFillLevel = self.Camera.get_pretrigger_fill_level()
self.CamInfo = self.Camera.get_storage_info()
if not "available_space" in self.CamInfo:
raise CameraException(3, "StorageUnavaible")
if self.CamInfo["available_space"] < 50000000:
raise CameraException(4, "StorageInsufficient")
self.Fps = self.CurrentConfig["frame_rate"]
def TriggerCamera(self, context):
if hasattr(self.Camera, "trigger_hw"):
context.add_log("using hardware trigger")
return self.Camera.trigger_hw(None)
return self.Camera.trigger(None)
def wait_for_buffer_ready(self, context, releaseOffset, postCaptureMs):
delta_ms_release = int(1000*(time.time() - releaseOffset))
waitTime = postCaptureMs - delta_ms_release
context.add_log("Time since release : " + str(delta_ms_release) + "ms" )
context.add_log("Post capture time : " + str(postCaptureMs) + "ms" )
if waitTime > 0:
context.add_log("Time since release is less than required for post capture duration")
context.add_log("waiting " +str(waitTime + 100) + "ms to fill up buffers")
time.sleep((waitTime)/1000.0 + 0.1)
def trigger(self, context):
releaseOffset = self.Options["releaseOffsetTime"]
self.wait_for_buffer_ready(context, releaseOffset, self.Options["postCaptureMs"])
context.add_log("Triggering camera")
before = time.time()
triggerResult = self.TriggerCamera(context)
after = time.time()
triggerTime = (before + after) / 2
self.TriggerInfo = TriggerInfo(triggerTime, releaseOffset, self.Fps)
context.add_local("triggerTime", triggerTime)
context.add_local("relaseTime", self.TriggerInfo.ReleaseTime)
if (triggerResult != CAMAPI_STATUS_OKAY):
self._stop_if_saving()
raise CameraException(6, "TriggerFail")
context.add_log("Waiting for camera to trigger")
self.wait_for_status(lambda status: status == CAMAPI_STATE_TRIGGERED, 1)
context.add_log("Waiting for camera to finish triggering")
self.wait_for_status(lambda status: status != CAMAPI_STATE_TRIGGERED, 2)
context.add_log("Triggering finished")
def _stop_if_saving(self):
camStatus = self.Camera.get_camstatus()["state"]
if camStatus == CAMAPI_STATE_SELECTIVE_SAVING:
self.Camera.save_stop(discard_unsaved=True)
self.wait_for_status(lambda status: status == CAMAPI_STATE_REVIEWING or status == CAMAPI_STATE_RUNNING, 5)
raise CameraException(5, "TriggerFailCameraIsSaving")
def save_selected(self, context):
preCapMs = self.Options["preCaptureMs"]
postCapMs = self.Options["postCaptureMs"]
self.ReleaseFrame = self.TriggerInfo.getFrameForDeltaReleaseMs(0)
self.StartFrame = self.TriggerInfo.getFrameForDeltaReleaseMs(-preCapMs)
self.EndFrame = self.TriggerInfo.getFrameForDeltaReleaseMs(postCapMs)
context.add_log("ReleaseFrame: " + str(self.ReleaseFrame))
context.add_local("release_frame", self.ReleaseFrame)
context.add_local("start_frame", self.StartFrame)
context.add_local("end_frame", self.EndFrame)
self._validateSaveParams(context, self.StartFrame, self.EndFrame)
save_params = {}
save_params['buffer_number'] = 1
save_params['start_frame'] = self.StartFrame
save_params['end_frame'] = self.EndFrame
save_params['filename'] = str(self.Options["id"])
context.add_log("selective_save_params\n" + json.dumps(save_params))
before = time.time()
if self.Camera.selective_save(save_params) != CAMAPI_STATUS_OKAY:
raise CameraException(9, "SelectiveSaveFailed")
context.add_log("Waiting for camera to start saving")
self.wait_for_status(lambda status: status == CAMAPI_STATE_SELECTIVE_SAVING, 1)
context.add_log("Waiting for camera to finish saving")
self.wait_for_status(lambda status: status != CAMAPI_STATE_SELECTIVE_SAVING, 300)
context.add_log("Camera finished saving")
after = time.time()
self.SaveFps = math.ceil((self.EndFrame - self.StartFrame) / (after - before))
context.add_local("save_fps", self.SaveFps)
context.add_log("save fps: " + str(self.SaveFps))
def save_remaning_ballflight(self, context):
context.add_log("Checking options")
frameIntervalMs = self.Options["singleFrameCaptureIntervalMs"]
frameCaptureEndMs = self.Options["singleFrameCaptureEndMs"]
# we wish to combine the existing video with the next frames
if frameIntervalMs == None:
return
frameCaptureStartMs = self.Options["postCaptureMs"] + frameIntervalMs
frameInterval = self.TriggerInfo.convertMsToFrames(frameIntervalMs)
frameFrom = self.TriggerInfo.getFrameForDeltaReleaseMs(frameCaptureStartMs)
frameEnd = self.TriggerInfo.getFrameForDeltaReleaseMs(frameCaptureEndMs)
frameSaverParams = {
"id" : self.Id,
"frameFrom" : frameFrom,
"frameTo" : frameEnd,
"frameInterval" : frameInterval
}
frameSaver = FrameSaver(self.Camera, self.ExecutionContext, frameSaverParams)
frameSaver.save_frames()
def _validateSaveParams(self, context, startFrame, endFrame):
duration = self.CurrentConfig["duration"]
pretrigger = self.CurrentConfig["pretrigger"]
context.add_log("Pretrigger Level " + str(self.PretriggerFillLevel))
preTriggerBufferSeconds = duration * pretrigger / 100.0 * (float(self.PretriggerFillLevel) *1.0001 / 100.0)
context.add_log("preTriggerBufferSeconds " + str(preTriggerBufferSeconds))
postTriggerBuffserSeconds = duration * (1 - pretrigger / 100)
minFrame = -preTriggerBufferSeconds * self.Fps + 10
maxFrame = postTriggerBuffserSeconds * self.Fps - 10
if startFrame < minFrame:
msg = "Startframe: " + str(startFrame) + " is less than minimum frame" + str(minFrame)
context.add_log(msg)
raise CameraException(7, "OutOfBufferStartFrame")
if endFrame > maxFrame:
msg = "Endframe: " + str(endFrame) + " is larger than maximum frame" + str(maxFrame)
context.add_log(msg)
raise CameraException(8, "OutOfBufferEndFrame")
def reinit(self, context):
self.Camera.run(self.CurrentConfig)
def wait_for_status(self, predicate, timeout):
before = time.time()
lastStateUpdate = time.time()
lastState = None
while True:
status = self.Camera.get_camstatus()["state"]
deltaTime = time.time() - lastStateUpdate
if status != lastState or deltaTime > 1:
lastState = status
self._update_state(status)
lastStateUpdate = time.time()
if predicate(status):
return True
if time.time() - before > timeout:
return False
time.sleep(.333)
return False
def _update_state(self, camStatus):
self.ExecutionContext.add_state(self._state_key, camStatus)
def _add_state(self, save_state_key):
self._state_key = save_state_key
try:
camStatus = self.Camera.get_camstatus()["state"]
except:
camStatus = CAMAPI_STATE_UNCONFIGURED
self._update_state(camStatus)
def _execute(self, key, method):
success = self.ExecutionContext.execute(key, method)
if not success:
self._add_state("Failed")
return success
def save_logs(self):
self.ExecutionContext.save_logs()
class AppExt(object):
pkg_name = "B1 application extension"
pkg_version = "v1.8.1"
app = None
cam = None
ci = None
def __init__(self, _app, _cam, _ci, register_url_callback):
self.app = _app
self.cam = _cam
self.ci = _ci
urls = [
(
'/get_extension_b1_version',
self.get_version,
"returns version of B1 extension"
),
(
'/get_time_offset',
self.get_time_offset,
"Gets the camera time offset"
),
(
'/trigger_and_save',
self.trigger_and_save,
"Triggers the camera, and save the selected capture part"
),
(
'/get_trigger_and_save_progress',
self.get_trigger_and_save_progress,
"Returns the progress of the trigger and save process"
),
(
'/ensure_camera_ready',
self.ensure_camera_ready,
"Will check that the camera can save and has enough available space"
)
]
register_url_callback(self.pkg_name, self.pkg_version, urls)
def get_version(self):
return json.dumps(
{
"extensionVersion": AppExt.pkg_version
})
def get_time_offset(self):
timeOffset = time.time()
return json.dumps(
{
"timeOffset": timeOffset
})
def trigger_and_save(self):
request = {}
try:
request["id"] = str(self.get_arg('id'))
request["releaseOffsetTime"] = float(self.get_arg('releaseOffsetTime'))
request["preCaptureMs"] = self.get_int('preCaptureMs')
request["postCaptureMs"] = self.get_int('postCaptureMs')
request["singleFrameCaptureIntervalMs"] = self.get_int('singleFrameCaptureIntervalMs', True)
request["singleFrameCaptureEndMs"] = self.get_int('singleFrameCaptureEndMs', True)
captureFlow = CameraCaptureFlow(request["id"], self.cam, request)
timeStart = time.time()
captureFlow.run()
totalTime = int((time.time() - timeStart) * 1000)
ec = captureFlow.ExecutionContext
state = ec.get_state()
result = {
"id" : ec.Id,
"results" : ec.Results,
"logs" : ec.Logs,
"state" : state,
"fps" : captureFlow.Fps,
"release_frame" : captureFlow.ReleaseFrame,
"start_frame" : captureFlow.StartFrame,
"end_frame" : captureFlow.EndFrame,
"save_fps": captureFlow.SaveFps,
"trigger_offset" : captureFlow.TriggerInfo.TriggerTime,
"total_time" : totalTime
}
if ec.Error != None:
result["capture_error"] = ec.Error.AsObject()
captureFlow.save_logs()
return json.dumps(result)
except Exception as E:
request["error"] = str(E)
captureFlow.save_logs()
return json.dumps(request)
def get_arg(self, arg, isOptional = False):
req = flask.request.args
try:
val = req.get(arg)
return str(unquote(val))
except:
if isOptional:
return None
raise Exception("Could not find " + arg + " in the request")
def get_int(self, arg, isOptional = False):
try:
val = self.get_arg(arg)
return int(val)
except Exception as E:
if isOptional:
return None
raise E
def get_trigger_and_save_progress(self):
request = {}
try:
trigger_id = str(self.get_arg('id'))
exec_context = ExecutionContext(trigger_id, "/mnt/sdcard/DCIM/")
result = {}
result["id"] = trigger_id
state = None
timeBefore = time.time()
while state == None and time.time() - timeBefore < 1:
state = exec_context.get_state()
if state == None:
time.sleep(0.017)
result["state"] = state
return json.dumps(result)
except Exception as E:
request["error"] = str(E)
return json.dumps(request)
def ensure_camera_ready(self):
try:
camInfo = self.cam.get_storage_info()
if "available_space" not in camInfo:
raise CameraException(3, "StorageUnavailable")
if camInfo["available_space"] < 50000000:
raise CameraException(4, "StorageInsufficient")
self._validate_storage_writable()
except CameraException as camEx:
return json.dumps({ "Success": False, "Code": camEx.Code, "Message": camEx.Message })
except Exception as ex:
return json.dumps({ "Success": False, "Code": -1, "Message": str(ex) })
return json.dumps({ "Success": True, "Code": 0, "Message": None})
def _validate_storage_writable(self):
try:
timestamp = str(time.time())
fname = "/mnt/sdcard/DCIM/timestamp.txt"
storageFile = open(fname, "w")
storageFile.write(timestamp)
storageFile.close()
storageContent = open(fname, "r").readlines()[0]
if (storageContent != timestamp):
raise "could not write file"
except:
raise CameraException(2, "StorageReadonly")
The device is a radar unit and only allows one camera to be synced at a time. It reads the object it is tracking, tells the camera when to trigger, and saves the file to the iPad I am running the radar on. I would like a second camera to trigger as well and save the video to that camera's internal memory, so I can upload it and merge it with the other video file later. This would save me from manually triggering the second camera each time and trimming the video lengths to match up, and it would also allow me to connect the cameras wirelessly using travel routers. How can I add the app_ext_multicast_trigger logic and multicast-trigger.conf to the app_ext_b1 Python script, so that when the radar unit runs app_ext_b1 on the camera it also sends out a multicast network packet to trigger camera two?
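No answer was recorded for this question, but here is a hedged sketch of one way it might be done (an assumption on my part, not a tested integration): factor the UDP send out of AppExt.trigger2 into a small helper that reads /etc/multicast-trigger.conf the same way app_ext_multicast_trigger.py does, and call it from app_ext_b1 just before the camera is triggered, for example at the start of CameraCaptureFlow.trigger. The helper names below (read_multicast_config, send_multicast_trigger) are mine:

import socket
import struct
import logging
import ConfigParser  # the camera scripts above are Python 2

def read_multicast_config(fn='/etc/multicast-trigger.conf'):
    # Same parsing as AppExt._mt_read_multicast_config_file in app_ext_multicast_trigger.py
    config_parser = ConfigParser.SafeConfigParser()
    config_parser.read(fn)
    return {
        'address': config_parser.get('multicast-trigger', 'address'),
        'port': int(config_parser.get('multicast-trigger', 'port'), 0),
        'payload': int(config_parser.get('multicast-trigger', 'payload'), 0),
    }

def send_multicast_trigger(cfg):
    # Same packet construction as trigger2: the payload as 4 big-endian bytes
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    data = struct.pack(">I", cfg['payload'])
    sock.sendto(data, (cfg['address'], cfg['port']))
    logging.debug("Multicast trigger sent to %s:%d", cfg['address'], cfg['port'])

# Hypothetical hook inside app_ext_b1, at the start of CameraCaptureFlow.trigger(),
# right before self.TriggerCamera(context) is called:
#     send_multicast_trigger(read_multicast_config())

One caveat: according to trigger2's description the multicast packet triggers every camera on the network, including the sender, so the camera running app_ext_b1 could end up triggered twice; whether that matters depends on how the listening side handles it.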

How to fix KeyError in Python --> KeyError: 'message'?

This error usually does not appear, but recently when I run this it shows up, and I have no clue how to fix it.
Can anyone please help me fix this error in the code below?
import requests
import json
from time import sleep

global OFFSET
OFFSET = 0
botToken = ""
global requestURL
global sendURL
requestURL = "http://api.telegram.org/bot" + botToken + "/getUpdates"
sendURL = "http://api.telegram.org/bot" + botToken + "/sendMessage"

def update(url):
    global OFFSET
    try:
        update_raw = requests.get(url + "?offset=" + str(OFFSET))
        update = update_raw.json()
        result = extract_result(update)
        if result != False:
            OFFSET = result['update_id'] + 1
            return result
        else:
            return False
    except requests.exceptions.ConnectionError:
        pass

def extract_result(dict):
    result_array = dict['result']
    if result_array == []:
        return False
    else:
        result_dic = result_array[0]
        return result_dic

def is_callback(dict):
    if 'callback_query' in dict:
        return True

def send_message(chatId, message):
    requests.post(sendURL + "?chat_id=" + str(chatId) + "&text=" + message)

def send_message_button(chatId, message, buttonJSON):
    requests.post(sendURL + "?chat_id=" + str(chatId) + "&reply_markup=" + buttonJSON + "&text=" + message)
    #print (sendURL + "?chat_id=" + str(chatId) + "&reply_markup=" + buttonJSON + "&text=" + message)

while True:
    newmessage = update(requestURL)
    if newmessage != False:
        if is_callback(newmessage) == True:
            userchatid = newmessage['callback_query']['message']['chat']['id']
            usertext = newmessage['callback_query']['message']['text']
            username = newmessage['callback_query']['message']['chat']['first_name']
            callback_data = newmessage['callback_query']['data']
            send_message(userchatid, "Callback from " + callback_data + ", pressed by " + username)
        else:
            userchatid = newmessage['message']['chat']['id']
            usertext = newmessage['message']['text']
            username = newmessage['message']['chat']['first_name']
            if usertext.lower() == "button":
                buttonDict1 = {"text":"Knopf\n" + "hitest", "callback_data":"Knopf"}
                buttonDict2 = {"text":"Knopf2", "callback_data":"Knopf2"}
                buttonArr = {"inline_keyboard":[[buttonDict1, buttonDict2]]}
                send_message_button(userchatid, "Hi " + username, json.dumps(buttonArr))
            else:
                send_message(userchatid, "You said: " + usertext)
    sleep(1)
This is the error that appears after I run this bot:
Line: 67
userchatid = newmessage['message']['chat']['id']
KeyError: 'message'
You catch requests.exceptions.ConnectionError in the update function but don't handle it, so update does not return False; it returns nothing at all (None), which passes your check and causes havoc.
Try to deal with the exception, or at least put a print in there to see whether it's the one causing your issues. Good luck!
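A minimal sketch of the kind of change the answer suggests (my addition, based on the update function from the question; extract_result and OFFSET are the ones defined there): return False from the except branch so the != False check in the main loop keeps working, and log the failure so connection problems become visible.

def update(url):
    global OFFSET
    try:
        update_raw = requests.get(url + "?offset=" + str(OFFSET))
        update = update_raw.json()
        result = extract_result(update)
        if result != False:
            OFFSET = result['update_id'] + 1
            return result
        return False
    except requests.exceptions.ConnectionError as e:
        # Keep the function's contract: always return False on failure,
        # and make the failure visible instead of silently returning None.
        print("Connection error while polling getUpdates: " + str(e))
        return False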

A problem with compatibility of the script on Windows

This script works perfectly on macOS and Linux, but when I try it on Windows it does not work.
When I run it on Windows, the error below appears.
I suspect the problem is in switch_user(dev).
I experimented with it (dev) but I couldn't solve it.
When the "Check GPT" step is reached, it stops and the error occurs.
PS C:\Users\motc-pc\Desktop\amonet-karnak-\amonet\modules> python main.py
[2020-06-03 22:03:49.653199] Waiting for bootrom
[2020-06-03 22:03:59.339064] Found port = COM22
[2020-06-03 22:03:59.348800] Handshake
* * * If you have a short attached, remove it now * * *
* * * Press Enter to continue * * *
[2020-06-03 22:04:02.368897] Init crypto engine
[2020-06-03 22:04:02.422396] Disable caches
[2020-06-03 22:04:02.424535] Disable bootrom range checks
[2020-06-03 22:04:02.459386] Load payload from ../brom-payload/build/payload.bin = 0x4888 bytes
[2020-06-03 22:04:02.469524] Send payload
[2020-06-03 22:04:03.440416] Let's rock
[2020-06-03 22:04:03.442368] Wait for the payload to come online...
[2020-06-03 22:04:04.163004] all good
[2020-06-03 22:04:04.165239] Check GPT
Traceback (most recent call last):
File "main.py", line 450, in <module>
main()
File "main.py", line 361, in main
switch_user(dev)
File "main.py", line 321, in switch_user
block = dev.emmc_read(0)
File "main.py", line 196, in emmc_read
raise RuntimeError("read fail")
RuntimeError: read fail
The full script is below.
As I said earlier, when I try it on Linux or macOS it works normally, without problems:
import struct
import os
import sys
import time
from handshake import handshake
from load_payload import load_payload, UserInputThread
from logger import log
import struct
import glob
import serial
from logger import log
BAUD = 115200
TIMEOUT = 5
CRYPTO_BASE = 0x10210000 # for karnak
def serial_ports ():
""" Lists available serial ports
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A set containing the serial ports available on the system
"""
if sys.platform.startswith("win"):
ports = [ "COM{0:d}".format(i + 1) for i in range(256) ]
elif sys.platform.startswith("linux"):
ports = glob.glob("/dev/ttyACM*")
elif sys.platform.startswith("darwin"):
ports = glob.glob("/dev/cu.usbmodem*")
else:
raise EnvironmentError("Unsupported platform")
result = set()
for port in ports:
try:
s = serial.Serial(port, timeout=TIMEOUT)
s.close()
result.add(port)
except (OSError, serial.SerialException):
pass
return result
def p32_be(x):
return struct.pack(">I", x)
class Device:
def __init__(self, port=None):
self.dev = None
if port:
self.dev = serial.Serial(port, BAUD, timeout=TIMEOUT)
def find_device(self,preloader=False):
if self.dev:
raise RuntimeError("Device already found")
if preloader:
log("Waiting for preloader")
else:
log("Waiting for bootrom")
old = serial_ports()
while True:
new = serial_ports()
# port added
if new > old:
port = (new - old).pop()
break
# port removed
elif old > new:
old = new
time.sleep(0.25)
log("Found port = {}".format(port))
self.dev = serial.Serial(port, BAUD, timeout=TIMEOUT)
def check(self, test, gold):
if test != gold:
raise RuntimeError("ERROR: Serial protocol mismatch")
def check_int(self, test, gold):
test = struct.unpack('>I', test)[0]
self.check(test, gold)
def _writeb(self, out_str):
self.dev.write(out_str)
return self.dev.read()
def handshake(self):
# look for start byte
while True:
c = self._writeb(b'\xa0')
if c == b'\x5f':
break
self.dev.flushInput()
# complete sequence
self.check(self._writeb(b'\x0a'), b'\xf5')
self.check(self._writeb(b'\x50'), b'\xaf')
self.check(self._writeb(b'\x05'), b'\xfa')
def handshake2(self, cmd='FACTFACT'):
# look for start byte
c = 0
while c != b'Y':
c = self.dev.read()
log("Preloader ready, sending " + cmd)
command = str.encode(cmd)
self.dev.write(command)
self.dev.flushInput()
def read32(self, addr, size=1):
result = []
self.dev.write(b'\xd1')
self.check(self.dev.read(1), b'\xd1') # echo cmd
self.dev.write(struct.pack('>I', addr))
self.check_int(self.dev.read(4), addr) # echo addr
self.dev.write(struct.pack('>I', size))
self.check_int(self.dev.read(4), size) # echo size
self.check(self.dev.read(2), b'\x00\x00') # arg check
for _ in range(size):
data = struct.unpack('>I', self.dev.read(4))[0]
result.append(data)
self.check(self.dev.read(2), b'\x00\x00') # status
# support scalar
if len(result) == 1:
return result[0]
else:
return result
def write32(self, addr, words, status_check=True):
# support scalar
if not isinstance(words, list):
words = [ words ]
self.dev.write(b'\xd4')
self.check(self.dev.read(1), b'\xd4') # echo cmd
self.dev.write(struct.pack('>I', addr))
self.check_int(self.dev.read(4), addr) # echo addr
self.dev.write(struct.pack('>I', len(words)))
self.check_int(self.dev.read(4), len(words)) # echo size
self.check(self.dev.read(2), b'\x00\x01') # arg check
for word in words:
self.dev.write(struct.pack('>I', word))
self.check_int(self.dev.read(4), word) # echo word
if status_check:
self.check(self.dev.read(2), b'\x00\x01') # status
def run_ext_cmd(self, cmd):
self.dev.write(b'\xC8')
self.check(self.dev.read(1), b'\xC8') # echo cmd
cmd = bytes([cmd])
self.dev.write(cmd)
self.check(self.dev.read(1), cmd)
self.dev.read(1)
self.dev.read(2)
def wait_payload(self):
data = self.dev.read(4)
if data != b"\xB1\xB2\xB3\xB4":
raise RuntimeError("received {} instead of expected pattern".format(data))
def emmc_read(self, idx):
# magic
self.dev.write(p32_be(0xf00dd00d))
# cmd
self.dev.write(p32_be(0x1000))
# block to read
self.dev.write(p32_be(idx))
data = self.dev.read(0x200)
if len(data) != 0x200:
raise RuntimeError("read fail")
return data
def emmc_write(self, idx, data):
if len(data) != 0x200:
raise RuntimeError("data must be 0x200 bytes")
# magic
self.dev.write(p32_be(0xf00dd00d))
# cmd
self.dev.write(p32_be(0x1001))
# block to write
self.dev.write(p32_be(idx))
# data
self.dev.write(data)
code = self.dev.read(4)
if code != b"\xd0\xd0\xd0\xd0":
raise RuntimeError("device failure")
def emmc_switch(self, part):
# magic
self.dev.write(p32_be(0xf00dd00d))
# cmd
self.dev.write(p32_be(0x1002))
# partition
self.dev.write(p32_be(part))
def reboot(self):
# magic
self.dev.write(p32_be(0xf00dd00d))
# cmd
self.dev.write(p32_be(0x3000))
def kick_watchdog(self):
# magic
self.dev.write(p32_be(0xf00dd00d))
# cmd
self.dev.write(p32_be(0x3001))
def rpmb_read(self):
# magic
self.dev.write(p32_be(0xf00dd00d))
# cmd
self.dev.write(p32_be(0x2000))
data = self.dev.read(0x100)
if len(data) != 0x100:
raise RuntimeError("read fail")
return data
def rpmb_write(self, data):
if len(data) != 0x100:
raise RuntimeError("data must be 0x100 bytes")
# magic
self.dev.write(p32_be(0xf00dd00d))
# cmd
self.dev.write(p32_be(0x2001))
# data
self.dev.write(data)
def switch_boot0(dev):
dev.emmc_switch(1)
block = dev.emmc_read(0)
if block[0:9] != b"EMMC_BOOT" and block != b"\x00" * 0x200:
dev.reboot()
raise RuntimeError("what's wrong with your BOOT0?")
dev.kick_watchdog()
def flash_data(dev, data, start_block, max_size=0):
while len(data) % 0x200 != 0:
data += b"\x00"
if max_size and len(data) > max_size:
raise RuntimeError("data too big to flash")
blocks = len(data) // 0x200
for x in range(blocks):
print("[{} / {}]".format(x + 1, blocks), end='\r')
dev.emmc_write(start_block + x, data[x * 0x200:(x + 1) * 0x200])
if x % 10 == 0:
dev.kick_watchdog()
print("")
def flash_binary(dev, path, start_block, max_size=0):
with open(path, "rb") as fin:
data = fin.read()
while len(data) % 0x200 != 0:
data += b"\x00"
flash_data(dev, data, start_block, max_size=0)
def dump_binary(dev, path, start_block, max_size=0):
with open(path, "w+b") as fout:
blocks = max_size // 0x200
for x in range(blocks):
print("[{} / {}]".format(x + 1, blocks), end='\r')
fout.write(dev.emmc_read(start_block + x))
if x % 10 == 0:
dev.kick_watchdog()
print("")
def force_fastboot(dev, gpt):
switch_user(dev)
block = list(dev.emmc_read(gpt["MISC"][0]))
block[0:16] = "FASTBOOT_PLEASE\x00".encode("utf-8")
dev.emmc_write(gpt["MISC"][0], bytes(block))
block = dev.emmc_read(gpt["MISC"][0])
def force_recovery(dev, gpt):
switch_user(dev)
block = list(dev.emmc_read(gpt["MISC"][0]))
block[0:16] = "boot-recovery\x00\x00\x00".encode("utf-8")
dev.emmc_write(gpt["MISC"][0], bytes(block))
block = dev.emmc_read(gpt["MISC"][0])
def switch_user(dev):
dev.emmc_switch(0)
block = dev.emmc_read(0)
dev.kick_watchdog()
def parse_gpt(dev):
data = dev.emmc_read(0x400 // 0x200) + dev.emmc_read(0x600 // 0x200) + dev.emmc_read(0x800 // 0x200) + dev.emmc_read(0xA00 // 0x200)
num = len(data) // 0x80
parts = dict()
for x in range(num):
part = data[x * 0x80:(x + 1) * 0x80]
part_name = part[0x38:].decode("utf-16le").rstrip("\x00")
part_start = struct.unpack("<Q", part[0x20:0x28])[0]
part_end = struct.unpack("<Q", part[0x28:0x30])[0]
parts[part_name] = (part_start, part_end - part_start + 1)
return parts
def main():
minimal = False
dev = Device()
dev.find_device()
# 0.1) Handshake
handshake(dev)
# 0.2) Load brom payload
load_payload(dev, "../brom-payload/build/payload.bin")
dev.kick_watchdog()
if len(sys.argv) == 2 and sys.argv[1] == "minimal":
thread = UserInputThread(msg = "Running in minimal mode, assuming LK, TZ, LK-payload and TWRP to have already been flashed.\nIf this is correct (i.e. you used \"brick\" option in step 1) press enter, otherwise terminate with Ctrl+C")
thread.start()
while not thread.done:
dev.kick_watchdog()
time.sleep(1)
minimal = True
# 1) Sanity check GPT
log("Check GPT")
switch_user(dev)
# 1.1) Parse gpt
gpt = parse_gpt(dev)
log("gpt_parsed = {}".format(gpt))
if "lk" not in gpt or "tee1" not in gpt or "boot" not in gpt or "recovery" not in gpt:
raise RuntimeError("bad gpt")
# 2) Sanity check boot0
log("Check boot0")
switch_boot0(dev)
# 3) Sanity check rpmb
log("Check rpmb")
rpmb = dev.rpmb_read()
if rpmb[0:4] != b"AMZN":
thread = UserInputThread(msg = "rpmb looks broken; if this is expected (i.e. you're retrying the exploit) press enter, otherwise terminate with Ctrl+C")
thread.start()
while not thread.done:
dev.kick_watchdog()
time.sleep(1)
# Clear preloader so, we get into bootrom without shorting, should the script stall (we flash preloader as last step)
# 4) Downgrade preloader
log("Clear preloader header")
switch_boot0(dev)
flash_data(dev, b"EMMC_BOOT" + b"\x00" * ((0x200 * 8) - 9), 0)
# 5) Zero out rpmb to enable downgrade
log("Downgrade rpmb")
dev.rpmb_write(b"\x00" * 0x100)
log("Recheck rpmb")
rpmb = dev.rpmb_read()
if rpmb != b"\x00" * 0x100:
dev.reboot()
raise RuntimeError("downgrade failure, giving up")
log("rpmb downgrade ok")
dev.kick_watchdog()
if not minimal:
# 6) Install preloader
log("Flash preloader")
switch_boot0(dev)
flash_binary(dev, "../bin/preloader.bin", 8)
flash_binary(dev, "../bin/preloader.bin", 520)
# 6) Install lk-payload
log("Flash lk-payload")
switch_boot0(dev)
flash_binary(dev, "../lk-payload/build/payload.bin", 1024)
# 7) Downgrade tz
log("Flash tz")
switch_user(dev)
flash_binary(dev, "../bin/tz.img", gpt["tee1"][0], gpt["tee1"][1] * 0x200)
# 8) Downgrade lk
log("Flash lk")
switch_user(dev)
flash_binary(dev, "../bin/lk.bin", gpt["lk"][0], gpt["lk"][1] * 0x200)
# 9) Flash microloader
log("Inject microloader")
switch_user(dev)
boot_hdr1 = dev.emmc_read(gpt["boot"][0]) + dev.emmc_read(gpt["boot"][0] + 1)
boot_hdr2 = dev.emmc_read(gpt["boot"][0] + 2) + dev.emmc_read(gpt["boot"][0] + 3)
flash_binary(dev, "../bin/microloader.bin", gpt["boot"][0], 2 * 0x200)
if boot_hdr2[0:8] != b"ANDROID!":
flash_data(dev, boot_hdr1, gpt["boot"][0] + 2, 2 * 0x200)
if not minimal:
log("Force fastboot")
force_fastboot(dev, gpt)
else:
log("Force recovery")
force_recovery(dev, gpt)
# 10) Downgrade preloader
log("Flash preloader header")
switch_boot0(dev)
flash_binary(dev, "../bin/preloader.hdr0", 0, 4)
flash_binary(dev, "../bin/preloader.hdr1", 4, 4)
# Reboot (to fastboot or recovery)
log("Reboot")
dev.reboot()
if __name__ == "__main__":
main()
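No answer is recorded for this one, but one thing worth checking (an assumption, not a confirmed diagnosis): pyserial's Serial.read(size) may return fewer bytes than requested once the port timeout expires, and on Windows the 0x200-byte EMMC block can arrive in smaller chunks than on Linux or macOS, which would make len(data) != 0x200 and trip the "read fail" in emmc_read. Here is a sketch of a read loop that keeps accumulating until the full block arrives or a deadline passes; read_exact is a hypothetical helper, not part of the original script:

import time

def read_exact(ser, size, deadline_s=5.0):
    # pyserial may return short reads when the port timeout expires,
    # which tends to show up more often on Windows COM ports.
    data = b""
    deadline = time.time() + deadline_s
    while len(data) < size and time.time() < deadline:
        chunk = ser.read(size - len(data))
        if chunk:
            data += chunk
    return data

# Possible use inside Device.emmc_read from the script above:
#     data = read_exact(self.dev, 0x200)
#     if len(data) != 0x200:
#         raise RuntimeError("read fail")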

string.lower() in contents.lower() not working correctly

My function performs WHOIS lookups using sockets and proxies.
Sometimes there is an issue with a proxy and it returns no data, so I check whether the data contains the originally requested domain and, if it does, return the result.
However, this sometimes returns True even when there's nothing in the data string.
I have also tried len(data) > 25, etc., but for some reason it can still return True.
if domain.lower() in data.lower():
    obj = WhoisEntry(domain, data)
    logger.debug('WHOIS success ' + domain + ': ' + data)
    return {
        'expiration_date': str(obj.expiration_date),
        'status': str(obj.status),
        'registrar': str(obj.registrar)
    }
Full code
def whois_tcp(domain):
    whois_servers = [ 'whois.verisign-grs.com', 'whois.internic.net', 'whois.crsnic.net' ]
    attempts = 0
    while attempts < 15:
        attempts = attempts + 1
        logger.debug('WHOIS attempt '+domain+': '+str(attempts))
        whois_host = random.choice(whois_servers)
        proxy = random.choice(proxies) # global variable from config.py
        proxy = proxy.split(':')
        try:
            s = socks.socksocket()
            s.setproxy(socks.PROXY_TYPE_SOCKS5, proxy[0], int(proxy[1]))
            s.connect((whois_host, 43))
            s.send(domain + '\n\r\n')
            data = ''
            buf = s.recv(1024)
            while len(buf):
                data += buf
                buf = s.recv(1024)
            s.close()
            #if domain.lower() not in data.lower():
            #    raise Exception(domain, 'Domain not found in WHOIS: '+data)
            #    continue
            if domain.lower() in data.lower():
                obj = WhoisEntry(domain, data)
                logger.debug('WHOIS success ' + domain + ': ' + data)
                return {
                    'expiration_date': str(obj.expiration_date),
                    'status': str(obj.status),
                    'registrar': str(obj.registrar)
                }
        except Exception, e:
            logger.error('WHOIS Lookup Failed: '+str(e))
    return None
What am I doing wrong?
1. Try:
   if str(domain).lower() in str(data).lower():
       ...
2. Check whether the value of the domain variable is None or ''.
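As a small aside on point 2 (my illustration, not part of the original answer): in Python the empty string is a substring of every string, so if domain ever ends up empty, the membership test is True no matter what data contains:

domain = ''   # e.g. an upstream parsing bug left the domain empty
data = ''     # the proxy returned nothing
print(domain.lower() in data.lower())   # True: '' is a substring of every string
print('' in 'any other string')         # also True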
I added code to write each WHOIS response to a file and noticed that it was in fact including WHOIS results:
file = open('./whois_records/'+domain, 'w')
file.write(data)
file.close()
I'm not sure why this line of code wasn't outputting the data string in the log file:
logger.debug('WHOIS success ' + domain + ': ' + str(data))
I have found that different WHOIS servers give me different results for lookups.
For example, for bpi-group.com:
1. whois.verisign-grs.com: **FREE**
2. whois.gandi.net: **TAKEN**
Additionally, I need to use a different WHOIS server for different TLDs (com/net/org/info/biz).
I am currently using:
- tld.whois-servers.net
The code is still not perfect but is working better, and the original problem has been solved:
def whois_tcp(domain):
    ext = tldextract.extract(domain)
    if ext.suffix == 'org':
        whois_servers = [ 'org.whois-servers.net' ]
    elif ext.suffix == 'biz':
        whois_servers = [ 'biz.whois-servers.net' ]
    elif ext.suffix == 'info':
        whois_servers = [ 'info.whois-servers.net' ]
    elif ext.suffix == 'com':
        whois_servers = [ 'com.whois-servers.net' ]
    elif ext.suffix == 'net':
        whois_servers = [ 'net.whois-servers.net' ]
    else:
        whois_servers = [ 'whois.verisign-grs.com', 'whois.internic.net', 'whois.crsnic.net' ]
    attempts = 0
    result = None
    while attempts < 15:
        attempts = attempts + 1
        logger.debug('WHOIS attempt '+domain+': '+str(attempts))
        whois_host = random.choice(whois_servers)
        proxy = random.choice(proxies) # global variable from config.py
        proxy = proxy.split(':')
        try:
            s = socks.socksocket()
            s.setproxy(socks.PROXY_TYPE_SOCKS5, proxy[0], int(proxy[1]))
            s.connect((whois_host, 43))
            s.send(domain + '\n\r\n')
            data = ''
            buf = s.recv(1024)
            while len(buf):
                data += buf
                buf = s.recv(1024)
            s.close()
            file = open('./whois_records/'+domain, 'w')
            file.write(data)
            file.close()
            # if domain.lower() not in data.lower():
            #     raise Exception(domain, 'Domain not found in WHOIS: '+data)
            #     continue
            if str(domain).lower() in str(data).lower():
                obj = WhoisEntry.load(domain, data)
                logger.debug('WHOIS success ' + domain + ': ' + str(data))
                return {
                    'expiration_date': str(obj.expiration_date),
                    'status': str(obj.status),
                    'registrar': str(obj.registrar)
                }
        except Exception, e:
            logger.error('WHOIS Lookup Failed: '+str(e))
    return None

Facebook signed request email

I have the following code:
def base64_url_decode(inp):
    padding_factor = (4 - len(inp) % 4) % 4
    inp += "="*padding_factor
    return base64.b64decode(unicode(inp).translate(dict(zip(map(ord, u'-_'), u'+/'))))

def parse_signed_request(signed_request, secret):
    l = signed_request.split('.', 2)
    encoded_sig = l[0]
    payload = l[1]
    sig = base64_url_decode(encoded_sig)
    data = json.loads(base64_url_decode(payload))
    if data.get('algorithm').upper() != 'HMAC-SHA256':
        log.error('Unknown algorithm')
        return None
    else:
        expected_sig = hmac.new(secret, msg=payload, digestmod=hashlib.sha256).digest()
        if sig != expected_sig:
            return None
        else:
            log.debug('valid signed request received..')
            return data
How do I use the Facebook signed request data (returned from parse_signed_request) to get the person's email address?
Here's the Facebook documentation for it:
https://developers.facebook.com/docs/howtos/login/signed-request/
I tried doing:
data = parse_signed_request(...)
data.get('registration').email
but that did not work.
What can I do?
Maybe you could try this one; this is part of my code:
def parse_signed_request(signed_request, app_secret):
    try:
        l = signed_request.split('.', 2)
        encoded_sig = str(l[0])
        payload = str(l[1])
    except IndexError:
        raise ValueError("'signed_request' malformed")
    sig = base64.urlsafe_b64decode(encoded_sig + "=" * ((4 - len(encoded_sig) % 4) % 4))
    data = base64.urlsafe_b64decode(payload + "=" * ((4 - len(payload) % 4) % 4))
    data = json.loads(data)
    if data.get('algorithm').upper() != 'HMAC-SHA256':
        raise ValueError("'signed_request' is using an unknown algorithm")
    else:
        expected_sig = hmac.new(app_secret, msg=payload, digestmod=hashlib.sha256).digest()
        if sig != expected_sig:
            raise ValueError("'signed_request' signature mismatch")
        else:
            return data

def fb_registration(request):
    if request.POST:
        if 'signed_request' in request.POST:
            # parse and check data
            data = parse_signed_request(request.POST['signed_request'], settings.FACEBOOK_APP_SECRET)
            # lets try to check if user exists based on username or email
            try:
                check_user = User.objects.get(username=data['registration']['name'])
            except:
                state = "Username is already exist. Please try other account."
                return HttpResponseRedirect(reverse('accounts:register'))
            try:
                check_user = User.objects.get(email=data['registration']['email'])
            except:
                state = "Email is already exist. Please use other account."
                return HttpResponseRedirect(reverse('accounts:register'))
            # lets create now the user
            randompass = ''.join([choice('1234567890qwertyuiopasdfghjklzxcvbnm') for i in range(7)])
            user = User.objects.create_user(data['registration']['name'], data['registration']['email'], randompass)
            user.save()
            user = authenticate(username=data['registration']['name'], password=randompass)
            if user is not None:
                # save in user profile his facebook id
                fbid = 'http://www.facebook.com/profile.php?id=%s' % data['user_id']
                r = RPXAssociation(user=user, identifier=fbid)
                r.save()
                login(request, user)
                return HttpResponseRedirect(reverse('accounts:choose_plan'))
            else:
                state = "Registration request failed!"
                return HttpResponseRedirect(reverse('accounts:register'))
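To connect this back to the question: parse_signed_request returns a plain dict decoded from JSON, so the registration payload is read with dictionary indexing rather than attribute access. A short note, assuming the signed request really comes from the registration plugin and therefore contains a 'registration' key:

data = parse_signed_request(request.POST['signed_request'], settings.FACEBOOK_APP_SECRET)

# data['registration'] is itself a dict, so use ['email'], not .email
email = data['registration']['email']

# Defensive variant, in case the signed request carries no registration data
email = data.get('registration', {}).get('email')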
