I have a Python script that runs continuously. If a new file appears in a directory, the script opens a URL using urllib2 to make a request to a specific IP.
Here is the code:
import base64
from json import dumps
from urllib2 import Request, urlopen

encoded_string = base64.b64encode(image_file.read())
values = dumps({
    'image_data': encoded_string,
    'requestCode': '111'
})
headers = {"Content-Type": "application/json"}
request = Request("http://xx.xxx.xx.xxx/api/carplate_service", data=values, headers=headers)
response = urlopen(request, timeout=60)
The code works well, but at random times (usually around 1-2 AM) I get this error:
<class 'urllib2.URLError'> - <urlopen error [Errno 110] Connection timed out>
I have exception handling around that function, as shown below:
try:
    ip = sys.argv[1]
    histId = int(sys.argv[2])
    handler = ModHandler()
    wm = pyinotify.WatchManager()
    notifier = pyinotify.Notifier(wm, handler)
    wdd = wm.add_watch('./' + ip + '/', pyinotify.IN_CLOSE_WRITE)
    notifier.loop()
except BaseException as e:
    with open("error.log", "a") as text_file:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        text_file.write(time.strftime("%Y-%m-%d %H:%M:%S") + " [" + str(exc_tb.tb_lineno) + " - " + fname + "] : " + str(exc_type) + " - " + str(e) + "\n")
The exception handling does not help, because the application cannot continue after an error like the one above.
My question is: how can I make the program keep running even when that exception is thrown?
I'm using Python 2.6.
Thanks
For function calls that go out to external services, I usually find that the following basic structure works pretty well:
import time

expire_time = 2

while True:
    start_time = time.time()
    try:
        # Do something here
        # ....
        # If you make it to the bottom of the code, break out of the loop
        break
    except BaseException as e:
        # Compare the start_time with the current time
        now_time = time.time()
        if now_time > start_time + expire_time:
            raise e
        # Otherwise try executing the `try` block again
Using the code that you've provided, it could look something like this:
import time

expire_time = 2

while True:
    start_time = time.time()
    try:
        ip = sys.argv[1]
        histId = int(sys.argv[2])
        handler = ModHandler()
        wm = pyinotify.WatchManager()
        notifier = pyinotify.Notifier(wm, handler)
        wdd = wm.add_watch('./' + ip + '/', pyinotify.IN_CLOSE_WRITE)
        notifier.loop()
        break
    except BaseException as e:
        now_time = time.time()
        if now_time > start_time + expire_time:
            raise e
        else:
            with open("error.log", "a") as text_file:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                text_file.write(time.strftime("%Y-%m-%d %H:%M:%S") + " [" + str(exc_tb.tb_lineno) + " - " + fname + "] : " + str(exc_type) + " - " + str(e) + "\n")
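If restarting the whole watcher is not what you want, another option is to catch the timeout right where the request is made, so the notifier loop itself never dies. A minimal sketch, assuming the urlopen call happens inside your ModHandler callback:

import time
from urllib2 import Request, urlopen, URLError

def send_request(values, headers):
    # Catch network failures here so an occasional timeout does not
    # propagate up and kill the pyinotify loop that calls this function.
    request = Request("http://xx.xxx.xx.xxx/api/carplate_service", data=values, headers=headers)
    try:
        return urlopen(request, timeout=60)
    except URLError as e:
        with open("error.log", "a") as text_file:
            text_file.write(time.strftime("%Y-%m-%d %H:%M:%S") + " : " + str(e) + "\n")
        return None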
Related
I am trying to troubleshoot a script for Mimecast's API. The script runs fine for the most part, but a few times, I have noticed that it stops pulling logs and generally appears to be a hung process. After restarting the script and manually pushing logs to the syslog server, it starts working again without issue. I am not able to reproduce this issue at will.
The script is supposed to do the following:
Authenticate against Mimecast's API
Sign responses
Download, extract and save log files to the log dir
Utilize a tokenized header to determine which file was downloaded in the last request, saving the token ID within a file in the checkpoint directory
Push files to remote syslog server
Output any errors and info to console
Below is the sample code from Mimecast.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging.handlers
import json
import os
import requests
import base64
import uuid
import datetime
import hashlib
import shutil
import hmac
import time
from zipfile import ZipFile
import io
# Set up variables
APP_ID = "YOUR DEVELOPER APPLICATION ID"
APP_KEY = "YOUR DEVELOPER APPLICATION KEY"
URI = "/api/audit/get-siem-logs"
EMAIL_ADDRESS = 'EMAIL ADDRESS OF YOUR ADMINISTRATOR'
ACCESS_KEY = 'ACCESS KEY FOR YOUR ADMINISTRATOR'
SECRET_KEY = 'SECRET KEY FOR YOUR ADMINISTRATOR'
LOG_FILE_PATH = "FULLY QUALIFIED PATH TO FOLDER TO WRITE LOGS"
CHK_POINT_DIR = 'FULLY QUALIFIED PATH TO FOLDER TO WRITE PAGE TOKEN'
# Set True to output to syslog, false to only save to file
syslog_output = False
# Enter the IP address or hostname of your syslog server
syslog_server = 'localhost'
# Change this to override default port
syslog_port = 514
# delete files after fetching
delete_files = True
# Set threshold in number of files in log file directory
log_file_threshold = 10000
# Set up logging (in this case to terminal)
log = logging.getLogger(__name__)
log.root.setLevel(logging.DEBUG)
log_formatter = logging.Formatter('%(levelname)s %(message)s')
log_handler = logging.StreamHandler()
log_handler.setFormatter(log_formatter)
log.addHandler(log_handler)
# Set up syslog output
syslog_handler = logging.handlers.SysLogHandler(address=(syslog_server, syslog_port))
syslog_formatter = logging.Formatter('%(message)s')
syslog_handler.setFormatter(syslog_formatter)
syslogger = logging.getLogger(__name__)
syslogger = logging.getLogger('SysLogger')
syslogger.addHandler(syslog_handler)
# Supporting methods
def get_hdr_date():
    return datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S UTC")


def read_file(file_name):
    try:
        with open(file_name, 'r') as f:
            data = f.read()
            return data
    except Exception as e:
        log.error('Error reading file ' + file_name + '. Cannot continue. Exception: ' + str(e))
        quit()


def write_file(file_name, data_to_write):
    if '.zip' in file_name:
        try:
            byte_content = io.BytesIO(data_to_write)
            zip_file = ZipFile(byte_content)
            zip_file.extractall(LOG_FILE_PATH)
        except Exception as e:
            log.error('Error writing file ' + file_name + '. Cannot continue. Exception: ' + str(e))
            quit()
    else:
        try:
            with open(file_name, 'w') as f:
                f.write(data_to_write)
        except Exception as e:
            log.error('Error writing file ' + file_name + '. Cannot continue. Exception: ' + str(e))
            quit()
def get_base_url(email_address):
    # Create post body for request
    post_body = dict()
    post_body['data'] = [{}]
    post_body['data'][0]['emailAddress'] = email_address
    # Create variables required for request headers
    request_id = str(uuid.uuid4())
    request_date = get_hdr_date()
    headers = {'x-mc-app-id': APP_ID, 'x-mc-req-id': request_id, 'x-mc-date': request_date}
    # Send request to API
    log.debug('Sending request to https://api.mimecast.com/api/discover-authentication with request Id: ' +
              request_id)
    try:
        r = requests.post(url='https://api.mimecast.com/api/login/discover-authentication',
                          data=json.dumps(post_body), headers=headers)
        # Handle Rate Limiting
        if r.status_code == 429:
            log.warning('Rate limit hit. sleeping for ' + str(r.headers['X-RateLimit-Reset'] * 1000))
            time.sleep(r.headers['X-RateLimit-Reset'] * 1000)
    except Exception as e:
        log.error('Unexpected error getting base url. Cannot continue.' + str(e))
        quit()
    # Handle error from API
    if r.status_code != 200:
        log.error('Request returned with status code: ' + str(r.status_code) + ', response body: ' +
                  r.text + '. Cannot continue.')
        quit()
    # Load response body as JSON
    resp_data = json.loads(r.text)
    # Look for api key in region region object to get base url
    if 'region' in resp_data["data"][0]:
        base_url = resp_data["data"][0]["region"]["api"].split('//')
        base_url = base_url[1]
    else:
        # Handle no region found, likely the email address was entered incorrectly
        log.error(
            'No region information returned from API, please check the email address.'
            'Cannot continue')
        quit()
    return base_url
def post_request(base_url, uri, post_body, access_key, secret_key):
    # Create variables required for request headers
    request_id = str(uuid.uuid4())
    request_date = get_hdr_date()
    unsigned_auth_header = '{date}:{req_id}:{uri}:{app_key}'.format(
        date=request_date,
        req_id=request_id,
        uri=uri,
        app_key=APP_KEY
    )
    hmac_sha1 = hmac.new(
        base64.b64decode(secret_key),
        unsigned_auth_header.encode(),
        digestmod=hashlib.sha1).digest()
    sig = base64.encodebytes(hmac_sha1).rstrip()
    headers = {
        'Authorization': 'MC ' + access_key + ':' + sig.decode(),
        'x-mc-app-id': APP_ID,
        'x-mc-date': request_date,
        'x-mc-req-id': request_id,
        'Content-Type': 'application/json'
    }
    try:
        # Send request to API
        log.debug('Sending request to https://' + base_url + uri + ' with request Id: ' + request_id)
        r = requests.post(url='https://' + base_url + uri, data=json.dumps(post_body), headers=headers)
        # Handle Rate Limiting
        if r.status_code == 429:
            log.warning('Rate limit hit. sleeping for ' + str(r.headers['X-RateLimit-Reset'] * 1000))
            time.sleep(r.headers['X-RateLimit-Reset'] * 1000)
            r = requests.post(url='https://' + base_url + uri, data=json.dumps(post_body), headers=headers)
    # Handle errors
    except Exception as e:
        log.error('Unexpected error connecting to API. Exception: ' + str(e))
        return 'error'
    # Handle errors from API
    if r.status_code != 200:
        log.error('Request to ' + uri + ' with , request id: ' + request_id + ' returned with status code: ' +
                  str(r.status_code) + ', response body: ' + r.text)
        return 'error'
    # Return response body and response headers
    return r.content, r.headers
def get_mta_siem_logs(checkpoint_dir, base_url, access_key, secret_key):
    uri = "/api/audit/get-siem-logs"
    # Set checkpoint file name to store page token
    checkpoint_filename = os.path.join(checkpoint_dir, 'get_mta_siem_logs_checkpoint')
    # Build post body for request
    post_body = dict()
    post_body['data'] = [{}]
    post_body['data'][0]['type'] = 'MTA'
    post_body['data'][0]['compress'] = True
    if os.path.exists(checkpoint_filename):
        post_body['data'][0]['token'] = read_file(checkpoint_filename)
    # Send request to API
    resp = post_request(base_url, uri, post_body, access_key, secret_key)
    now = datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Y")
    # Process response
    if resp != 'error':
        resp_body = resp[0]
        resp_headers = resp[1]
        content_type = resp_headers['Content-Type']
        # End if response is JSON as there is no log file to download
        if content_type == 'application/json':
            log.info('No more logs available')
            return False
        # Process log file
        elif content_type == 'application/octet-stream':
            file_name = resp_headers['Content-Disposition'].split('=\"')
            file_name = file_name[1][:-1]
            # Save files to LOG_FILE_PATH
            write_file(os.path.join(LOG_FILE_PATH, file_name), resp_body)
            # Save mc-siem-token page token to check point directory
            write_file(checkpoint_filename, resp_headers['mc-siem-token'])
            try:
                if syslog_output is True:
                    for filename in os.listdir(LOG_FILE_PATH):
                        file_creation_time = time.ctime(os.path.getctime(LOG_FILE_PATH + "/" + filename))
                        if now < file_creation_time or now == file_creation_time:
                            log.info('Loading file: ' + filename + ' to output to ' + syslog_server + ':' + str(syslog_port))
                            with open(file=os.path.join(LOG_FILE_PATH, filename), mode='r', encoding='utf-8') as log_file:
                                lines = log_file.read().splitlines()
                                for line in lines:
                                    syslogger.info(line)
                            log.info('Syslog output completed for file ' + filename)
            except Exception as e:
                log.error('Unexpected error writing to syslog. Exception: ' + str(e))
            # return true to continue loop
            return True
        else:
            # Handle errors
            log.error('Unexpected response')
            for header in resp_headers:
                log.error(header)
            return False
def run_script():
    # discover base URL
    try:
        base_url = get_base_url(email_address=EMAIL_ADDRESS)
    except Exception as e:
        log.error('Error discovering base url for ' + EMAIL_ADDRESS + ' . Exception: ' + str(e))
        quit()
    # Request log data in a loop until there are no more logs to collect
    try:
        log.info('Getting MTA log data')
        while get_mta_siem_logs(checkpoint_dir=CHK_POINT_DIR, base_url=base_url, access_key=ACCESS_KEY,
                                secret_key=SECRET_KEY) is True:
            log.info('Getting more MTA log files')
    except Exception as e:
        log.error('Unexpected error getting MTA logs ' + (str(e)))
    file_number = len([name for name in os.listdir(LOG_FILE_PATH) if os.path.isfile(name)])
    if delete_files or file_number >= log_file_threshold:
        for filename in os.listdir(LOG_FILE_PATH):
            file_path = os.path.join(LOG_FILE_PATH, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                print('Failed to delete %s. Reason: %s' % (file_path, e))
    quit()


# Run script
run_script()
It seems like it may be a race condition, but I am not sure how to confirm that since I can't reproduce it. I notice that SumoLogic has a modified version of this script as well, with a different methodology for managing the files/paths. If that script works better than the main sample script above, would anybody be able to explain WHY? I haven't had any issues with it yet.
https://github.com/SumoLogic/sumologic-content/blob/master/MimeCast/SumoLogic-Mimecast-Data-Collection/siem_collection.py
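One thing worth checking in the sample above: requests.post is called without a timeout, so a stalled connection can block indefinitely, which would look exactly like a hung process. Also, X-RateLimit-Reset arrives as a string, so multiplying it by 1000 and passing it to time.sleep raises a TypeError rather than sleeping. A sketch of the same call with both issues addressed (the 60-second timeout and the 30-second fallback are assumptions, not Mimecast recommendations):

import time
import requests

def post_with_timeout(url, data, headers, timeout=60):
    # A timeout makes a stalled connection raise instead of hanging forever.
    r = requests.post(url=url, data=data, headers=headers, timeout=timeout)
    if r.status_code == 429:
        # Header values are strings; convert before sleeping.
        time.sleep(int(r.headers.get('X-RateLimit-Reset', '30')))
        r = requests.post(url=url, data=data, headers=headers, timeout=timeout)
    return r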
I have 3 lat/lngs and a URL that I am constructing. My output should be 3 URLs, one for each lat/lng, but I am receiving 6. What do I need to change in my code below to print 3 URLs instead of 6? The try block and the first two for loops are there for error handling: if the script fails, try twice. I am getting 6 values even when the script does not fail.
def main():
    for i in range(2):
        for attempts in range(1):
            try:
                for lat, lon, id_, startDate, endDate in zip(latval, lonval, idVal, startDayValStr, endDayValStr):
                    time_param = '?start=' + startDate + 'T' + "01:00" + 'Z' + '&end=' + endDate + 'T' + "23:00" + 'Z'
                    hrPrecip = 'https://insight.api.wdtinc.com/hourly-precipitation/' + str(lat) + '/' + str(lon) + time_param + '&unit=inches'
                    print hrPrecip
            except Exception as e:
                attempts = i + 1
                sleep(30)
                print "now trying attempt #" + " " + str(attempts) + " " + "for error" " " + str(e)
                print(traceback.format_exc())
                logging.exception(e)
                msg = "PYTHON ERRORS:\nTraceback info:\n" + traceback.format_exc()
                logging.debug("sending error email")
                emailserver.email_error_msg(msg)


if __name__ == "__main__":
    main()
Output:
https://insight.api.wdtinc.com/hourly-precipitation/44.797207/-95.175648?start=2019-05-13T01:00Z&end=2019-05-13T23:00Z&unit=inches
https://insight.api.wdtinc.com/hourly-precipitation/44.796302/-95.180946?start=2019-05-13T01:00Z&end=2019-05-13T23:00Z&unit=inches
https://insight.api.wdtinc.com/hourly-precipitation/44.778728/-95.23022?start=2019-05-13T01:00Z&end=2019-05-13T23:00Z&unit=inches
https://insight.api.wdtinc.com/hourly-precipitation/44.797207/-95.175648?start=2019-05-13T01:00Z&end=2019-05-13T23:00Z&unit=inches
https://insight.api.wdtinc.com/hourly-precipitation/44.796302/-95.180946?start=2019-05-13T01:00Z&end=2019-05-13T23:00Z&unit=inches
https://insight.api.wdtinc.com/hourly-precipitation/44.778728/-95.23022?start=2019-05-13T01:00Z&end=2019-05-13T23:00Z&unit=inches
The duplication comes from the outer for i in range(2) loop: it runs the whole block twice, so every URL prints twice even when nothing fails. You also do not need the second loop with attempts in range(1). In fact, you do not need either wrapper loop just to print the URLs; to retry on failure, break out of the loop on success instead.
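A minimal sketch of that retry shape, assuming the same latval/lonval/idVal/startDayValStr/endDayValStr lists from the question, that prints each URL once and only repeats on failure:

from time import sleep

def main():
    for attempt in range(2):  # at most two attempts
        try:
            for lat, lon, id_, startDate, endDate in zip(latval, lonval, idVal, startDayValStr, endDayValStr):
                time_param = '?start=' + startDate + 'T01:00Z' + '&end=' + endDate + 'T23:00Z'
                hrPrecip = ('https://insight.api.wdtinc.com/hourly-precipitation/'
                            + str(lat) + '/' + str(lon) + time_param + '&unit=inches')
                print hrPrecip
            break  # success: do not run the loop again
        except Exception as e:
            print "attempt " + str(attempt + 1) + " failed, retrying: " + str(e)
            sleep(30)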
I test my proxies with this script and it reports many of them as working, but when I test the "working" proxies with another proxy checker, only a very small number actually work.
Here is the part that's checking if the proxy works:
def process(self, task):
    global alive
    global dead
    global tested
    proxy = task
    log_msg = str("Trying HTTP proxy%21s " % proxy)
    cj = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(cj),
        urllib.request.HTTPRedirectHandler(),
        urllib.request.ProxyHandler({'http': proxy})
    )
    try:
        t1 = time.time()
        response = opener.open(test_url, timeout=timeout_value).read()
        tested += 1
        t2 = time.time()
    except Exception as e:
        log_msg += "%s " % fail_msg
        print(Fore.LIGHTRED_EX + log_msg)
        dead += 1
        tested += 1
        return None
    log_msg += ok_msg + "Response time: %d" % (int((t2 - t1) * 1000))
    print(Fore.LIGHTGREEN_EX + log_msg)
    text_file = open(out_filename, "a")
    text_file.write(proxy + "\r\n")
    text_file.close()
    alive += 1
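A likely cause of the false positives is that any response at all counts as alive, even if it is the proxy's own error page or a captive portal. A minimal sketch of a stricter check, assuming a test_url whose expected content you know (expected_marker is a bytes marker you choose, e.g. b'Example Domain' for http://example.com/):

import urllib.request

def check_proxy(proxy, test_url, expected_marker, timeout_value=10):
    # Route both http and https requests through the proxy.
    opener = urllib.request.build_opener(
        urllib.request.ProxyHandler({'http': proxy, 'https': proxy})
    )
    try:
        body = opener.open(test_url, timeout=timeout_value).read()
    except Exception:
        return False
    # Count the proxy as working only if the page content is what we
    # expect, not merely any response.
    return expected_marker in body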
I can upload a document to GDrive, but I want it to work in overwrite mode, meaning that uploading a second time will overwrite the same file.
Code snippet below:
import subprocess
import re
import gdata.client, gdata.docs.client, gdata.docs.data
import atom.data
import os
import time

#----------------------------------------#

def main():
    filename = "test.xls"
    mimetype = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    username = "mymail@gmail.com"
    password = "mypassword"
    upload_gdoc(filename, mimetype, username, password)
    return

def upload_gdoc(filename, mimetype, username, password):
    try:
        fdin = open(filename)
    except IOError, e:
        print 'ERROR: Unable to open ' + filename + ': ' + e[1]
        return
    file_size = os.path.getsize(fdin.name)
    docsclient = gdata.docs.client.DocsClient()
    try:
        docsclient.ClientLogin(username, password, docsclient.source)
    except (gdata.client.BadAuthentication, gdata.client.Error), e:
        print 'ERROR: ' + str(e)
        return
    except:
        print 'ERROR: Unable to login'
        return
    # The default root collection URI
    uri = 'https://docs.google.com/feeds/upload/create-session/default/private/full'
    uri += '?convert=true'
    t1 = time.time()
    uploader = gdata.client.ResumableUploader(docsclient, fdin, mimetype, file_size, chunk_size=1048576, desired_class=gdata.data.GDEntry)
    new_entry = uploader.UploadFile(uri, entry=gdata.data.GDEntry(title=atom.data.Title(text=os.path.basename(fdin.name))))
    t2 = time.time()
    print 'Uploaded', '{0:.2f}'.format(file_size / 1024.0 / 1024.0) + ' MiB in ' + str(round(t2 - t1, 2)) + ' secs'
    fdin.close()
    return

if __name__ == "__main__":
    main()
Right now, each upload creates a new file with the same file name.
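The snippet always starts a new create-session, which is why each run produces a duplicate. One workaround is to delete any resource with the same title before uploading. The sketch below assumes gdata v3's GetAllResources and DeleteResource behave as in the library's samples; verify the names against your installed gdata version:

import urllib

def delete_existing(docsclient, filename):
    # Hypothetical sketch: find resources whose title exactly matches
    # the file name and delete them, so the next upload replaces the file.
    uri = '/feeds/default/private/full?title=' + urllib.quote(filename) + '&title-exact=true'
    for entry in docsclient.GetAllResources(uri=uri):
        docsclient.DeleteResource(entry, permanent=True)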
I have the following script (below), which returns the status code of URLs. It loops through a file and tries to connect to each host. The only problem is that it obviously stops looping when it reaches an exception.
I have tried numerous things to keep the whole thing in a loop, but to no avail. Any thoughts?
import urllib
import sys
import time

hostsFile = "webHosts.txt"

try:
    f = file(hostsFile)
    while True:
        line = f.readline().strip()
        epoch = time.time()
        epoch = str(epoch)
        if len(line) == 0:
            break
        conn = urllib.urlopen(line)
        print epoch + ": Connection successful, status code for " + line + " is " + str(conn.code) + "\n"
except IOError:
    epoch = time.time()
    epoch = str(epoch)
    print epoch + ": Connection unsuccessful, unable to connect to server, potential routing issues\n"
    sys.exit()
else:
    f.close()
EDIT:
I've come up with this in the meantime. Any issues with it? (I'm still learning :p)...
f = file(hostsFile)
while True:
    line = f.readline().strip()
    epoch = time.time()
    epoch = str(epoch)
    if len(line) == 0:
        break
    try:
        conn = urllib.urlopen(line)
        print epoch + ": Connection successful, status code for " + line + " is " + str(conn.code) + "\n"
    except IOError:
        print epoch + "connection unsuccessful"
Thanks,
MHibbin
You could handle the exception where it is raised. Also, use a context manager when opening files; it makes for simpler code.
with open(hostsFile, 'r') as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        epoch = str(time.time())
        try:
            conn = urllib.urlopen(line)
            print epoch + ": Connection successful, status code for " + line + " is " + str(conn.code) + "\n"
        except IOError:
            print epoch + ": Connection unsuccessful, unable to connect to server, potential routing issues\n"
You need to handle the exception raised by urllib.urlopen(line), something like this:
try:
    f = file(hostsFile)
    while True:
        line = f.readline().strip()
        epoch = time.time()
        epoch = str(epoch)
        if len(line) == 0:
            break
        try:
            conn = urllib.urlopen(line)
        except IOError:
            print "Exception occurred"
            pass
except IOError:
    epoch = time.time()
    epoch = str(epoch)
    print epoch + ": Connection unsuccessful, unable to connect to server, potential routing issues\n"
    sys.exit()
else:
    f.close()
You could try catching the exception inside the while loop, something like this:
try:
    f = file(hostsFile)
    while True:
        line = f.readline().strip()
        epoch = time.time()
        epoch = str(epoch)
        if len(line) == 0:
            break
        try:
            conn = urllib.urlopen(line)
            print epoch + ": Connection successful, status code for " + line + " is " + str(conn.code) + "\n"
        except:
            epoch = time.time()
            epoch = str(epoch)
            print epoch + ": Connection unsuccessful, unable to connect to server, potential routing issues\n"
except IOError:
    pass
else:
    f.close()