I am trying to troubleshoot a script for Mimecast's API. The script runs fine for the most part, but a few times, I have noticed that it stops pulling logs and generally appears to be a hung process. After restarting the script and manually pushing logs to the syslog server, it starts working again without issue. I am not able to reproduce this issue at will.
The script is supposed to do the following:
Authenticate against Mimecast's API
Sign requests
Download, extract, and save log files to the log directory
Use a token header to determine which file was downloaded in the last request, and save the token ID to a file in the checkpoint directory
Push files to the remote syslog server
Output any errors and info to the console
Below is the sample code from Mimecast.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging.handlers
import json
import os
import requests
import base64
import uuid
import datetime
import hashlib
import shutil
import hmac
import time
from zipfile import ZipFile
import io
# Set up variables
APP_ID = "YOUR DEVELOPER APPLICATION ID"
APP_KEY = "YOUR DEVELOPER APPLICATION KEY"
URI = "/api/audit/get-siem-logs"
EMAIL_ADDRESS = 'EMAIL ADDRESS OF YOUR ADMINISTRATOR'
ACCESS_KEY = 'ACCESS KEY FOR YOUR ADMINISTRATOR'
SECRET_KEY = 'SECRET KEY FOR YOUR ADMINISTRATOR'
LOG_FILE_PATH = "FULLY QUALIFIED PATH TO FOLDER TO WRITE LOGS"
CHK_POINT_DIR = 'FULLY QUALIFIED PATH TO FOLDER TO WRITE PAGE TOKEN'
# Set to True to output to syslog, False to only save to file
syslog_output = False
# Enter the IP address or hostname of your syslog server
syslog_server = 'localhost'
# Change this to override default port
syslog_port = 514
# delete files after fetching
delete_files = True
# Set threshold in number of files in log file directory
log_file_threshold = 10000
# Set up logging (in this case to terminal)
log = logging.getLogger(__name__)
log.root.setLevel(logging.DEBUG)
log_formatter = logging.Formatter('%(levelname)s %(message)s')
log_handler = logging.StreamHandler()
log_handler.setFormatter(log_formatter)
log.addHandler(log_handler)
# Set up syslog output
syslog_handler = logging.handlers.SysLogHandler(address=(syslog_server, syslog_port))
syslog_formatter = logging.Formatter('%(message)s')
syslog_handler.setFormatter(syslog_formatter)
syslogger = logging.getLogger('SysLogger')
syslogger.addHandler(syslog_handler)
# Supporting methods
def get_hdr_date():
return datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S UTC")
def read_file(file_name):
try:
with open(file_name, 'r') as f:
data = f.read()
return data
except Exception as e:
log.error('Error reading file ' + file_name + '. Cannot continue. Exception: ' + str(e))
quit()
def write_file(file_name, data_to_write):
if '.zip' in file_name:
try:
byte_content = io.BytesIO(data_to_write)
zip_file = ZipFile(byte_content)
zip_file.extractall(LOG_FILE_PATH)
except Exception as e:
log.error('Error writing file ' + file_name + '. Cannot continue. Exception: ' + str(e))
quit()
else:
try:
with open(file_name, 'w') as f:
f.write(data_to_write)
except Exception as e:
log.error('Error writing file ' + file_name + '. Cannot continue. Exception: ' + str(e))
quit()
def get_base_url(email_address):
# Create post body for request
post_body = dict()
post_body['data'] = [{}]
post_body['data'][0]['emailAddress'] = email_address
# Create variables required for request headers
request_id = str(uuid.uuid4())
request_date = get_hdr_date()
headers = {'x-mc-app-id': APP_ID, 'x-mc-req-id': request_id, 'x-mc-date': request_date}
# Send request to API
    log.debug('Sending request to https://api.mimecast.com/api/login/discover-authentication with request Id: ' +
              request_id)
try:
r = requests.post(url='https://api.mimecast.com/api/login/discover-authentication',
data=json.dumps(post_body), headers=headers)
# Handle Rate Limiting
        if r.status_code == 429:
            log.warning('Rate limit hit. Sleeping for ' + r.headers['X-RateLimit-Reset'] + ' ms')
            # Header value is a string of milliseconds; convert before sleeping
            time.sleep(int(r.headers['X-RateLimit-Reset']) / 1000)
except Exception as e:
log.error('Unexpected error getting base url. Cannot continue.' + str(e))
quit()
# Handle error from API
if r.status_code != 200:
log.error('Request returned with status code: ' + str(r.status_code) + ', response body: ' +
r.text + '. Cannot continue.')
quit()
# Load response body as JSON
resp_data = json.loads(r.text)
    # Look for the api key in the region object to get the base url
if 'region' in resp_data["data"][0]:
base_url = resp_data["data"][0]["region"]["api"].split('//')
base_url = base_url[1]
else:
# Handle no region found, likely the email address was entered incorrectly
        log.error(
            'No region information returned from API, please check the email address. '
            'Cannot continue.')
quit()
return base_url
def post_request(base_url, uri, post_body, access_key, secret_key):
# Create variables required for request headers
request_id = str(uuid.uuid4())
request_date = get_hdr_date()
unsigned_auth_header = '{date}:{req_id}:{uri}:{app_key}'.format(
date=request_date,
req_id=request_id,
uri=uri,
app_key=APP_KEY
)
hmac_sha1 = hmac.new(
base64.b64decode(secret_key),
unsigned_auth_header.encode(),
digestmod=hashlib.sha1).digest()
sig = base64.encodebytes(hmac_sha1).rstrip()
headers = {
'Authorization': 'MC ' + access_key + ':' + sig.decode(),
'x-mc-app-id': APP_ID,
'x-mc-date': request_date,
'x-mc-req-id': request_id,
'Content-Type': 'application/json'
}
try:
# Send request to API
log.debug('Sending request to https://' + base_url + uri + ' with request Id: ' + request_id)
r = requests.post(url='https://' + base_url + uri, data=json.dumps(post_body), headers=headers)
# Handle Rate Limiting
        if r.status_code == 429:
            log.warning('Rate limit hit. Sleeping for ' + r.headers['X-RateLimit-Reset'] + ' ms')
            # Header value is a string of milliseconds; convert before sleeping
            time.sleep(int(r.headers['X-RateLimit-Reset']) / 1000)
            r = requests.post(url='https://' + base_url + uri, data=json.dumps(post_body), headers=headers)
# Handle errors
except Exception as e:
log.error('Unexpected error connecting to API. Exception: ' + str(e))
return 'error'
# Handle errors from API
if r.status_code != 200:
        log.error('Request to ' + uri + ' with request id: ' + request_id + ' returned with status code: ' +
                  str(r.status_code) + ', response body: ' + r.text)
return 'error'
# Return response body and response headers
return r.content, r.headers
def get_mta_siem_logs(checkpoint_dir, base_url, access_key, secret_key):
uri = "/api/audit/get-siem-logs"
# Set checkpoint file name to store page token
checkpoint_filename = os.path.join(checkpoint_dir, 'get_mta_siem_logs_checkpoint')
# Build post body for request
post_body = dict()
post_body['data'] = [{}]
post_body['data'][0]['type'] = 'MTA'
post_body['data'][0]['compress'] = True
if os.path.exists(checkpoint_filename):
post_body['data'][0]['token'] = read_file(checkpoint_filename)
# Send request to API
resp = post_request(base_url, uri, post_body, access_key, secret_key)
    now = time.time()  # capture as a numeric timestamp so it can be compared with os.path.getctime()
# Process response
if resp != 'error':
resp_body = resp[0]
resp_headers = resp[1]
content_type = resp_headers['Content-Type']
# End if response is JSON as there is no log file to download
if content_type == 'application/json':
log.info('No more logs available')
return False
# Process log file
elif content_type == 'application/octet-stream':
file_name = resp_headers['Content-Disposition'].split('=\"')
file_name = file_name[1][:-1]
# Save files to LOG_FILE_PATH
write_file(os.path.join(LOG_FILE_PATH, file_name), resp_body)
# Save mc-siem-token page token to check point directory
write_file(checkpoint_filename, resp_headers['mc-siem-token'])
try:
if syslog_output is True:
for filename in os.listdir(LOG_FILE_PATH):
                        file_creation_time = os.path.getctime(os.path.join(LOG_FILE_PATH, filename))
                        # only ship files written during this run; comparing the
                        # original formatted date strings lexicographically was unreliable
                        if file_creation_time >= now:
log.info('Loading file: ' + filename + ' to output to ' + syslog_server + ':' + str(syslog_port))
with open(file=os.path.join(LOG_FILE_PATH, filename), mode='r', encoding='utf-8') as log_file:
lines = log_file.read().splitlines()
for line in lines:
syslogger.info(line)
log.info('Syslog output completed for file ' + filename)
except Exception as e:
log.error('Unexpected error writing to syslog. Exception: ' + str(e))
# return true to continue loop
return True
else:
# Handle errors
log.error('Unexpected response')
for header in resp_headers:
log.error(header)
return False
def run_script():
# discover base URL
try:
base_url = get_base_url(email_address=EMAIL_ADDRESS)
except Exception as e:
log.error('Error discovering base url for ' + EMAIL_ADDRESS + ' . Exception: ' + str(e))
quit()
# Request log data in a loop until there are no more logs to collect
try:
log.info('Getting MTA log data')
while get_mta_siem_logs(checkpoint_dir=CHK_POINT_DIR, base_url=base_url, access_key=ACCESS_KEY,
secret_key=SECRET_KEY) is True:
log.info('Getting more MTA log files')
except Exception as e:
log.error('Unexpected error getting MTA logs ' + (str(e)))
    file_number = len([name for name in os.listdir(LOG_FILE_PATH) if os.path.isfile(os.path.join(LOG_FILE_PATH, name))])
if delete_files or file_number >= log_file_threshold:
for filename in os.listdir(LOG_FILE_PATH):
file_path = os.path.join(LOG_FILE_PATH, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
quit()
# Run script
run_script()
It seems like it may be a race condition, but I am not sure how to confirm that since I can't reproduce it. I notice that SumoLogic has a modified version of this script with a different methodology for managing the files/paths. If that script works better than the main sample script above, could anybody explain WHY? I haven't had any issues with it yet.
https://github.com/SumoLogic/sumologic-content/blob/master/MimeCast/SumoLogic-Mimecast-Data-Collection/siem_collection.py
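One thing worth checking while debugging the hang (this is an assumption on my part, not something from Mimecast's docs): none of the requests.post calls in the sample set a timeout, and requests will block forever by default if the server stops responding mid-read. A minimal sketch of a guarded wrapper:

import requests

def post_with_timeout(url, body, headers, connect_timeout=10, read_timeout=120):
    # timeout=(connect, read) makes requests raise instead of hanging
    # forever when the API stops responding mid-transfer
    try:
        return requests.post(url, data=body, headers=headers,
                             timeout=(connect_timeout, read_timeout))
    except requests.exceptions.Timeout:
        return None  # surfaces the stall as a loggable error, not a hung process

If the script then starts logging timeout errors instead of hanging, the stall is a blocked network read rather than a race condition.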
Related
I'm trying to get a database backup from Odoo ERP using ftputil. This is the code:
# -*- coding: utf-8 -*-
from odoo import models, fields, api, tools, _
from odoo.exceptions import Warning
import odoo
from odoo.http import content_disposition
import logging
_logger = logging.getLogger(__name__)
import os
import datetime
try:
from xmlrpc import client as xmlrpclib
except ImportError:
import xmlrpclib
import time
import base64
import socket
try:
import ftputil
except ImportError:
raise ImportError(
'This module needs ftputil to automatically write backups to the FTP through ftp. Please install ftputil on your system. (sudo pip3 install ftputil)')
def execute(connector, method, *args):
res = False
try:
res = getattr(connector, method)(*args)
except socket.error as error:
_logger.critical('Error while executing the method "execute". Error: ' + str(error))
raise error
return res
class db_backup(models.Model):
_name = 'db.backup'
    @api.multi
def get_db_list(self, host, port, context={}):
uri = 'http://' + host + ':' + port
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
db_list = execute(conn, 'list')
return db_list
    @api.multi
def _get_db_name(self):
dbName = self._cr.dbname
return dbName
# Columns for local server configuration
host = fields.Char('Host', required=True, default='localhost')
port = fields.Char('Port', required=True, default=8069)
name = fields.Char('Database', required=True, help='Database you want to schedule backups for',
default=_get_db_name)
folder = fields.Char('Backup Directory', help='Absolute path for storing the backups', required='True',
default='/odoo/backups')
backup_type = fields.Selection([('zip', 'Zip'), ('dump', 'Dump')], 'Backup Type', required=True, default='zip')
autoremove = fields.Boolean('Auto. Remove Backups',
                                help='If you check this option you can choose to automatically remove the backup after xx days')
days_to_keep = fields.Integer('Remove after x days',
help="Choose after how many days the backup should be deleted. For example:\nIf you fill in 5 the backups will be removed after 5 days.",
required=True)
# Columns for external server (SFTP)
sftp_write = fields.Boolean('Write to external server with sftp',
help="If you check this option you can specify the details needed to write to a remote server with SFTP.")
sftp_path = fields.Char('Path external server',
help='The location to the folder where the dumps should be written to. For example /odoo/backups/.\nFiles will then be written to /odoo/backups/ on your remote server.')
sftp_host = fields.Char('IP Address SFTP Server',
help='The IP address from your remote server. For example 192.168.0.1')
sftp_port = fields.Integer('SFTP Port', help='The port on the FTP server that accepts SSH/SFTP calls.', default=22)
sftp_user = fields.Char('Username SFTP Server',
help='The username where the SFTP connection should be made with. This is the user on the external server.')
sftp_password = fields.Char('Password User SFTP Server',
help='The password from the user where the SFTP connection should be made with. This is the password from the user on the external server.')
days_to_keep_sftp = fields.Integer('Remove SFTP after x days',
help='Choose after how many days the backup should be deleted from the FTP server. For example:\nIf you fill in 5 the backups will be removed after 5 days from the FTP server.',
default=30)
send_mail_sftp_fail = fields.Boolean('Auto. E-mail on backup fail',
                                         help='If you check this option you can choose to automatically get e-mailed when the backup to the external server failed.')
email_to_notify = fields.Char('E-mail to notify',
help='Fill in the e-mail where you want to be notified that the backup failed on the FTP.')
    @api.multi
def _check_db_exist(self):
self.ensure_one()
db_list = self.get_db_list(self.host, self.port)
if self.name in db_list:
return True
return False
_constraints = [(_check_db_exist, _('Error ! No such database exists!'), [])]
    @api.multi
def test_sftp_connection(self, context=None):
self.ensure_one()
# Check if there is a success or fail and write messages
messageTitle = ""
messageContent = ""
error = ""
has_failed = False
for rec in self:
db_list = self.get_db_list(rec.host, rec.port)
pathToWriteTo = rec.sftp_path
ipHost = rec.sftp_host
portHost = rec.sftp_port
usernameLogin = rec.sftp_user
passwordLogin = rec.sftp_password
# Connect with external server over SFTP, so we know sure that everything works.
try:
with ftputil.FTPHost(ipHost, usernameLogin, passwordLogin) as s:
messageTitle = _("Connection Test Succeeded!\nEverything seems properly set up for FTP back-ups!")
except Exception as e:
_logger.critical('There was a problem connecting to the remote ftp: ' + str(e))
error += str(e)
has_failed = True
messageTitle = _("Connection Test Failed!")
if len(rec.sftp_host) < 8:
messageContent += "\nYour IP address seems to be too short.\n"
messageContent += _("Here is what we got instead:\n")
finally:
if s:
s.close()
if has_failed:
raise Warning(messageTitle + '\n\n' + messageContent + "%s" % str(error))
else:
raise Warning(messageTitle + '\n\n' + messageContent)
    @api.model
def schedule_backup(self):
conf_ids = self.search([])
for rec in conf_ids:
db_list = self.get_db_list(rec.host, rec.port)
if rec.name in db_list:
try:
if not os.path.isdir(rec.folder):
os.makedirs(rec.folder)
except:
raise
# Create name for dumpfile.
bkp_file = '%s_%s.%s' % (time.strftime('%Y_%m_%d_%H_%M_%S'), rec.name, rec.backup_type)
file_path = os.path.join(rec.folder, bkp_file)
uri = 'http://' + rec.host + ':' + rec.port
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
bkp = ''
try:
# try to backup database and write it away
fp = open(file_path, 'wb')
odoo.service.db.dump_db(rec.name, fp, rec.backup_type)
fp.close()
except Exception as error:
_logger.debug(
"Couldn't backup database %s. Bad database administrator password for server running at http://%s:%s" % (
rec.name, rec.host, rec.port))
_logger.debug("Exact error from the exception: " + str(error))
continue
else:
_logger.debug("database %s doesn't exist on http://%s:%s" % (rec.name, rec.host, rec.port))
# Check if user wants to write to SFTP or not.
if rec.sftp_write is True:
try:
# Store all values in variables
dir = rec.folder
pathToWriteTo = rec.sftp_path
ipHost = rec.sftp_host
portHost = rec.sftp_port
usernameLogin = rec.sftp_user
passwordLogin = rec.sftp_password
_logger.debug('sftp remote path: %s' % pathToWriteTo)
try:
with ftputil.FTPHost(ipHost, usernameLogin, passwordLogin) as sftp:
pass
except Exception as error:
_logger.critical('Error connecting to remote server! Error: ' + str(error))
try:
sftp.chdir(pathToWriteTo)
except IOError:
# Create directory and subdirs if they do not exist.
currentDir = ''
for dirElement in pathToWriteTo.split('/'):
currentDir += dirElement + '/'
try:
sftp.chdir(currentDir)
except:
_logger.info('(Part of the) path didn\'t exist. Creating it now at ' + currentDir)
# Make directory and then navigate into it
sftp.mkdir(currentDir, 777)
sftp.chdir(currentDir)
pass
sftp.chdir(pathToWriteTo)
# Loop over all files in the directory.
for f in os.listdir(dir):
if rec.name in f:
fullpath = os.path.join(dir, f)
if os.path.isfile(fullpath):
try:
sftp.StatResult(os.path.join(pathToWriteTo, f))
_logger.debug(
'File %s already exists on the remote FTP Server ------ skipped' % fullpath)
# This means the file does not exist (remote) yet!
except IOError:
try:
# sftp.put(fullpath, pathToWriteTo)
sftp.upload(fullpath, os.path.join(pathToWriteTo, f))
_logger.info('Copying File % s------ success' % fullpath)
except Exception as err:
_logger.critical(
'We couldn\'t write the file to the remote server. Error: ' + str(err))
# Navigate in to the correct folder.
sftp.chdir(pathToWriteTo)
# Loop over all files in the directory from the back-ups.
# We will check the creation date of every back-up.
for file in sftp.listdir(pathToWriteTo):
if rec.name in file:
# Get the full path
fullpath = os.path.join(pathToWriteTo, file)
# Get the timestamp from the file on the external server
timestamp = sftp.StatResult(fullpath).st_atime
createtime = datetime.datetime.fromtimestamp(timestamp)
now = datetime.datetime.now()
delta = now - createtime
                            # If the file is older than days_to_keep_sftp (the number of days to keep
                            # that the user filled in on the Odoo form), it will be removed.
if delta.days >= rec.days_to_keep_sftp:
# Only delete files, no directories!
if sftp.isfile(fullpath) and (".dump" in file or '.zip' in file):
_logger.info("Delete too old file from SFTP servers: " + file)
sftp.unlink(file)
# Close the SFTP session.
sftp.close()
except Exception as e:
_logger.debug('Exception! We couldn\'t back up to the FTP server..')
# At this point the SFTP backup failed. We will now check if the user wants
# an e-mail notification about this.
if rec.send_mail_sftp_fail:
try:
ir_mail_server = self.env['ir.mail_server']
message = "Dear,\n\nThe backup for the server " + rec.host + " (IP: " + rec.sftp_host + ") failed.Please check the following details:\n\nIP address SFTP server: " + rec.sftp_host + "\nUsername: " + rec.sftp_user + "\nPassword: " + rec.sftp_password + "\n\nError details: " + tools.ustr(
e) + "\n\nWith kind regards"
msg = ir_mail_server.build_email("auto_backup#" + rec.name + ".com", [rec.email_to_notify],
"Backup from " + rec.host + "(" + rec.sftp_host + ") failed",
message)
ir_mail_server.send_email(self._cr, self._uid, msg)
except Exception:
pass
"""
Remove all old files (on local server) in case this is configured..
"""
if rec.autoremove:
dir = rec.folder
# Loop over all files in the directory.
for f in os.listdir(dir):
fullpath = os.path.join(dir, f)
# Only delete the ones which are from the current database
# (Makes it possible to save different databases in the same folder)
if rec.name in fullpath:
timestamp = os.stat(fullpath).st_ctime
createtime = datetime.datetime.fromtimestamp(timestamp)
now = datetime.datetime.now()
delta = now - createtime
if delta.days >= rec.days_to_keep:
# Only delete files (which are .dump and .zip), no directories.
if os.path.isfile(fullpath) and (".dump" in f or '.zip' in f):
_logger.info("Delete local out-of-date file: " + fullpath)
os.remove(fullpath)
I can't get past this logger:
_logger.critical('We couldn\'t write the file to the remote server. Error: ' + str(err))
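If I'm reading the posted code right, the root cause is that the connection is opened as with ftputil.FTPHost(...) as sftp: pass, so the session is already closed by the time sftp.chdir(...) and sftp.upload(...) run. (Also note ftputil speaks plain FTP, not SFTP, despite the variable names.) A minimal sketch of the upload step with the connection kept open for the whole transfer:

import os
import ftputil

def upload_backups(host, user, password, local_dir, remote_dir, db_name):
    # one connection for the whole transfer; it is closed when the
    # with-block exits, not before the uploads happen
    with ftputil.FTPHost(host, user, password) as conn:
        if not conn.path.isdir(remote_dir):
            conn.makedirs(remote_dir)
        for name in os.listdir(local_dir):
            if db_name not in name:
                continue  # only ship backups for this database
            local_path = os.path.join(local_dir, name)
            remote_path = conn.path.join(remote_dir, name)
            if os.path.isfile(local_path) and not conn.path.exists(remote_path):
                conn.upload(local_path, remote_path)

As far as I can tell, the sftp.StatResult(...) calls in the posted code are also wrong: StatResult is the type ftputil returns, and the method to call is conn.stat(path); the conn.path.exists(...) check above sidesteps that entirely.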
I have the following script working properly. It checks IP information from the NetBox API. I would like to know what to add so I can import a list of IPs and run them against the script:
#!/bin/python3
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning) #Disable warning for SSL Error
ip_address = input("Enter the IP Address you want to search: ")
apiBaseUrl = "https://netbox.local/api"
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Token 5c915999999998ad82112b3b5880199769894421' #Here you can add your own token
}
def get_hostInterfaceDescription(ip4):
resp = requests.get(apiBaseUrl + '/ipam/ip-addresses/?q=' + ip4,headers=headers,verify=False).json()
return resp['results'][0]["description"] #this gets the description information
try:
desc = get_hostInterfaceDescription(ip_address)
print("")
print("Description found in Netbox: " + desc)
except (TypeError, IndexError):
print("Description Not found")
def get_hostInterfaceTenant(ip4):
resp = requests.get(apiBaseUrl + '/ipam/ip-addresses/?q=' + ip4,headers=headers,verify=False).json()
return resp['results'][0]["tenant"]["name"] #this gets the description information
try:
tenant = get_hostInterfaceTenant(ip_address)
print("")
print("Tenant found in Netbox: " + tenant)
except (TypeError, IndexError):
print("Tenant Not found")
def get_hostInterfaceVRF(ip4):
resp = requests.get(apiBaseUrl + '/ipam/ip-addresses/?q=' + ip4, headers=headers, verify=False).json()
return resp['results'][0]["tenant"]["name"] # this gets the description information
try:
vrf = get_hostInterfaceVRF(ip_address)
print("")
print("VRF found in Netbox: " + vrf)
except (TypeError, IndexError):
print("VRF Not Found")
Loading a text file and looping over its contents is simple in Python:
with open("ip_list.txt") as ip_list:
for address in ip_list:
address = address.strip() # remove trailing newline
do_something_with(address)
In your case, do_something_with might look like this:
def do_something_with(ip4):
try:
desc = get_hostInterfaceDescription(ip4)
...
except ...:
...
try:
tenant = get_hostInterfaceTenant(ip4)
...
except ...:
...
...
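Putting it together, a minimal end-to-end sketch (reusing the helper functions from your script, and assuming one address per line in ip_list.txt):

def do_something_with(ip4):
    try:
        print("Description found in Netbox: " + get_hostInterfaceDescription(ip4))
    except (TypeError, IndexError):
        print("Description Not found")
    try:
        print("Tenant found in Netbox: " + get_hostInterfaceTenant(ip4))
    except (TypeError, IndexError):
        print("Tenant Not found")
    try:
        print("VRF found in Netbox: " + get_hostInterfaceVRF(ip4))
    except (TypeError, IndexError):
        print("VRF Not Found")

with open("ip_list.txt") as ip_list:
    for address in ip_list:
        address = address.strip()
        if address:  # skip blank lines
            do_something_with(address)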
I need a script that works like a cPanel checker, with more than one URL, where the URLs are stored in a txt file.
usage: python script.py list.txt
format in file list.txt: https://demo.cpanel.net:2083|democom|DemoCoA5620
This is my code, but it doesn't work. Can someone help me?
Thanks.
import requests, sys
from multiprocessing.dummy import Pool as ThreadPool
try:
with open(sys.argv[1], 'r') as f:
list_data = [line.strip() for line in f if line.strip()]
except IOError:
pass
def cpanel(url):
try:
data = {'user':'democom', 'pass':'DemoCoA5620'}
r = requests.post(url, data=data)
if r.status_code==200:
print "login success"
else:
print "login failed"
except:
pass
def chekers(url):
try:
cpanel(url)
except:
pass
def Main():
try:
start = timer()
pp = ThreadPool(25)
pr = pp.map(chekers, list_data)
print('Time: ' + str(timer() - start) + ' seconds')
except:
pass
if __name__ == '__main__':
Main()
I fixed your code so that it returns an array of booleans indicating, for each URL, whether the cpanel function succeeded.
from __future__ import print_function
import requests
from multiprocessing.pool import ThreadPool
list_data = ["https://demo.cpanel.net:2083|democom|DemoCoA5620",
             "https://demo.cpanel.net:2083|UserDoesNotExist|WRONGPASSWORD",
             ]
def cpanel(url):
try:
# try to split that url to get username / password
try:
url, username, password = url.split('|')
except Exception as e:
print("Url {} seems to have wrong format. Concrete error: {}".format(url, e))
return False
# build the correct url
url += '/login/?login_only=1'
# build post parameters
params = {'user': username,
'pass': password}
# make request
r = requests.post(url, params)
if r.status_code==200:
print("login for user {} success".format(username))
return True
else:
print("login for user {} failed due to Status Code {} and message \"{}\"".format(username, r.status_code, r.reason))
return False
except Exception as e:
print("Error occured for url {} ".format(e))
return False
def chekers(url):
return cpanel(url)
def Main():
try:
# start = timer()
pp = ThreadPool(1)
pr = pp.map(chekers, list_data)
print(pr)
# print('Time: ' + str(timer() - start) + ' seconds')
except:
pass
if __name__ == '__main__':
Main()
Output:
login for user democom success
login for user UserDoesNotExist failed due to Status Code 401 and message "Access Denied"
[True, False]
Be aware that I replaced your file read operation with some fixed URLs.
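To restore reading the list from the file given on the command line (a sketch, same url|username|password format, one entry per line):

import sys

with open(sys.argv[1]) as f:
    # skip blank lines; each kept line is url|username|password
    list_data = [line.strip() for line in f if line.strip()]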
Since you use requests.post, I guess you actually want to POST something to those URLs. Your code does not do that. If you just want to send a request, use the requests.get method.
See the official documentation for the requests package for more details: https://2.python-requests.org/en/master/user/quickstart/#make-a-request
Also note that
"but it doesn't work"
is NOT a question.
I'm coding a website cloner in Python. It is doing fine for most files, but I have hit a challenge in getting the URL of background images, e.g.
<div style="background-image: url(images/banner.jpg)" >
The script detects background-image as a folder and assumes the URL is 'background-image: url(images/banner.jpg'. How do I set it to get the actual URL?
Python 2.7
import urllib2
import sys
import socket
import os
import re
socket.setdefaulttimeout(15)
dataTypesToDownload = [".jpg", ".jpeg", ".png", ".gif", ".ico", ".css", ".js", ".html"]
url = 'http://example.com/'
pathbase = 'theme'
if "http://" not in url and "https://" not in url:
url = "http://"+url
try:
os.mkdir(pathbase)
except OSError:
pass
file = open(pathbase + "/index.html", "w")
try:
content = urllib2.urlopen(url).read()
except urllib2.URLError as e:
print "An error occured: " + str(e.reason)
exit()
resources = re.split("=\"|='", content)
first = False
for resource in resources:
if first == False:
first = True
continue
resource = re.split("\"|'", resource)[0]
if any(s in resource for s in dataTypesToDownload):
print "Downloading " + resource
try:
path = resource.split("/")
if len(path) != 1:
path.pop(len(path) - 1)
trail = "./" + pathbase + "/"
for folder in path:
trail += folder+"/"
try:
os.mkdir(trail)
except OSError:
pass
except IOError:
pass
try:
if "?" in resource:
download = open(pathbase + "/"+resource.split("?")[len(resource.split("?")) - 2], "w")
else:
download = open(pathbase + "/"+resource, "w")
print url+"/"+resource
dContent = urllib2.urlopen(url+"/"+resource).read()
except urllib2.URLError as e:
print "An error occured: " + str(e.reason)
download.close()
continue
except IOError:
pass
continue
download.write(dContent)
download.close()
print "Downloaded!"
file.write(content)
file.close()
I expect that when it encounters style="background-image: url(images/banner.jpg)",
it should set resource to images/banner.jpg. But it is setting resource to background-image: url(images/banner.jpg instead.
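One way to handle this (a sketch, not tested against the full script) is to extract url(...) values from inline styles with a dedicated regex, instead of relying on the generic ="/=' split, which treats everything after style=" as one attribute value:

import re

# matches url(...) with optional quotes, e.g. url(images/banner.jpg),
# url('images/banner.jpg') or url("images/banner.jpg")
CSS_URL_RE = re.compile(r"url\(\s*['\"]?([^'\"\)]+)['\"]?\s*\)")

def extract_style_urls(html):
    # returns e.g. ['images/banner.jpg'] for the <div> above
    return CSS_URL_RE.findall(html)

The extracted paths can then be appended to the resources list so the existing download loop treats them like any other asset.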
I can upload a document to GDrive, but I want it to work in overwrite mode, meaning the second upload will overwrite the same file.
Code snippet as below:
import subprocess
import re
import gdata.client, gdata.docs.client, gdata.docs.data
import atom.data
import os
import time
#----------------------------------------#
def main():
filename = "test.xls"
mimetype = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
username = "mymail#gmail.com"
password = "mypassword"
upload_gdoc(filename,mimetype,username,password)
return
def upload_gdoc(filename,mimetype,username,password):
try:
fdin = open(filename)
except IOError, e:
print 'ERROR: Unable to open ' + filename + ': ' + e[1]
return
file_size = os.path.getsize(fdin.name)
docsclient = gdata.docs.client.DocsClient()
try:
docsclient.ClientLogin(username, password, docsclient.source);
except (gdata.client.BadAuthentication, gdata.client.Error), e:
print 'ERROR: ' + str(e)
return
except:
print 'ERROR: Unable to login'
return
# The default root collection URI
uri = 'https://docs.google.com/feeds/upload/create-session/default/private/full'
uri += '?convert=true'
t1 = time.time()
uploader = gdata.client.ResumableUploader(docsclient,fdin,mimetype,file_size,chunk_size=1048576,desired_class=gdata.data.GDEntry)
new_entry = uploader.UploadFile(uri,entry=gdata.data.GDEntry(title=atom.data.Title(text=os.path.basename(fdin.name))))
t2 = time.time()
print 'Uploaded', '{0:.2f}'.format(file_size / 1024.0 / 1024.0) + ' MiB in ' + str(round(t2 - t1, 2)) + ' secs'
fdin.close()
return
if __name__ == "__main__":
main()
Right now, each upload creates a new file with the same file name.
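For what it's worth, the gdata Documents List API used above has long been deprecated. With the current google-api-python-client, an overwrite is an update against the existing file ID rather than a second insert. A sketch, assuming you already have OAuth credentials and the ID of the previously uploaded file:

from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload

def overwrite_file(creds, file_id, local_path, mimetype):
    # files().update replaces the content of an existing file in place,
    # keeping the same file ID, instead of creating a duplicate
    service = build('drive', 'v3', credentials=creds)
    media = MediaFileUpload(local_path, mimetype=mimetype, resumable=True)
    return service.files().update(fileId=file_id, media_body=media).execute()

You would capture file_id once (e.g. from the response of the first upload) and reuse it on every subsequent run.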