override exception handling in class function of imported module - python

I have a definition inside of a class which handles an exception in a manner I don't like.
The class itself is inside a module, which itself is called by a module that I import.
The error handling I don't like looks like this:
class BitSharesWebsocket(Events):
    # [snip]
    def run_forever(self):
        """ This method is used to run the websocket app continuously.
            It will execute callbacks as defined and try to stay
            connected with the provided APIs
        """
        cnt = 0
        while not self.run_event.is_set():
            cnt += 1
            self.url = next(self.urls)
            log.debug("Trying to connect to node %s" % self.url)
            try:
                # websocket.enableTrace(True)
                self.ws = websocket.WebSocketApp(
                    self.url,
                    on_message=self.on_message,
                    on_error=self.on_error,
                    on_close=self.on_close,
                    on_open=self.on_open
                )
                self.ws.run_forever()
            except websocket.WebSocketException as exc:
                if (self.num_retries >= 0 and cnt > self.num_retries):
                    raise NumRetriesReached()
                sleeptime = (cnt - 1) * 2 if cnt < 10 else 10
                if sleeptime:
                    log.warning(
                        "Lost connection to node during wsconnect(): %s (%d/%d) "
                        % (self.url, cnt, self.num_retries) +
                        "Retrying in %d seconds" % sleeptime
                    )
                    time.sleep(sleeptime)
I wish to preempt the exception here:
except websocket.WebSocketException as exc:
and handle it in my own way, namely to try a new address rather than trying the same address again and again.
I am presented with this exception when calling:
from bitshares.blockchain import Blockchain
from bitshares import BitShares
try:
    chain = Blockchain(bitshares_instance=BitShares(n))
except:
    print('hello world')
    pass
When n is a bad/unresponsive websocket address, I never get the 'hello world' message, because the module handles the exception before I do.
the module is hosted at github here:
https://github.com/xeroc/python-bitshares/blob/9250544ca8eadf66de31c7f38fc37294c11f9548/bitsharesapi/websocket.py
I can do:
from bitsharesapi import websocket as ws
but I am not sure what to do with the imported module ws to preempt its exception handling, or whether this is even the correct way to approach it.
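A sketch of one way this could work (hypothetical, not from the original post): since bitsharesapi.websocket is importable, its BitSharesWebsocket.run_forever method could be replaced before any BitShares instance is created, using only the attributes shown in the snippet above (the exact module layout may differ between versions):

from bitsharesapi import websocket as ws_module

def run_forever_next_node(self):
    # Simplified replacement: on a websocket error, move straight on to the
    # next URL from self.urls instead of sleeping and retrying the same one.
    while not self.run_event.is_set():
        self.url = next(self.urls)
        try:
            self.ws = ws_module.websocket.WebSocketApp(
                self.url,
                on_message=self.on_message,
                on_error=self.on_error,
                on_close=self.on_close,
                on_open=self.on_open
            )
            self.ws.run_forever()
        except ws_module.websocket.WebSocketException:
            continue  # try the next node immediately

ws_module.BitSharesWebsocket.run_forever = run_forever_next_node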

I resolved my issue. For this call:
chain = Blockchain(bitshares_instance=BitShares(n))
I was able to do:
chain = Blockchain(bitshares_instance=BitShares(n, num_retries=0))
I had previously tried this and assumed it wouldn't work:
chain = Blockchain(bitshares_instance=BitShares(n), num_retries=0)
*note the parenthesis placement
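A minimal sketch of the resolved pattern in context (the node_addresses list and the broad except are illustrative, not from the original post): with num_retries=0 the library raises instead of retrying the same node, so a bad address falls through to the caller, which can then move on to the next one:

from bitshares import BitShares
from bitshares.blockchain import Blockchain

for n in node_addresses:  # hypothetical list of websocket URLs to try
    try:
        chain = Blockchain(bitshares_instance=BitShares(n, num_retries=0))
        break  # connected successfully
    except Exception:
        continue  # bad/unresponsive node, try the next address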

Related

TypeError("can't pickle generator objects"): concurrent.futures versus asyncio

I want to concurrently execute the same instance method from each object in a list in Python.
I created a DataPipe class that downloads pages and stores the results in an array. When I'm done downloading the links of interest for a specific domain, I yield these pages and then yield the corresponding items from those pages.
The code works pretty much as expected, and now I want to download from multiple domains at the same time.
class DownloadCommand(Command):
    def __init__(self, domain):
        self.domain = domain
        self.request_config = {'headers': self.domain.get_header(), 'proxy': self.domain.get_proxy()}
        self.data_pipe = DataPipe(command=self)

    def execute(self):
        # try:
        for brand, start_urls in self.domain.start_url.items():
            for start_url in start_urls:
                # yield from self.data_pipe.get_item_divs(brand, start_url)
                yield from self.data_pipe.get_item_divs(brand, start_url)
Currently, I'm doing this sequentially.
def scrape(self):
    for domain in self.get_initial_domain_list():
        yield from self.fetch_from_dom(domain)

def fetch_from_dom(self, domain):
    self.set_current_domain(domain)
    for start_url_values, brand, start_url in domain.command.execute():
        for items in start_url_values:
            yield [self.get_item_value(item_div) for item_div in items]
I tried to parallelize this application using multiprocessing.pool.Pool, but it does not work for instance methods. Then, when I used ProcessingPool from pathos.multiprocessing, it returned an error:
multiprocess.pool.MaybeEncodingError: Error sending result: '[<generator object fetch_from_dom at 0x7fa984814af0>]'. Reason: 'TypeError("can't pickle generator objects",)'
I want to switch to either asyncio or concurrent.futures, but I'm not sure which one would be better for what I want, or whether it's actually possible in Python to concurrently execute instance methods from objects in a list. Can anyone help?
You can't use Selenium with Python multiprocessing because it clones memory. You can try a simpler approach with threads, but this is my solution for multiprocessing.
NOTE: here self is my driver, because I have a custom class implemented on top of Selenium.
# Exit function
def cleanup(self):
    print("++cleanup()++")
    try:
        try:
            self.close()
        except Exception as e:
            # print("except cleanup - 2 - self.close() -> %s" % e)
            pass
        try:
            self.quit()
        except Exception as e:
            # print("except cleanup - 3 - self.quit() -> %s" % e)
            pass
        try:
            self.dispose()
            # print("Fake disabled dispose()")
        except Exception as e:
            # print("except cleanup - 4 - self.dispose() -> %s" % e)
            pass
        try:
            self.service.process.send_signal(signal.SIGTERM)
        except Exception as e:
            # print("except cleanup - 1 - self.service.process.send_signal(signal.SIGTERM) -> %s" % e)
            pass
    except Exception as e:
        print("Except - CLEANUP -> %s" % e)
        # print(str(e))
        pass
In the script code:
# Before starting threads
browser.cleanup()
del browser
# Now start multiprocessing and instantiate a browser in each subprocess
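Coming back to the original pickling error: generators cannot be sent across process boundaries, so one workaround is to exhaust each domain's generator inside the worker and only pass plain lists back. A minimal sketch with concurrent.futures threads, reusing the scrape/fetch_from_dom methods from the question (note that fetch_from_dom mutates shared state via set_current_domain, so this only works if that state is made per-call or thread-safe):

from concurrent.futures import ThreadPoolExecutor

def fetch_all_items(scraper, domain):
    # Exhaust the generator inside the worker so only plain lists are returned.
    return list(scraper.fetch_from_dom(domain))

def scrape_concurrently(scraper, max_workers=4):
    domains = list(scraper.get_initial_domain_list())
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(fetch_all_items, scraper, d) for d in domains]
        for future in futures:
            yield from future.result()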

Exception bypassing in Looping call Python

import sys

from twisted.internet import reactor, defer, task
from twisted.python import log

def periodic_task():
    log.msg("periodic task running")
    x = 10 / 0

def periodic_task_crashed(reason):
    log.err(reason, "periodic_task broken")

log.startLogging(sys.stdout)

my_task = task.LoopingCall(periodic_task)
d = my_task.start(1)
d.addErrback(periodic_task_crashed)

reactor.run()
I am getting the output, but it stops the script. Is there any way to keep the script running even if there is an exception? To be frank, instead of x = 10 / 0 I am making some API calls, and when there is an error the script stops. What I want is for the script to keep running even if there is an error, and to check again and again.
Just handle the exception: use a try ... except block around the code you know might fail.
def periodic_task():
    log.msg("periodic task running")
    try:
        x = 10 / 0
    except Exception as error:
        # You should at least log the error here; exceptions should not pass silently.
        pass
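Putting that together with the LoopingCall setup from the question, a minimal sketch (the ZeroDivisionError stands in for whatever your API calls raise): because the exception is handled inside periodic_task, the deferred returned by start() never fires its errback, and the task keeps running every second.

import sys

from twisted.internet import reactor, task
from twisted.python import log

def periodic_task():
    log.msg("periodic task running")
    try:
        x = 10 / 0  # stand-in for the real API calls
    except ZeroDivisionError:
        # Inside an except block, log.err(None, ...) logs the current exception.
        log.err(None, "periodic_task failed, will retry on next tick")

log.startLogging(sys.stdout)
task.LoopingCall(periodic_task).start(1)
reactor.run()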
To ensure that the script continues to run even if there is an error, use a try / except block.
Within the except block, to make the code check the error again and again as specified in your question, you can use function recursion to run the function again from within itself:
def periodic_task():
    log.msg("periodic task running")
    try:
        x = 10 / 0  # include 'API calls' here
    except:  # include 'exception type'
        periodic_task()
There are many pitfalls with function recursion, though, so be wary!

Suggestions to improve my Python function to parse WordPress config.php

I am writing a Python script to back up WordPress. As part of the script, I wrote a function to fetch database details from the config.php file.
How my function works
The function takes the WordPress installation location as an argument and uses regexes to match db_name, db_user, db_host and db_password from that file; the function will exit if it cannot find "config.php". I am using sys.exit(1) to exit from the function. Is that the proper way to exit from a function? I am pasting my function code snippet.
def parsing_db_info(location):
    config_path = os.path.normpath(location+'/config.php')
    if os.path.exists(config_path):
        try:
            regex_db = r'define\(\s*?\'DB_NAME\'\s*?,\s*?\'(.*?)\'\s*?'.group(1)
            regex_user = r'define\(\s*?\'DB_USER\'\s*?,\s*?\'(.*?)\'\s*?'.group(1)
            regex_pass = r'define\(\s*?\'DB_PASSWORD\'\s*?,\s*?\'(.*?)\'\s*?'.group(1)
            regex_host = r'define\(\s*?\'DB_HOST\'\s*?,\s*?\'(.*?)\'\s*?'.group(1)
            db_name = re.match(regex_db,config_path).group(1)
            db_user = re.match(regex_user,config_path).group(1)
            db_pass = re.match(regex_pass,config_path).group(1)
            db_host = re.match(regex_host,config_path).group(1)
            return {'dbname':db_name , 'dbuser':db_user , 'dbpass':db_pass , 'dbhost':db_host}
        except exception as ERROR:
            print(ERROR)
            sys.exit(1)
    else:
        print('Not Found:',config_path)
        sys.exit(1)
AFTER EDITING
def parsing_db_info(location):
    config_path = os.path.normpath(location+'/wp-config.php')
    try:
        with open(config_path) as fh:
            content = fh.read()
        regex_db = r'define\(\s*?\'DB_NAME\'\s*?,\s*?\'(.*?)\'\s*?'
        regex_user = r'define\(\s*?\'DB_USER\'\s*?,\s*?\'(.*?)\'\s*?'
        regex_pass = r'define\(\s*?\'DB_PASSWORD\'\s*?,\s*?\'(.*?)\'\s*?'
        regex_host = r'define\(\s*?\'DB_HOST\'\s*?,\s*?\'(.*?)\'\s*?'
        db_name = re.search(regex_db,content).group(1)
        db_user = re.search(regex_user,content).group(1)
        db_pass = re.search(regex_pass,content).group(1)
        db_host = re.search(regex_host,content).group(1)
        return {'dbname':db_name , 'dbuser':db_user , 'dbpass':db_pass , 'dbhost':db_host}
    except FileNotFoundError:
        print('File Not Found,',config_path)
        sys.exit(1)
    except PermissionError:
        print('Unable To read Permission Denied,',config_path)
        sys.exit(1)
    except AttributeError:
        print('Parsing Error wp-config seems to be corrupt,')
        sys.exit(1)
To answer your question, you shouldn't normally use sys.exit inside a function like that. Rather, get it to raise an exception in the case where it fails. Preferably, it should be an exception detailing what went wrong, or you could just let the existing exceptions propagate.
The normal rule in Python is this: deal with exceptions at the place you know how to deal with them.
In your code, you catch an exception, and then don't know what to do, so call sys.exit. Instead of this, you should do one of the following:
- let an exception propagate up to a top-level function which can catch it, and then call sys.exit if appropriate
- wrap the exception in something more specific and re-raise, so that a higher-level function will have a specific exception to catch. For example, your function might raise a custom ConfigFileNotFound exception or ConfigFileUnparseable exception.
Also, you have put except exception, you probably mean except Exception. However, this is extremely broad, and will mask other programming errors. Instead, catch the specific exception class you expect.
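A minimal sketch of that custom-exception approach (the exception names follow the answer's examples, and the helper is illustrative, not a drop-in replacement for the full function):

import os
import re
import sys

class ConfigFileNotFound(Exception):
    pass

class ConfigFileUnparseable(Exception):
    pass

def read_db_name(location):
    config_path = os.path.normpath(location + '/wp-config.php')
    try:
        with open(config_path) as fh:
            content = fh.read()
    except (FileNotFoundError, PermissionError) as err:
        raise ConfigFileNotFound(config_path) from err
    match = re.search(r'define\(\s*?\'DB_NAME\'\s*?,\s*?\'(.*?)\'', content)
    if match is None:
        raise ConfigFileUnparseable(config_path)
    return match.group(1)

# Only the top-level caller decides whether a failure should end the program:
if __name__ == '__main__':
    try:
        print(read_db_name('/var/www/html'))  # hypothetical install location
    except (ConfigFileNotFound, ConfigFileUnparseable) as err:
        print('Could not read database settings:', err)
        sys.exit(1)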

Mitmproxy load and unload scripts with python

I'm running a proxy as suggested in the mitmproxy GitHub examples:
import os

from libmproxy import proxy, flow

class MitmProxy(flow.FlowMaster):
    def run(self):
        try:
            flow.FlowMaster.run(self)
        except KeyboardInterrupt:
            self.shutdown()

    def handle_request(self, r):
        f = flow.FlowMaster.handle_request(self, r)
        if f:
            r.reply()
        return f

    def handle_response(self, r):
        f = flow.FlowMaster.handle_response(self, r)
        if f:
            r.reply()
        return f

config = proxy.ProxyConfig(
    cacert = os.path.expanduser("~/.ssl/mitmproxy.pem")
)
state = flow.State()
server = proxy.ProxyServer(config, 8083)
m = MitmProxy(server, state)

try:
    m.run()
except Exception, e:
    print e.message
    m.shutdown()
I want to handle each request/response without blocking the others.
For that I need to use the concurrent decorator and scripts.
My question is: how do I load and unload scripts into the proxy running in this configuration?
You can use concurrent mode with script loading; here is an example of this kind of usage.
I preferred to implement the mitmproxy logic at the flow level. You can use this code:
def handle_response(self, f):
    # Detach the reply so this flow does not block the others.
    reply = f.response.reply
    f.response.reply = controller.DummyReply()
    if hasattr(reply, "q"):
        f.response.reply.q = reply.q

    def run():
        # do the (possibly blocking) work here
        pass

    threading.Thread(target=run).start()
You basically have to copy how handle_concurrent_reply works in libmproxy.script:
f = flow.FlowMaster.handle_request(self, r)
if f:
    def run():
        r.reply()  # if you forget this you'll end up in a loop and never reply
    threading.Thread(target=run).start()  # this will start run
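Putting the pieces together, a sketch of a non-blocking handle_request for the MitmProxy class from the question (based only on the snippets above and the old libmproxy API they show; details may differ between versions):

import threading

class MitmProxy(flow.FlowMaster):
    def handle_request(self, r):
        f = flow.FlowMaster.handle_request(self, r)
        if f:
            def run():
                # per-request work goes here, without blocking other flows
                r.reply()  # always reply, or the flow will hang forever
            threading.Thread(target=run).start()
        return f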

Resolving and saving hostnames in parallel with Python

I'm trying to resolve a list of hostnames. The problem is that when I hit a non-existent domain, it slows down the whole process. The code is a trivial for loop:
for domain in domains:
    try:
        if socket.gethostbyname(domain.split('#')[1]):
            file1.write(domain)
        else:
            file2.write(domain)
    except socket.gaierror:
        pass
I was wondering if there is a simple way to parallelize what is inside the for loop.
You could use one of the examples from gevent, dns_mass_resolve.py. There's also the useful possibility of setting a timeout for all queries.
from __future__ import with_statement

import sys

import gevent
from gevent import socket
from gevent.pool import Pool

N = 1000
# limit ourselves to max 10 simultaneous outstanding requests
pool = Pool(10)
finished = 0

def job(url):
    global finished
    try:
        try:
            ip = socket.gethostbyname(url)
            print ('%s = %s' % (url, ip))
        except socket.gaierror:
            ex = sys.exc_info()[1]
            print ('%s failed with %s' % (url, ex))
    finally:
        finished += 1

with gevent.Timeout(2, False):
    for x in xrange(10, 10 + N):
        pool.spawn(job, '%s.com' % x)
    pool.join()

print ('finished within 2 seconds: %s/%s' % (finished, N))
I don't know a simple solution. Using multiple threads/processes would be complicated and probably wouldn't help that much, because your execution speed is bound by IO. Therefore I would have a look at some async lib like Twisted. There is a resolve method in IReactorCore: http://twistedmatrix.com/documents/12.2.0/api/twisted.internet.interfaces.IReactorCore.html
import thread

def resolve_one_domain(domain):
    ...

for domain in domains:
    thread.start_new_thread(resolve_one_domain, [domain])
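A more structured variant of that threading idea, as a sketch (not from the original answers; it keeps the question's file1/file2 handling and assumes Python 3's concurrent.futures):

import socket
from concurrent.futures import ThreadPoolExecutor

def resolve_one_domain(domain):
    # Return the domain together with whether it resolved.
    try:
        return domain, bool(socket.gethostbyname(domain.split('#')[1]))
    except socket.gaierror:
        return domain, None

with ThreadPoolExecutor(max_workers=10) as pool:
    for domain, resolved in pool.map(resolve_one_domain, domains):
        if resolved is None:
            continue  # lookup failed, matching the original bare pass
        (file1 if resolved else file2).write(domain)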
