The problem of using the try exception dynamically - python

I have a function called transform_exceptions() that takes a list of functions, calls each of them (the functions take no arguments), converts any exception that occurs into an ExceptionProxy object following the convention above, and finally returns the list of transformed errors, in the same order as the input functions.
Note: If a function executes without error, an ExceptionProxy object should still be created, with its msg value set to "ok!".
Sample:
# Problem skeleton: fill in ExceptionProxy and transform_exceptions so that the
# usage below prints each proxy's message and the wrapped function's name.
class ExceptionProxy(Exception):
    # define your class here

def transform_exceptions(func_ls):
    # implement your function here

def f():
    1/0  # always raises ZeroDivisionError

def g():
    pass  # succeeds, so its proxy message should be "ok!"

tr_ls = transform_exceptions([f, g])
for tr in tr_ls:
    print("msg: " + tr.msg + "\nfunction name: " + tr.function.__name__)
Output:
msg: division by zero
function name: f
msg: ok!
function name: g
my code :
from mimetypes import init
class ExceptionProxy(Exception):
    """Records the outcome of running a function: a message plus the function."""
    def __init__(self, msg, function):
        self.msg = msg            # str(exception), or "ok!" on success
        self.function = function  # the callable that was executed

def transform_exceptions(func_ls):
    """Call each zero-argument function in func_ls and return a list of
    ExceptionProxy objects in the same order: str(exception) as the message
    for functions that raise, "ok!" for functions that return normally."""
    exception_list = []
    for func in func_ls:
        try:
            # BUG FIX: the original wrote bare `func`, which evaluates the
            # function object without calling it, so no exception can occur.
            func()
        except Exception as e:
            r = ExceptionProxy(str(e), func)
            exception_list.append(r)
        else:
            r = ExceptionProxy("ok!", func)
            exception_list.append(r)
    return exception_list

You need to actually call each function when iterating over the list:
func()
Also modified code:
class ExceptionProxy(Exception):
    """Proxy pairing a function with the message describing its outcome."""

    def __init__(self, msg, function):
        self.msg = msg
        self.function = function


def transform_exceptions(func_ls):
    """Run every zero-argument callable in order and report each outcome as an
    ExceptionProxy: "ok!" on success, str(error) when the call raises."""
    transformed = []
    for fn in func_ls:
        try:
            fn()
        except Exception as exc:
            proxy = ExceptionProxy(str(exc), fn)
        else:
            proxy = ExceptionProxy("ok!", fn)
        transformed.append(proxy)
    return transformed

Related

python decorator takes 1 positional argument but 5 were given after modifying for pytest

I'd appreciate some help with the following code, as I'm still relatively new to Python, and despite countless days trying to figure out where i'm going wrong, i cant seem to spot the error i'm making.
I've adapted the following code from an article on medium to create a logging decorator and then enhanced it to try and "redact pandas df and dictionary" from the logs. Using functools caused me a problem with pytest and pytest fixtures. A post on stack overflow suggested dropping functools in favour of decorators.
def log_decorator(_func=None):
    # Decorator (usable with or without parentheses) that logs a function's
    # entry arguments, return value, and any exception.
    # NOTE(review): relies on module-level names not shown in this snippet
    # (Logger, args_excl_df_dict, kwargs_excl_df_dict, join_args_kwargs,
    # decorator, send_alert, APP_NAME, os, sys, getframeinfo, stack).
    def log_decorator_info(func):
        def log_decorator_wrapper(*args, **kwargs):
            _logger = Logger()
            logger_obj = _logger.get_logger()
            # repr() of positional/keyword args with DataFrames/dicts redacted
            args_passed_in_function = args_excl_df_dict(*args)
            kwargs_passed_in_function = kwargs_excl_df_dict(**kwargs)
            formatted_arguments = join_args_kwargs(args_passed_in_function,kwargs_passed_in_function)
            # Identify the calling file so the log record shows the caller
            py_file_caller = getframeinfo(stack()[1][0])
            extra_args = { 'func_name_override': func.__name__,'file_name_override': os.path.basename(py_file_caller.filename) }
            """ Before to the function execution, log function details."""
            logger_obj.info(f"Begin function - Arguments: {formatted_arguments}", extra=extra_args)
            try:
                """ log return value from the function """
                # NOTE(review): func is called here AND again in the return
                # below, so the wrapped function runs twice; this first call
                # also drops **kwargs. Worth confirming as a source of bugs.
                args_returned_from_function = args_excl_df_dict(func(*args))
                kwargs_returned_from_function = []
                formatted_arguments = join_args_kwargs(args_returned_from_function,kwargs_returned_from_function)
                logger_obj.info(f"End function - Returned: {formatted_arguments}", extra=extra_args)
            except:
                """log exception if occurs in function"""
                error_raised = str(sys.exc_info()[1])
                logger_obj.error(f"Exception: {str(sys.exc_info()[1])}",extra=extra_args)
                msg_to_send = f"{func.__name__} {error_raised}"
                send_alert(APP_NAME,msg_to_send,'error')
                raise
            return func(*args, **kwargs)
        # decorator.decorator() passes the wrapped function itself as the
        # wrapper's first positional argument — presumably why test_me received
        # 5 arguments instead of 4; verify against the `decorator` package docs.
        return decorator.decorator(log_decorator_wrapper, func)
    if _func is None:
        return log_decorator_info
    else:
        return log_decorator_info(_func)
Having adapted the above code i cant figure out what is causing the following error
args_returned_from_function = args_excl_df_dict(func(*args))
TypeError: test_me() takes 4 positional arguments but 5 were given
Other functions which the log decorator relies on
def args_excl_df_dict(*args):
    """Return repr() of each positional argument, substituting a placeholder
    for DataFrames and dicts so their contents never reach the logs."""
    args_list = []
    for a in args:
        # Collapse the original's duplicated if/else: only the value changes;
        # the append(repr(...)) was identical in both branches.
        if isinstance(a, (pd.DataFrame, dict)):
            a = 'redacted from log'
        args_list.append(repr(a))
    return args_list

def kwargs_excl_df_dict(**kwargs):
    """Return 'key=repr(value)' strings for keyword arguments, substituting a
    placeholder for DataFrame and dict values."""
    kwargs_list = []
    for k, v in kwargs.items():
        # Same duplicated-branch collapse as in args_excl_df_dict.
        if isinstance(v, (dict, pd.DataFrame)):
            v = 'redacted from log'
        kwargs_list.append(f"{k}={v!r}")
    return kwargs_list

def join_args_kwargs(args, kwargs):
    """Join the two pre-formatted argument lists into one comma-separated string."""
    formatted_arguments = ", ".join(args + kwargs)
    return str(formatted_arguments)
This is the code calling the decorator
#log_decorator.log_decorator()
# NOTE(review): the line above applies the decorator; its leading '#' is
# presumably a '@' mangled by the site's formatting — confirm.
def test_me(a, b, c, d):
    # Trivial target function used to reproduce the TypeError.
    return a, b

# string, number, dictionary and pandas_df are defined elsewhere (not shown).
test_me(string, number, dictionary, pandas_df)
I think the problem is that the wrapper is including the function as an argument to the function.
Try adding this line and see if it helps
args = args[1:]
into your log_decorator_wrapper function towards the top. Like this:
def log_decorator(_func=None):
    # Same logging decorator as above, with the suggested fix applied: drop the
    # extra first positional argument that decorator.decorator() inserts.
    def log_decorator_info(func):
        def log_decorator_wrapper(*args, **kwargs):
            args = args[1:] # < -------------------here
            _logger = Logger()
            logger_obj = _logger.get_logger()
            # repr() of positional/keyword args with DataFrames/dicts redacted
            args_passed_in_function = args_excl_df_dict(*args)
            kwargs_passed_in_function = kwargs_excl_df_dict(**kwargs)
            formatted_arguments = join_args_kwargs(args_passed_in_function,kwargs_passed_in_function)
            # Identify the calling file so the log record shows the caller
            py_file_caller = getframeinfo(stack()[1][0])
            extra_args = { 'func_name_override': func.__name__,'file_name_override': os.path.basename(py_file_caller.filename) }
            """ Before to the function execution, log function details."""
            logger_obj.info(f"Begin function - Arguments: {formatted_arguments}", extra=extra_args)
            try:
                """ log return value from the function """
                # NOTE(review): as in the original, func runs here and again in
                # the return below (and this call drops **kwargs) — confirm
                # whether the double execution is intended.
                args_returned_from_function = args_excl_df_dict(func(*args))
                kwargs_returned_from_function = []
                formatted_arguments = join_args_kwargs(args_returned_from_function,kwargs_returned_from_function)
                logger_obj.info(f"End function - Returned: {formatted_arguments}", extra=extra_args)
            except:
                """log exception if occurs in function"""
                error_raised = str(sys.exc_info()[1])
                logger_obj.error(f"Exception: {str(sys.exc_info()[1])}",extra=extra_args)
                msg_to_send = f"{func.__name__} {error_raised}"
                send_alert(APP_NAME,msg_to_send,'error')
                raise
            return func(*args, **kwargs)
        return decorator.decorator(log_decorator_wrapper, func)
    if _func is None:
        return log_decorator_info
    else:
        return log_decorator_info(_func)
If your code is as is in your editor, maybe look at the indentation on the first three functions. Then start from there to move down

Python : Parallel execution of function

I would like to execute set of tasks in parallel. I have defined a function in a class which takes the parameter and executes the operation based on parameter. Class structure is like below.
from threading import Thread
from concurrent.futures import *
class Test(object):
    # Runs process_dataframe for ids 1..9 in parallel worker threads.
    def process_dataframe(self,id:int):
        # NOTE: 'id' shadows the builtin of the same name.
        print(id*id)
    def run_task(self):
        thd = []
        for i in range(1,10):
            # Builds source-code strings that are later eval()'d — fragile;
            # see the partial()-based rewrite below for a safer approach.
            thd.append( "self.process_dataframe({0})".format(i))
        self.run_functions_in_parallel(thd)
    def run_functions_in_parallel(self,fns)->bool:
        # Submits each source string to a thread pool; raises one Exception
        # aggregating all failure messages, else returns True.
        def wrap_function(self,fnToCall):
            # Returns "0" on success, the error text otherwise. "0" doubles as
            # a magic success marker — ambiguous if an error formats to "0".
            try:
                eval(fnToCall)
                return ("0")
            except Exception as e:
                return "{0}".format(e)
        thd = []
        isError = False
        # One worker per task; a large fns list spawns that many threads.
        executor = ThreadPoolExecutor(max_workers=len(fns))
        errorMessage = ""
        for fn in fns:
            t = executor.submit(wrap_function,self,fn)
            thd.append(t)
        for td in thd:
            ret = td.result()
            if ret != "0":
                isError = True
                errorMessage = errorMessage + """
""" + ret
        if isError == True:
            print (errorMessage)
            raise Exception (errorMessage)
        else:
            return True
d=Test()
d.run_task()
I have managed to make it work and tasks are executing properly. I am wondering whether there is better/simpler way to accomplish the same. I would like to keep run_functions_in_parallel method generic so that it can be used as common method in a module.
You don't need to use a wrapper, since ThreadPoolExecutor catches errors in a better way. A function, that always returns True or raises an error, don't need a return value, but if you have functions with return values, you want to call in parallel, you should return their results.
It is a bad idea to use a magic string as indicator for errors. format(e) of a KeyError: 0 also leads to "0". Better use a unique value, like None in our case.
Don't use eval if you don't have to. In your case, you can use partial.
Don't use too large a value for max_workers.
from functools import partial
from concurrent.futures import ThreadPoolExecutor
class Test(object):
    """Demo: run independent zero-argument callables in parallel threads."""

    def process_dataframe(self, id):
        # 'id' shadows the builtin; name kept for interface compatibility.
        print(id*id)

    def run_task(self):
        """Bind ids 1..9 with partial() and run them in parallel."""
        functions = []
        for i in range(1, 10):
            # partial() binds the argument now — no eval of source strings.
            functions.append(partial(self.process_dataframe, i))
        self.run_functions_in_parallel(functions)

    def run_functions_in_parallel(self, functions, max_workers=8):
        """Run zero-argument callables in a thread pool.

        Returns the list of results in submission order. If any callable
        raises, all futures are still drained and a single Exception wrapping
        the list of errors is raised at the end.
        """
        # BUG FIX: use the executor as a context manager so its worker threads
        # are always shut down, even when the aggregated Exception is raised.
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [
                executor.submit(function)
                for function in functions
            ]
            errors = []
            results = []
            for future in futures:
                try:
                    result = future.result()
                except Exception as e:
                    errors.append(e)
                else:
                    results.append(result)
        if errors:
            raise Exception(errors)
        return results

d = Test()
d.run_task()

Wrapping a Python function that uses with

Say have a python function foo() that uses some resource and is meant to be called as follows:
with foo(x,y,z) as f:
doSomething(f)
So far so good. Now lets say foo takes in a complex set of arguments based on a variety of factors, and I'd like to define a wrapper function to make things simpler. Something like:
def simple_foo():
if x:
return foo(a,b,c)
else:
return foo(d,e,f)
Now, I'd like to use simple_foo in place of foo, like:
with simple_foo() as f:
doSomething(f)
However, unsurprisingly, this does not work. How can I write simple_foo() to get this behavior?
Decorate function foo() with contextmanager (doc):
from contextlib import contextmanager
@contextmanager
def foo(a, b, c):
    """Context manager that yields a + b + c.

    FIX: the original showed '#contextmanager' — the decorator's '@' was
    mangled by the site's formatting; restored here so `with foo(...)` works.
    """
    try:
        yield a + b + c
    finally:
        pass

def simple_foo(x):
    """Wrapper choosing foo's arguments. It returns the context manager that
    foo() produces, so it can be used directly in a `with` statement."""
    if x:
        return foo(1, 2, 3)
    return foo(4, 5, 6)

with simple_foo(True) as v:
    print(v)
with simple_foo(False) as v:
    print(v)
Prints:
6
15
You can do by writing a custom context manager that internally calls that function, try code given below:
class SimpleFoo:
    """Context manager wrapping foo() behind a simpler argument interface."""

    def __init__(self, x, y, z, option):
        self.x = x
        self.y = y
        self.z = z
        self.option = option  # selects which argument order is passed to foo

    def __enter__(self):
        # NOTE(review): relies on a module-level foo() defined elsewhere.
        if self.option:
            return foo(self.x, self.y, self.z)
        else:
            return foo(self.y, self.z, self.x)

    def __exit__(self, type, value, traceback):
        # FIX: compare with `is not None`, not `!= None` (PEP 8 — `!=` can
        # invoke a custom __ne__). Parameter name 'type' (shadowing the
        # builtin) is kept as part of the published interface.
        if type is not None:
            print("Error in SimpleFoo")
            print("Error Type :", type)
            print("Error Value :", value)
            print("Error Traceback :", traceback)
        # Record the exception value (None on a clean exit) for inspection.
        self.status = value
Now if you want to use this, use it as below:
with SimpleFoo(1,2,3,True) as foo:
doSomething(foo)
I hope this helps.

python decorator TypeError missing 1 required positional argument

I'm trying to write a decorator to repeat an erroring function N times with increasingly sleeping times in between. This is my attempt so far:
def exponential_backoff(seconds=10, attempts=10):
    """Decorator factory: retry the wrapped callable up to `attempts` times,
    sleeping an increasing number of seconds before each try. Returns the
    callable's result on the first success; exceptions are printed and the
    wrapper returns None if every attempt fails."""
    def our_decorator(func):
        def function_wrapper(*args, **kwargs):
            # Delays grow as 0, attempts, 2*attempts, ... up to seconds*attempts.
            for delay in range(0, seconds * attempts, attempts):
                sleep(delay)
                try:
                    return func(*args, **kwargs)
                except Exception as err:
                    print(err)
        return function_wrapper
    return our_decorator
#exponential_backoff
# NOTE(review): the '#' above is presumably a mangled '@'. As written in the
# question, the factory is applied without parentheses, so `test` becomes the
# inner our_decorator — calling test() then raises the TypeError discussed
# below. The correct usage is @exponential_backoff().
def test():
    # Randomly succeeds or fails; 'a' + 1 raises TypeError to simulate an error.
    for a in range(100):
        if a - random.randint(0,1) == 0:
            print('success count: {}'.format(a))
            pass
        else:
            print('error count {}'.format(a))
            'a' + 1
test()
I keep getting the error:
TypeError: our_decorator() missing 1 required positional argument: 'func'
Understand what decorator is:
#exponential_backoff
def test():
pass
equals to:
def test():
pass
test = exponential_backoff(test)
In this case, test is def our_decorator(func):. That's why you get TypeError when calling test().
So further:
#exponential_backoff()
def test():
pass
equals to:
def test():
pass
test = exponential_backoff()(test)
In this case, now test is what you need.
Further, functools.wraps helps you to copy all properties of original function to decorated function. Such as function's name or docstring:
from functools import wraps
def exponential_backoff(func):
    # Minimal decorator used only to demonstrate the effect of functools.wraps:
    # without it, the decorated function loses its original __name__.
    # NOTE(review): '# #wraps(func)' below and '#exponential_backoff' further
    # down are presumably '@' decorators mangled by the site's formatting.
    # #wraps(func)
    def function_wrapper(*args, **kwargs):
        pass
    return function_wrapper

#exponential_backoff
def test():
    pass

print(test) # <function exponential_backoff.<locals>.function_wrapper at 0x7fcc343a4268>
# uncomment `#wraps(func)` line:
print(test) # <function test at 0x7fcc343a4400>
You should be using:
#exponential_backoff()
def test():
...
The overall decorator is not designed to have arguments be optional, so you must provide () when using it.
If want an example of how to make decorator allow argument list be optional, see:
https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
You might also consider using the wrapt package to make your decorators easier and more robust.
Either you go for the solution provided by @Graham Dumpleton or you can just modify your decorator like so:
from functools import wraps, partial
def exponential_backoff(func=None, seconds=10, attempts=10):
    """Retry decorator usable both bare (@exponential_backoff) and with
    arguments (@exponential_backoff(seconds=..., attempts=...)).

    Retries the wrapped callable with increasing sleeps between attempts;
    returns its result on the first success, prints each failure, and returns
    None if every attempt fails.
    """
    if func is None:
        # Called with arguments: bind them and wait for the function.
        return partial(exponential_backoff, seconds=seconds, attempts=attempts)
    # FIX: restore the '@' of wraps (shown as '#wraps(func)' in the original —
    # mangled formatting); it preserves func.__name__ and __doc__.
    @wraps(func)
    def function_wrapper(*args, **kwargs):
        for s in range(0, seconds * attempts, attempts):
            sleep(s)
            try:
                return func(*args, **kwargs)
            except Exception as e:
                print(e)
    return function_wrapper
#exponential_backoff
# NOTE(review): presumably '@exponential_backoff' with the '@' mangled. The
# bare form works with the decorator above because it makes its arguments
# optional via partial().
def test():
    # Randomly succeeds or fails; 'a' + 1 raises TypeError to exercise retries.
    for a in range(100):
        if a - random.randint(0,1) == 0:
            print('success count: {}'.format(a))
            pass
        else:
            print('error count {}'.format(a))
            'a' + 1
test()
EDIT
My answer was not entirely correct; please see @GrahamDumpleton's answer, which shows how to make my attempted solution viable (i.e. this link). Fixed it now — thank you @GrahamDumpleton!

better python pattern for exception handler in a loop?

I found myself using the following pattern in my tests quite often:
def test(params):
    """Run run_test for every parameter, collecting exceptions instead of
    failing fast, then assert that no errors were collected."""
    failures = []
    for param in params:
        try:
            run_test(param)  # Or a block of code that can continue or break
        except Exception as exc:
            failures.append(exc)
    assert isEmpty(failures), 'error encountered: {}'.format(failures)
I find myself rewriting this pattern quite often, especially with long code block for the loop that has some flow controls with continue and break. I am wondering if there is a python-ic wrapping to this pattern.
I have thought about a wrapper function like this:
def assert_all_tests(test_list):
    """Invoke every zero-argument callable, gather any exceptions raised, and
    assert afterwards that none occurred."""
    collected = []
    for case in test_list:
        try:
            case()
        except Exception as exc:
            collected.append(exc)
    assert isEmpty(collected), 'error encountered: {}'.format(collected)

def test(params):
    """Bind run_test to each parameter and hand the batch to assert_all_tests."""
    assert_all_tests([functools.partial(run_test, p) for p in params])
But I dislike this approach because it wrapped away the loop. There is no way for callable t to do flow control of the loop with continue or break (there is no loop any more, only a list comprehension).
Another approach is to use a context class like this:
def test(params):
    # Sketch of the context-manager approach: ErrorHandler (defined elsewhere,
    # not shown) is expected to collect exceptions raised inside each `with`
    # body into a class-level list.
    ErrorHandler.clearErrorList()
    for p in params:
        with ErrorHandler():
            run_test(p) # or code block that can continue or break
    ErrorHandler.assertEmptyErrorList()
where ErrorHandler would be a class with appropriate __enter__ and __exit__ and keep an error list in a class variable. But I feel that at the test function level, this is not any simpler than the original pattern: since there is no way for a ErrorHandler instance to know when a loop has began and ended, I still have to write the pre- and post- loop fixtures.
I'd like to hear idea of approaches to wrap around this pattern. Thanks.
EDIT
Thank you all for your comments.
New approach inspired by #paul-cornelius's answer
class ResultCollector(object):
def __init__(self, raise_on_error=True):
self.result_list = []
self.raise_on_error = raise_on_error
def do(self, func, *args, **kwds):
'''do can only deal with code block that can be wrapped into a function'''
try:
return func(*args, **kwds)
except Exception as e:
if not isinstance(e, AssertionError) and self.raise_on_error:
raise
self.result_list.append(e.message or e)
else:
self.result_list.append(None)
def assertClean(self):
assert not [x for x in self.result_list if x is not None], 'test results: {}'.format(self.result_list)
def __enter__(self):
self.result_list = []
return self
def __exit__(self, exc_t, exc_i, exc_tb):
if exc_t:
return None
self.assertClean()
return True
def test():
    # Example usage of ResultCollector: skip/break conditions stay as ordinary
    # loop-control code; only the assertable body is wrapped into a function
    # and routed through rc.do().
    def can_be_refactored_into_func(p):
        assert p%3, 'failed {}'.format(p)
    def condition_for_skip(p):
        return p%2
    def condition_for_break(p):
        return p>5
    with ResultCollector() as rc:
        for p in range(10):
            if condition_for_skip(p):
                rc.result_list.append('skipped {}'.format(p))
                continue
            if condition_for_break(p):
                rc.result_list.append('ended {}'.format(p))
                break
            rc.do(can_be_refactored_into_func, p)
It works pretty well when the code into loop block can be divided up into functions like above.
How about a little class that only does the one thing you find yourself doing over and over:
class TestTracker:
    """Accumulates exceptions from test runs, either via do_test() calls or by
    being used as a context manager around arbitrary code blocks."""

    def __init__(self):
        self.error_list = []

    def do_test(self, f, p):
        """Call f(p); any exception is recorded instead of propagating."""
        try:
            f(p)
        except Exception as exc:
            self.error_list.append(exc)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Record (and suppress) whatever escaped the with-block; a clean exit
        # passes exc_value=None and records nothing.
        if exc_value is not None:
            self.error_list.append(exc_value)
        return True
def test(params):
    # Collect per-parameter errors through do_test, then assert none occurred.
    tt = TestTracker()
    for p in params:
        tt.do_test(run_test, p)
    assert isEmpty(tt.error_list), 'error encountered: {}'.format(tt.error_list)

def test2(params):
    # Same idea, but using TestTracker as a context manager so the loop body
    # can freely use continue/break around the tracked code.
    tt = TestTracker()
    for p in params:
        with tt:
            # a block of code with loop control statements
            pass
    assert isEmpty(tt.error_list), 'error encountered: {}'.format(tt.error_list)
I modified this answer to make the class a context manager. The test2 shows how that can be used with loop control statements. If no exception is raised within the context, the arguments to __exit__ will be None.
You could even mix with statements and calls to do_test.
Python can do anything!
Edits :
Add some convenience to TestTracker
class TestTracker:
    """Collects exceptions raised by test callables; the instance is truthy
    while no error has been recorded, and str() renders the error summary."""

    def __init__(self):
        self.error_list = []

    def do_test(self, f, p):
        """Invoke f(p), storing any raised exception instead of propagating."""
        try:
            f(p)
        except Exception as exc:
            self.error_list.append(exc)

    def __bool__(self):
        # Truthy exactly when nothing has failed yet.
        return len(self.error_list) == 0

    def __str__(self):
        return 'error encountered: {}'.format(self.error_list)
def test(params):
    # Run every parameter through run_test; TestTracker's __bool__/__str__
    # make the final assertion compact.
    tt = TestTracker()
    for p in params:
        tt.do_test(run_test, p)
    assert tt, str(tt)

Categories

Resources