Pythonic way of not printing an error message - python

I am trying to suppress an error/warning in my log while calling a library. Assume I have this code:
try:
    kazoo_client.start()
except:
    pass
This calls a ZooKeeper client, which throws some exception that bubbles up. I don't want the warn/error in my logs when I call kazoo_client.start(). Is there a way to get this suppressed when calling the client?

Assuming Python 2.7.17, try this approach:
import sys, StringIO

def funky():
    "1" + 1  # This should raise an error

sys.stderr = StringIO.StringIO()
funky()  # this should call the funky function
And your code should look something like this:
import sys, StringIO
# import kazoo somewhere around here
sys.stderr = StringIO.StringIO()
kazoo_client.start()
And lastly the Python 3 example:
import sys
from io import StringIO
# import kazoo somewhere around here
sys.stderr = StringIO()
kazoo_client.start()
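If you go the stderr-redirect route in Python 3, a minimal sketch using contextlib.redirect_stderr keeps the suppression scoped to the call and restores sys.stderr automatically afterwards (kazoo_client is assumed to already exist, as in the question):
import contextlib
import io

# kazoo_client is assumed to be created elsewhere, as in the question
with contextlib.redirect_stderr(io.StringIO()):
    kazoo_client.start()  # anything written to stderr inside the block is discarded
# sys.stderr is restored to its original value here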

If you know the exception, try contextlib.suppress:
>>> from contextlib import suppress
>>> x = (i for i in range(10))
>>> with suppress(StopIteration):
...     for i in range(11):
...         print(next(x))
0
1
2
3
4
5
6
7
8
9
Without suppress, it throws a StopIteration error on the last iteration.
>>> x = (i for i in range(10))
>>> for i in range(11):
...     print(next(x))
0
1
2
3
4
5
6
7
8
9
Traceback (most recent call last):
File "<ipython-input-10-562798e05ad5>", line 2, in <module>
print(next(x))
StopIteration
contextlib.suppress is Pythonic, safe and explicit.
So in your case:
with suppress(SomeError):
    kazoo_client.start()
EDIT:
To suppress all exceptions:
with suppress(Exception):
    kazoo_client.start()

I would like to suggest a more generic approach that you can reuse elsewhere.
Here is an example of how to create a decorator that ignores errors.
import functools

# Use the Decorator Design Pattern
def ignore_error_decorator(function_reference):
    @functools.wraps(function_reference)  # the decorator inherits the original function signature
    def wrapper(*args):
        try:
            result = function_reference(*args)  # execute the function
            return result  # If the function executed correctly, return
        except Exception as e:
            pass  # Default: ignore; you can also log the error or do whatever you want
    return wrapper  # Return the wrapper reference


if __name__ == '__main__':
    # First alternative: compose the decorator with another function
    def my_first_function(a, b):
        return a + b

    rez_valid = ignore_error_decorator(my_first_function)(1, 3)
    rez_invalid = ignore_error_decorator(my_first_function)(1, 'a')
    print("Alternative_1 valid: {result}".format(result=rez_valid))
    print("Alternative_1 invalid: {result}".format(result=rez_invalid))  # None is returned by the except block

    # Second alternative: decorating a function
    @ignore_error_decorator
    def my_second_function(a, b):
        return a + b

    rez_valid = my_second_function(1, 5)
    rez_invalid = my_second_function(1, 'a')
    print("Alternative_2 valid: {result}".format(result=rez_valid))
    print("Alternative_2 invalid: {result}".format(result=rez_invalid))  # None is returned by the except block
Getting back to your problem, using my alternative you would run:
ignore_error_decorator(kazoo_client.start)()

Related

Invalid handle when calling Windows API from Python 3

The following code works well in Python 2:
import ctypes

def test():
    OpenSCManager = ctypes.windll.advapi32.OpenSCManagerA
    CloseServiceHandle = ctypes.windll.advapi32.CloseServiceHandle
    handle = OpenSCManager(None, None, 0)
    print(hex(handle))
    assert handle, ctypes.GetLastError()
    assert CloseServiceHandle(handle), ctypes.GetLastError()

test()
It does not work in Python 3:
0x40d88f90
Traceback (most recent call last):
File ".\test1.py", line 12, in <module>
test()
File ".\test1.py", line 10, in test
assert CloseServiceHandle(handle), ctypes.GetLastError()
AssertionError: 6
6 means invalid handle.
In addition, the handles retrieved in Python 2 seem to be smaller numbers, such as 0x100ffc0. It isn't something specific to CloseServiceHandle; this handle cannot be used with any service function.
Both Python versions are 64 bit native Windows Python.
You should use argtypes and restype, otherwise all arguments and return values default to int and are truncated on 64-bit. Also, you shouldn't call GetLastError directly but use ctypes.get_last_error(), which caches the last error code (there might have been Windows APIs called by the interpreter after you performed your call, so you can't be sure otherwise).
Here's a working example:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys
import ctypes

def test():
    advapi32 = ctypes.WinDLL("advapi32", use_last_error=True)

    OpenSCManager = advapi32.OpenSCManagerA
    OpenSCManager.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong]
    OpenSCManager.restype = ctypes.c_void_p

    CloseServiceHandle = advapi32.CloseServiceHandle
    CloseServiceHandle.argtypes = [ctypes.c_void_p]
    CloseServiceHandle.restype = ctypes.c_long

    handle = OpenSCManager(None, None, 0)
    if not handle:
        raise ctypes.WinError(ctypes.get_last_error())
    print(f"handle: {handle:#x}")

    result = CloseServiceHandle(handle)
    if result == 0:
        raise ctypes.WinError(ctypes.get_last_error())

def main():
    test()

if __name__ == "__main__":
    sys.exit(main())
Try using ctypes.windll.Advapi32 instead of ctypes.windll.advapi32

How can I extract local variables from a stack trace?

Suppose I have a function that raises unexpected exceptions, so I wrap it in ipdb:
def boom(x, y):
    try:
        x / y
    except Exception as e:
        import ipdb; ipdb.set_trace()

def main():
    x = 2
    y = 0
    boom(x, y)

if __name__ == '__main__':
    main()
I can move up the stack to find out what values x and y have:
$ python crash.py
> /tmp/crash.py(6)boom()
5 except Exception as e:
----> 6 import ipdb; ipdb.set_trace()
7
ipdb> u
> /tmp/crash.py(11)main()
10 y = 0
---> 11 boom(x, y)
12
ipdb> p y
0
However, when debugging, I want to just put a debugger at the top level:
def boom(x, y):
    x / y

def main():
    x = 2
    y = 0
    boom(x, y)

if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        import ipdb; ipdb.set_trace()
I can display the traceback, but I can't view the variables inside the function called:
$ python crash.py
> /tmp/crash.py(14)<module>()
12 main()
13 except Exception as e:
---> 14 import ipdb; ipdb.set_trace()
ipdb> !import traceback; traceback.print_exc(e)
Traceback (most recent call last):
File "crash.py", line 12, in <module>
main()
File "crash.py", line 8, in main
boom(x, y)
File "crash.py", line 3, in boom
x / y
ZeroDivisionError: integer division or modulo by zero
ipdb> d # I want to see what value x and y had!
*** Newest frame
The exception object clearly still has references to the stack when the exception occurred. Can I access x and y here, even though the stack has unwound?
Turns out that it is possible to extract variables from a traceback object.
To manually extract values:
ipdb> !import sys
ipdb> !tb = sys.exc_info()[2]
ipdb> p tb.tb_next.tb_frame.f_locals
{'y': 0, 'x': 2}
Even better, you can use the exception to explicitly do post-mortem debugging on that stack:
import sys

def boom(x, y):
    x / y

def main():
    x = 2
    y = 0
    boom(x, y)

if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        # Most debuggers allow you to just do .post_mortem()
        # but see https://github.com/gotcha/ipdb/pull/94
        tb = sys.exc_info()[2]
        import ipdb; ipdb.post_mortem(tb)
Which gets us straight to the offending code:
> /tmp/crash.py(4)boom()
3 def boom(x, y):
----> 4 x / y
5
ipdb> p x
2
You can also use the context manager
with ipdb.launch_ipdb_on_exception():
    main()
It's an easy-to-use wrapper using ipdb.post_mortem.
Depending on what you need, there are two general best practices.
Just print the variables with minimal code edits
Have a look at some related packages. For simple usage you might pick traceback-with-variables (pip install traceback-with-variables).
Or try tbvaccine, better-exceptions, or any other similar package.
Programmatically access variables to use them in your code
Use the inspect module:
except ... as ...:
    x = inspect.trace()[-1][0].f_locals['x']
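For context, a minimal self-contained sketch of that inspect.trace() pattern (the function and variable names here are illustrative, not from the original answer):
import inspect

def divide(a, b):
    ratio = a / b  # raises ZeroDivisionError when b == 0
    return ratio

try:
    divide(1, 0)
except ZeroDivisionError:
    # inspect.trace()[-1] is the innermost frame record of the traceback being handled;
    # [0] is the frame object, whose f_locals holds that frame's local variables.
    innermost_locals = inspect.trace()[-1][0].f_locals
    print(innermost_locals['a'], innermost_locals['b'])  # -> 1 0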
What about a debugger?
A debugger is made for step-by-step execution and breakpoints. Using it to inspect the cause of an exception is really inconvenient and should be avoided; you can automate your debug session using the two best practices mentioned above.

locals() and globals() in stack trace on exception (Python)

While stack traces are useful in Python, most often the data at the root of the problem are missing - is there a way of making sure that at least locals() (and possibly globals()) are added to printed stacktrace?
You can install your own exception hook and output what you need from there:
import sys, traceback

def excepthook(type, value, tb):
    traceback.print_exception(type, value, tb)
    while tb.tb_next:
        tb = tb.tb_next
    print >>sys.stderr, 'Locals:', tb.tb_frame.f_locals
    print >>sys.stderr, 'Globals:', tb.tb_frame.f_globals

sys.excepthook = excepthook

def x():
    y()

def y():
    foo = 1
    bar = 0
    foo/bar

x()
To print vars from each frame in a traceback, change the above loop to
while tb:
    print >>sys.stderr, 'Locals:', tb.tb_frame.f_locals
    print >>sys.stderr, 'Globals:', tb.tb_frame.f_globals
    tb = tb.tb_next
This is a Pandora's box. Values can be very large in printed form; printing all locals in a stack trace can easily lead to new problems just because of the error output. That's why this is not done by default in Python.
In small examples, though, i.e. if you know that your values aren't too large to be printed properly, you can step along the traceback yourself:
import sys
import traceback

def c():
    clocal = 1001
    raise Exception("foo")

def b():
    blocal = 23
    c()

def a():
    alocal = 42
    b()

try:
    a()
except Exception:
    frame = sys.exc_info()[2]
    formattedTb = traceback.format_tb(frame)
    frame = frame.tb_next
    while frame:
        print formattedTb.pop(0), '\t', frame.tb_frame.f_locals
        frame = frame.tb_next
The output will be something like this:
File "/home/alfe/tmp/stacktracelocals.py", line 19, in <module>
a()
{'alocal': 42}
File "/home/alfe/tmp/stacktracelocals.py", line 16, in a
b()
{'blocal': 23}
File "/home/alfe/tmp/stacktracelocals.py", line 12, in b
c()
{'clocal': 1001}
And you can, of course, install your own except hook as thg435 suggested in his answer.
If you didn't know about this already, use the pdb post-mortem feature:
x = 3.0
y = 0.0
print x/y

def div(a, b):
    return a / b

print div(x, y)
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-3-d03977de5fc3> in div(a, b)
1 def div(a, b):
----> 2 return a / b
ZeroDivisionError: float division
import pdb
pdb.pm()
> <ipython-input-3-148da0dcdc9e>(2)div()
0 return a/b
ipdb> l
1 def div(a,b):
----> 2 return a/b
ipdb> a
3.0
ipdb> b
0.0
etc.
There are cases where you really need the prints, though, of course. You're better off instrumenting the code (via try/except) to print out extra information around a specific weird exception you are debugging than doing this for everything, imho.
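For illustration, a minimal sketch of that kind of targeted instrumentation (the function and variable names are made up for the example):
def parse_record(record):
    return int(record['count']) / int(record['total'])

def process(records):
    for i, record in enumerate(records):
        try:
            parse_record(record)
        except ZeroDivisionError:
            # Print just the context needed for this one weird failure,
            # then re-raise so the normal error handling still applies.
            print('record %d caused the division error: %r' % (i, record))
            raise

process([{'count': 1, 'total': 2}, {'count': 1, 'total': 0}])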
Try the traceback-with-variables package.
Usage:
from traceback_with_variables import traceback_with_variables

def main():
    ...
    with traceback_with_variables():
        ...your code...
Exceptions with it:
Traceback with variables (most recent call last):
  File "./temp.py", line 7, in main
    return get_avg_ratio([h1, w1], [h2, w2])
      sizes_str = '300 200 300 0'
      h1 = 300
      w1 = 200
      h2 = 300
      w2 = 0
  File "./temp.py", line 10, in get_avg_ratio
    return mean([get_ratio(h, w) for h, w in [size1, size2]])
      size1 = [300, 200]
      size2 = [300, 0]
  File "./temp.py", line 10, in <listcomp>
    return mean([get_ratio(h, w) for h, w in [size1, size2]])
      .0 = <tuple_iterator object at 0x7ff61e35b820>
      h = 300
      w = 0
  File "./temp.py", line 13, in get_ratio
    return height / width
      height = 300
      width = 0
builtins.ZeroDivisionError: division by zero
Installation:
pip install traceback-with-variables

lxml: Obtain file current line number when calling etree.iterparse(f)

Since no one has answered or commented on this post, I decided to rewrite it.
Consider the following Python code using lxml:
treeIter = etree.iterparse(fObj)
for event, ele in treeIter:
    if ele.tag == 'logRoot':
        try:
            somefunction(ele)
        except InternalException as e:
            e.handle(*args)
    ele.clear()
InternalException is user-defined and wraps all exceptions from somefunction() besides lxml.etree.XMLSyntaxError. InternalException has a well-defined handler function .handle().
fObj has "trueRoot" as its top-level tag and many "logRoot" elements as second-level leaves.
My question is: is there a way to record the current line number when handling the exception e? *args can be replaced by any available arguments.
Any suggestion is much appreciated.
import lxml.etree as ET
import io

def div(x):
    return 1/x

content = '''\
<trueRoot>
<logRoot a1="x1"> 2 </logRoot>
<logRoot a1="x1"> 1 </logRoot>
<logRoot a1="x1"> 0 </logRoot>
</trueRoot>
'''

# encode so io.BytesIO also accepts the string under Python 3
for event, elem in ET.iterparse(io.BytesIO(content.encode('utf-8')), events=('end',), tag='logRoot'):
    num = int(elem.text)
    print('Calling div({})'.format(num))
    try:
        div(num)
    except ZeroDivisionError as e:
        print('Ack! ZeroDivisionError on line {}'.format(elem.sourceline))
prints
Calling div(2)
Calling div(1)
Calling div(0)
Ack! ZeroDivisionError on line 4

Python unittesting initiate values

Sorry if this question is stupid. I created a unittest class which needs to take given inputs and outputs from outside, so I guess these values should be initialized. However, I got some errors with the following code:
CODE:
import unittest
from StringIO import StringIO

########## Inputs and outputs from outside #######
a = [1, 2]
b = [2, 3]
out = [3, 4]
####################################

def func1(a, b):
    return a + b

class MyTestCase(unittest.TestCase):
    def __init__(self, a, b, out):
        self.a = a
        self.b = b
        self.out = out

    def testMsed(self):
        for i in range(self.tot_iter):
            print i
            fun = func1(self.a[i], self.b[i])
            value = self.out[i]
            testFailureMessage = "Test of function name: %s iteration: %i expected: %i != calculated: %i" % ("func1", i, value, fun)
            self.assertEqual(round(fun, 3), round(value, 3), testFailureMessage)

if __name__ == '__main__':
    f = MyTestCase(a, b, out)
    from pprint import pprint
    stream = StringIO()
    runner = unittest.TextTestRunner(stream=stream, verbosity=2)
    result = runner.run(unittest.makeSuite(MyTestCase(a, b, out)))
    print 'Tests run', result.testsRun
However, I got the following error
Traceback (most recent call last):
  File "C:testing.py", line 33, in <module>
    result = runner.run(unittest.makeSuite(MyTestCase(a,b,out)))
  File "C:\Python27\lib\unittest\loader.py", line 310, in makeSuite
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
  File "C:\Python27\lib\unittest\loader.py", line 50, in loadTestsFromTestCase
    if issubclass(testCaseClass, suite.TestSuite):
TypeError: issubclass() arg 1 must be a class
Can anyone give me some suggestions? Thanks!
The root of the problem is this line:
result = runner.run(unittest.makeSuite(MyTestCase(a,b,out)))
unittest.makeSuite expects a class, not an instance of a class. So just MyTestCase, not MyTestCase(a, b, out). This means that you can't pass parameters to your test case in the manner you are attempting to. You should probably move the code from __init__ to a setUp method. Either access a, b, and out as globals inside setUp, or take a look at this link for information regarding passing parameters to a unit test.
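For example, a minimal sketch of the setUp approach, reading the module-level a, b and out from the question as globals (this is an assumed rearrangement of the question's code, not a quoted solution):
import unittest

# Inputs and outputs from outside, as module-level globals
a = [1, 2]
b = [2, 3]
out = [3, 4]

def func1(a, b):
    return a + b

class MyTestCase(unittest.TestCase):
    def setUp(self):
        # Pull the test data in here instead of overriding __init__
        self.a = a
        self.b = b
        self.out = out
        self.tot_iter = len(self.out)

    def testMsed(self):
        for i in range(self.tot_iter):
            fun = func1(self.a[i], self.b[i])
            value = self.out[i]
            msg = "func1 iteration %i: expected %i != calculated %i" % (i, value, fun)
            self.assertEqual(round(fun, 3), round(value, 3), msg)

if __name__ == '__main__':
    unittest.main()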
By the way, here is the source file within python where the problem originated. Might be informative to read.
