Trying to run test files in PyCharm with pytest, I repeatedly get the "fixture [variable name] not found" error. All that I could find regarding this issue are cases of misspelling parametrize.
# Last computed pay record, also returned by calculer_paie_employe().
liste_paie = []


def calculer_paie_employe(tauxh, heures):
    """Compute one employee's pay record.

    Args:
        tauxh: hourly rate.
        heures: hours worked.

    Returns:
        list: [heures, tauxh, gross total, 20% tax, net pay].
    """
    # Without this declaration the assignment below would only create a
    # function-local name and the module-level list would stay empty.
    global liste_paie
    total = tauxh * heures
    impot = total * 0.20  # flat 20% income tax
    net = total - impot
    liste_paie = [heures, tauxh, total, impot, net]
    return liste_paie
# parametrize must be applied as a decorator: the bare expression
# `pytest.mark.parametrize(...)` is evaluated and discarded, leaving the test
# undecorated, so pytest looks for fixtures named var1, var2, ... and fails.
@pytest.mark.parametrize(
    "var1,var2,expected_1,expected_2,expected_3",
    [
        (14.7, 25, 367.5, 73.5, 294),
        (20, 15, 300, 60, 240),
        # Intentionally failing row: 15.6 * 23.9 * 0.20 == 74.568, not 75.568.
        (15.6, 23.9, 372.84, 75.568, 300),
    ],
)
def test_calculer_paie_employe(var1, var2, expected_1, expected_2, expected_3):
    """Check total, tax, and net pay for each parametrized case."""
    # Use the returned list; the function's assignment to liste_paie is local
    # only, so the module-level global would still be empty here.
    liste_paie = calculer_paie_employe(var1, var2)
    assert liste_paie[2] == expected_1 and liste_paie[3] == expected_2 and liste_paie[4] == expected_3
When I run it I get:
test setup failed
E fixture 'var1' not found
available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_testsuite_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory
use 'pytest --fixtures [testpath]' for help on them.
Final set of data should fail to pass. (this is intentional)
You must use it as a decorator, i.e. use the @ syntax:
liste_paie = []


def calculer_paie_employe(tauxh, heures):
    """Return [heures, tauxh, gross total, 20% tax, net pay]."""
    total = tauxh * heures
    impot = total * 0.20
    net = total - impot
    liste_paie = [heures, tauxh, total, impot, net]
    return liste_paie


import pytest


# The mark only takes effect when applied with the @ decorator syntax.
@pytest.mark.parametrize(
    "var1,var2,expected_1,expected_2,expected_3", [
        (14.7, 25, 367.5, 73.5, 294),
        (20, 15, 300, 60, 240),
        (15.6, 23.9, 372.84, 75.568, 300),  # intentionally failing data set
    ])
def test_calculer_paie_employe(var1, var2, expected_1, expected_2, expected_3):
    # Use the return value: the assignment inside calculer_paie_employe binds
    # a local name, so the module-level liste_paie is never updated.
    liste_paie = calculer_paie_employe(var1, var2)
    assert liste_paie[2] == expected_1 and liste_paie[3] == expected_2 and liste_paie[4] == expected_3
a pytest run will then produce:
================================================= test session starts =================================================
platform win32 -- Python 3.5.4, pytest-3.10.1, py-1.8.0, pluggy-0.9.0
rootdir: c:\srv\tmp, inifile:
plugins: django-3.10.0, cov-2.6.1
collected 3 items
pytestparm.py ..F [100%]
====================================================== FAILURES =======================================================
_______________________________ test_calculer_paie_employe[15.6-23.9-372.84-75.568-300] _______________________________
var1 = 15.6, var2 = 23.9, expected_1 = 372.84, expected_2 = 75.568, expected_3 = 300
#pytest.mark.parametrize(
"var1,var2,expected_1,expected_2,expected_3", [
(14.7, 25, 367.5, 73.5, 294),
(20, 15, 300, 60, 240),
(15.6, 23.9, 372.84, 75.568, 300)
])
def test_calculer_paie_employe(var1,var2, expected_1, expected_2, expected_3):
liste_paie = calculer_paie_employe(var1,var2)
> assert liste_paie[2] == expected_1 and liste_paie[3] == expected_2 and liste_paie[4] == expected_3
E assert (372.84 == 372.84 and 74.568 == 75.568)
pytestparm.py:19: AssertionError
========================================= 1 failed, 2 passed in 0.04 seconds ==========================================
Note that I've changed the code to use the return value, since the assignment to liste_paie in calculer_paie_employe doesn't change the global variable (because you're missing the global keyword - but using the return value is better practice anyways...)
Related
I was following a tutorial for creating a new Gstreamer plugin in Python. The following example, took from here https://mathieuduponchelle.github.io/2018-02-01-Python-Elements.html , raises a Segmentation error (core dumped) at the end of its (correct) execution when running the gst-inspect-1.0 audiotestsrc_py.
If you remove the code __gproperties__ it seems to be fine. I am using Python 3.6 and Gstreamer 1.14.5.
Code:
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstAudio', '1.0')
from gi.repository import Gst, GLib, GObject, GstBase, GstAudio
import numpy as np
# Output caps: interleaved 32-bit little-endian float stereo at 44.1 kHz.
OCAPS = Gst.Caps.from_string (
'audio/x-raw, format=F32LE, layout=interleaved, rate=44100, channels=2')
# Audio frames produced per buffer when no specific size is requested.
SAMPLESPERBUFFER = 1024
# Default values for the element's GObject properties.
DEFAULT_FREQ = 440
DEFAULT_VOLUME = 0.8
DEFAULT_MUTE = False
DEFAULT_IS_LIVE = False
class AudioTestSrc(GstBase.BaseSrc):
    """Sine-tone test source pushing interleaved F32LE stereo buffers.

    Python port of GStreamer's audiotestsrc from Mathieu Duponchelle's
    "Python Elements" tutorial.
    """

    __gstmetadata__ = ('CustomSrc', 'Src',
                       'Custom test src element', 'Mathieu Duponchelle')

    # GObject property table: name -> (type, nick, blurb, [min, max,] default, flags).
    # NOTE(review): gst-inspect was reported to segfault at teardown on
    # gst-python 1.14.x only when __gproperties__ is present — looks like a
    # binding-level issue rather than something in this table; confirm against
    # a newer gst-python release.
    __gproperties__ = {
        "freq": (int,
                 "Frequency",
                 "Frequency of test signal",
                 1,
                 GLib.MAXINT,
                 DEFAULT_FREQ,
                 GObject.ParamFlags.READWRITE
                 ),
        "volume": (float,
                   "Volume",
                   "Volume of test signal",
                   0.0,
                   1.0,
                   DEFAULT_VOLUME,
                   GObject.ParamFlags.READWRITE
                   ),
        "mute": (bool,
                 "Mute",
                 "Mute the test signal",
                 DEFAULT_MUTE,
                 GObject.ParamFlags.READWRITE
                 ),
        "is-live": (bool,
                    "Is live",
                    "Whether to act as a live source",
                    DEFAULT_IS_LIVE,
                    GObject.ParamFlags.READWRITE
                    ),
    }

    __gsttemplates__ = Gst.PadTemplate.new("src",
                                           Gst.PadDirection.SRC,
                                           Gst.PadPresence.ALWAYS,
                                           OCAPS)

    def __init__(self):
        GstBase.BaseSrc.__init__(self)
        self.info = GstAudio.AudioInfo()
        self.freq = DEFAULT_FREQ
        self.volume = DEFAULT_VOLUME
        self.mute = DEFAULT_MUTE
        self.set_live(DEFAULT_IS_LIVE)
        self.set_format(Gst.Format.TIME)

    def do_set_caps(self, caps):
        # Cache the negotiated audio info; one blocksize = SAMPLESPERBUFFER frames.
        self.info.from_caps(caps)
        self.set_blocksize(self.info.bpf * SAMPLESPERBUFFER)
        return True

    def do_get_property(self, prop):
        if prop.name == 'freq':
            return self.freq
        elif prop.name == 'volume':
            return self.volume
        elif prop.name == 'mute':
            return self.mute
        elif prop.name == 'is-live':
            return self.is_live
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_set_property(self, prop, value):
        if prop.name == 'freq':
            self.freq = value
        elif prop.name == 'volume':
            self.volume = value
        elif prop.name == 'mute':
            self.mute = value
        elif prop.name == 'is-live':
            # Forward to BaseSrc so latency handling stays consistent.
            self.set_live(value)
        else:
            raise AttributeError('unknown property %s' % prop.name)

    def do_start(self):
        # Reset the generator state at the start of streaming.
        self.next_sample = 0
        self.next_byte = 0
        self.next_time = 0
        self.accumulator = 0
        self.generate_samples_per_buffer = SAMPLESPERBUFFER
        return True

    def do_gst_base_src_query(self, query):
        if query.type == Gst.QueryType.LATENCY:
            # Latency of one buffer's worth of samples, scaled to nanoseconds.
            latency = Gst.util_uint64_scale_int(self.generate_samples_per_buffer,
                                                Gst.SECOND, self.info.rate)
            is_live = self.is_live
            query.set_latency(is_live, latency, Gst.CLOCK_TIME_NONE)
            res = True
        else:
            res = GstBase.BaseSrc.do_query(self, query)
        return res

    def do_get_times(self, buf):
        end = 0
        start = 0
        if self.is_live:
            ts = buf.pts
            if ts != Gst.CLOCK_TIME_NONE:
                duration = buf.duration
                if duration != Gst.CLOCK_TIME_NONE:
                    end = ts + duration
                start = ts
        else:
            # Non-live sources don't schedule buffers against the clock.
            start = Gst.CLOCK_TIME_NONE
            end = Gst.CLOCK_TIME_NONE
        return start, end

    def do_create(self, offset, length):
        # length == -1 means "default size"; otherwise convert bytes -> frames.
        if length == -1:
            samples = SAMPLESPERBUFFER
        else:
            samples = int(length / self.info.bpf)
        self.generate_samples_per_buffer = samples
        bytes_ = samples * self.info.bpf
        next_sample = self.next_sample + samples
        next_byte = self.next_byte + bytes_
        next_time = Gst.util_uint64_scale_int(next_sample, Gst.SECOND, self.info.rate)
        if not self.mute:
            # Sample indices, repeated per channel, turned into a sine wave.
            r = np.repeat(
                np.arange(self.accumulator, self.accumulator + samples),
                self.info.channels)
            data = ((np.sin(2 * np.pi * r * self.freq / self.info.rate) * self.volume)
                    .astype(np.float32))
        else:
            data = [0] * bytes_
        buf = Gst.Buffer.new_wrapped(bytes(data))
        buf.offset = self.next_sample
        buf.offset_end = next_sample
        buf.pts = self.next_time
        buf.duration = next_time - self.next_time
        self.next_time = next_time
        self.next_sample = next_sample
        self.next_byte = next_byte
        self.accumulator += samples
        # Keep the phase accumulator within one signal period.
        self.accumulator %= self.info.rate / self.freq
        return (Gst.FlowReturn.OK, buf)
__gstelementfactory__ = ("audiotestsrc_py", Gst.Rank.NONE, AudioTestSrc)
Output:
Factory Details:
Rank none (0)
Long-name CustomSrc
Klass Src
Description Custom test src element
Author Mathieu Duponchelle
Plugin Details:
Name python
Description loader for plugins written in python
Filename /usr/lib/x86_64-linux-gnu/gstreamer-1.0/libgstpython.cpython-36m-x86_64-linux-gnu.so
Version 1.14.5
License LGPL
Source module gst-python
Binary package GStreamer GObject Introspection overrides for Python
Origin URL http://gstreamer.freedesktop.org
GObject
+----GInitiallyUnowned
+----GstObject
+----GstElement
+----GstBaseSrc
+----audiotestsrc_py+AudioTestSrc
Pad Templates:
SRC template: 'src'
Availability: Always
Capabilities:
audio/x-raw
format: F32LE
layout: interleaved
rate: 44100
channels: 2
Element has no clocking capabilities.
Element has no URI handling capabilities.
Pads:
SRC: 'src'
Pad Template: 'src'
Element Properties:
name : The name of the object
flags: readable, writable
String. Default: "audiotestsrc_py+audiotestsrc0"
parent : The parent of the object
flags: readable, writable
Object of type "GstObject"
blocksize : Size in bytes to read per buffer (-1 = default)
flags: readable, writable
Unsigned Integer. Range: 0 - 4294967295 Default: 4096
num-buffers : Number of buffers to output before sending EOS (-1 = unlimited)
flags: readable, writable
Integer. Range: -1 - 2147483647 Default: -1
typefind : Run typefind before negotiating (deprecated, non-functional)
flags: readable, writable, deprecated
Boolean. Default: false
do-timestamp : Apply current stream time to buffers
flags: readable, writable
Boolean. Default: false
freq : Frequency of test signal
flags: readable, writable
Integer. Range: 1 - 2147483647 Default: 440
is-live : Whether to act as a live source
sys:1: Warning: g_object_get_property: assertion 'G_IS_OBJECT (object)' failed
flags: readable, writable
Boolean. Default: false
mute : Mute the test signal
flags: readable, writable
Boolean. Default: false
volume : Volume of test signal
flags: readable, writable
Double. Range: 0 - 1 Default: 0
Segmentation fault (core dumped)
I am trying to learn pytest and testing my knowledge on the below code.
src.py
def a(s):
    """Toy helper: True for 1, False for 2, and 3 for anything else."""
    if s == 1:
        return True
    if s == 2:
        return False
    return 3


def abc(u):
    """Branch on u and on a(u).

    Falls through to an implicit None when u == 1 but a(u) is falsy —
    that inner `return 3` branch is only reachable for u != 1, which is why
    mocked tests that always make a() truthy never cover it.
    """
    if u == 1:
        if a(u):
            return 1
    else:
        if a(u):
            return 2
        else:
            return 3
and this is my test file:
import pytest
import src


# parametrize works only as a decorator: the @ is required, otherwise the
# expression is evaluated and thrown away and the test stays unparametrized.
@pytest.mark.parametrize("input_data, expected", [(1, 1), (2, 2), (3, 2)])
def test_abc(input_data, expected, mocker):
    """Drive abc()'s branches via a mocked src.a."""
    # side_effect makes the first call return True, so the real a() is
    # never consulted and src.py's `return 3` branch is never reached.
    s = mocker.patch('src.a', side_effect=[True, False])
    assert src.abc(input_data) == expected
    s.assert_called_once_with(input_data)


@pytest.mark.parametrize("input_data, expected", [(1, True), (2, False), (3, 3)])
def test_a(input_data, expected):
    """Exercise all three return paths of src.a."""
    assert src.a(input_data) == expected
Testing the code returns all passed, but the coverage reports that the line 16 is not being tested:
% pytest -v --cov=. . --cov-report term-missing
==================================================================== test session starts ====================================================================
platform darwin -- Python 3.10.4, pytest-7.1.2, pluggy-1.0.0 -- /Users/tomhanks/.pyenv/versions/3.10.4/bin/python3.10
cachedir: .pytest_cache
rootdir: /Users/tomhanks/projects/pytest
plugins: mock-3.7.0, cov-3.0.0
collected 6 items
test_me.py::test_abc[1-1] PASSED [ 16%]
test_me.py::test_abc[2-2] PASSED [ 33%]
test_me.py::test_abc[3-2] PASSED [ 50%]
test_me.py::test_a[1-True] PASSED [ 66%]
test_me.py::test_a[2-False] PASSED [ 83%]
test_me.py::test_a[3-3] PASSED [100%]
---------- coverage: platform darwin, python 3.10.4-final-0 ----------
Name Stmts Miss Cover Missing
------------------------------------------
src.py 13 1 92% 16
test_me.py 10 0 100%
------------------------------------------
TOTAL 23 1 96%
===================================================================== 6 passed in 0.07s =====================================================================
Can somebody please help me understand why the line 16 is not being tested?
Thanks in advance!
I'm trying this code on Ropsten, it but keeps failing:
# Uniswap v3 NonfungiblePositionManager contract (same address across networks).
contract_instance = w3.eth.contract(address="0xC36442b4a4522E871399CD717aBDD847Ab11FE88", abi=liq_ABI)
# mint() takes a single MintParams struct, passed here as a tuple in field order:
# (token0, token1, fee, tickLower, tickUpper, amount0Desired, amount1Desired,
#  amount0Min, amount1Min, recipient, deadline)
tx_hash = contract_instance.functions.mint(
(
'0x31F42841c2db5173425b5223809CF3A38FEde360',
'0xc778417E063141139Fce010982780140Aa0cD5Ab',
3000,
49548,
50549,
w3.toWei(0.001,'ether'),
w3.toWei(0.17,'ether'),
# NOTE(review): zero amount0Min/amount1Min disables slippage protection — confirm intended.
w3.toWei(0,'ether'),
w3.toWei(0,'ether'),
wallet_address,
round(time.time()) + 60*20,
)
).buildTransaction({
'from': wallet_address,
'chainId': 3,
'gas': 300000,
'gasPrice': w3.toWei(500, 'gwei'),
'nonce': w3.eth.getTransactionCount(wallet_address),
'value': Web3.toWei(0, 'ether')
})
# buildTransaction() returns a dict of unsigned transaction fields (not a hash).
print(tx_hash)
signed_tx = w3.eth.account.signTransaction(tx_hash, private_key=wallet_key)
tx = w3.eth.sendRawTransaction(signed_tx.rawTransaction)
Failed transaction: https://ropsten.etherscan.io/tx/0xc2f3d6ffff164df331dd4b46fc65dadc5dba8f135f6e13ef1cd383a73a2d0c4b
Web3-Ethereum-Defi Python library has a function called add_liquidity for Uniswap v3 that is probably what you are looking for.
Here is some example code:
fee = 3000

# Deploy two pools on the same fee tier: WETH/USDC and USDC/DAI.
pool1 = deploy_pool(
    web3,
    deployer,
    deployment=uniswap_v3,
    token0=weth,
    token1=usdc,
    fee=fee,
)
pool2 = deploy_pool(
    web3,
    deployer,
    deployment=uniswap_v3,
    token0=usdc,
    token1=dai,
    fee=fee,
)

# add same liquidity amount to both pools as in SDK tests
min_tick, max_tick = get_default_tick_range(fee)
add_liquidity(
    web3,
    deployer,
    deployment=uniswap_v3,
    pool=pool1,
    amount0=100_000,
    amount1=100_000,
    lower_tick=min_tick,
    upper_tick=max_tick,
)
add_liquidity(
    web3,
    deployer,
    deployment=uniswap_v3,
    pool=pool2,
    amount0=120_000,
    amount1=100_000,
    lower_tick=min_tick,
    upper_tick=max_tick,
)

price_helper = UniswapV3PriceHelper(uniswap_v3)

# test get_amount_out, based on:
# https://github.com/Uniswap/v3-sdk/blob/1a74d5f0a31040fec4aeb1f83bba01d7c03f4870/src/entities/trade.test.ts#L394
# NOTE(review): slippage appears to be expressed in BPS (5 * 100 == 5%) — confirm
# against the library's docstring.
for slippage, expected_amount_out in [
    (0, 7004),
    (5 * 100, 6670),
    (200 * 100, 2334),
]:
    amount_out = price_helper.get_amount_out(
        10_000,
        [
            weth.address,
            usdc.address,
            dai.address,
        ],
        [fee, fee],
        slippage=slippage,
    )
    assert amount_out == expected_amount_out

# test get_amount_in, based on:
# https://github.com/Uniswap/v3-sdk/blob/1a74d5f0a31040fec4aeb1f83bba01d7c03f4870/src/entities/trade.test.ts#L361
for slippage, expected_amount_in in [
    (0, 15488),
    (5 * 100, 16262),
    (200 * 100, 46464),
]:
    amount_in = price_helper.get_amount_in(
        10_000,
        [
            weth.address,
            usdc.address,
            dai.address,
        ],
        [fee, fee],
        slippage=slippage,
    )
    assert amount_in == expected_amount_in
See the full example code.
More information about Uniswap v3 and Python.
Here is my test sample (test_time.py):
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pytest
from datetime import datetime, timedelta
# (minuend, subtrahend, expected difference); the ids give each case a readable name.
testdata = [
    (datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1)),
    (datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1)),
]


@pytest.mark.parametrize("a,b,expected", testdata, ids=[u"中文", u"English"])
def test_timedistance_v1(a, b, expected):
    """Both cases intentionally fail: diff equals expected, so `!=` is False."""
    diff = a - b
    assert diff != expected
Here is the pytest output:
============================================================================== FAILURES ==============================================================================
_________________________________________________________________ test_timedistance_v1[\u4e2d\u6587] _________________________________________________________________
a = datetime.datetime(2001, 12, 12, 0, 0), b = datetime.datetime(2001, 12, 11, 0, 0), expected = datetime.timedelta(1)
#pytest.mark.parametrize("a,b,expected", testdata, ids=[u"中文", u"English"])
def test_timedistance_v1(a, b, expected):
diff = a - b
> assert diff != expected
E assert datetime.timedelta(1) != datetime.timedelta(1)
test_time.py:15: AssertionError
___________________________________________________________________ test_timedistance_v1[English] ____________________________________________________________________
a = datetime.datetime(2001, 12, 11, 0, 0), b = datetime.datetime(2001, 12, 12, 0, 0), expected = datetime.timedelta(-1)
#pytest.mark.parametrize("a,b,expected", testdata, ids=[u"中文", u"English"])
def test_timedistance_v1(a, b, expected):
diff = a - b
> assert diff != expected
E assert datetime.timedelta(-1) != datetime.timedelta(-1)
test_time.py:15: AssertionError
====================================================================== 2 failed in 0.05 seconds ======================================================================
For the second line in output , the test name is "test_timedistance_v1[\u4e2d\u6587]" , I hope it's "test_timedistance_v1[中文]", does py.test support it?
(my pytest version is 3.1.2, OS: macOS 10.12.5)
It does not depend on pytest but on your computer's locale.
Here the trace-log of test (LC_ALL="en_US.UTF-8") :
================================ test session starts ================================
platform linux -- Python 3.5.3, pytest-2.9.2, py-1.4.34, pluggy-0.3.1
rootdir: /home/..../tmp, inifile:
collected 2 items
pytest_chin.py FF
===================================== FAILURES ======================================
_____________________________ test_timedistance_v1[中文] ______________________________
...
And with with LC_ALL="fr_FR.iso8859-1" :
================================ test session starts ================================
platform linux -- Python 3.5.3, pytest-2.9.2, py-1.4.34, pluggy-0.3.1
rootdir: /home/gustavi/tmp, inifile:
collected 2 items
pytest_chin.py FF
===================================== FAILURES ======================================
\x1b[1m\x1b[31m_____________________________ test_timedistance_v1[\u4e2d\u6587] ______________________________\x1b[0m
...
Here is a useful link to set up your locale on OS X.
I would love to see the last 10 lines which were executed by the python interpreter before this exception occured:
test_has_perm_in_foobar.py F
Traceback (most recent call last):
File "/.../test_has_perm_in_foobar.py", line 50, in test_has_perm
self.assertFalse(check_perm(request, some_object))
File "/usr/lib/python2.7/unittest/case.py", line 416, in assertFalse
raise self.failureException(msg)
AssertionError: True is not false
I want to see where check_perm() returned True.
I know that I could use interactive debugging to find the matching line, but I am lazy and want an easier way to find the line where check_perm() produced its return value.
I use pyCharm, but a text based tool, would solve my need, too.
BTW: Please don't tell me how to use the debugger with step-over and step-into. I know this.
Here is some code to illustrate it.
def check_perm(request, some_object):
    """Illustrative pseudocode: several independent paths can return True."""
    # condition_1/2/3 and sub_check/sub_check2 are placeholders, not real names.
    if condition_1:
        return True
    if condition_2:
        return sub_check(some_object)
    if condition_3:
        return sub_check2(some_object)
    ...
There are several ways where check_perm() could return True. If True was returned because of condition_1, then I want to see something like this
+ if condition_1:
+ return True
The output I have in mind is like set -x on the shell.
Update
cgitb, pytest and other tools can show the lines before the line where the assertion failed. BUT, they only show the lines of the current python file. This question is about the lines which were executed before the assertion happens, but covering all files. In my case I want to know where the return value of check_perm() was created. The tools pytest, cgitb, ... don't show this.
What I am searching is like set -x on the shell:
help set
-x Print commands and their arguments as they are executed.
For this reason I've switched testing to pytest.
It can show local variables and traceback with different detalization level. Line where call was done is marked with >.
For example in my django project:
$ py.test --showlocals --tb=long
=============================== test session starts ===============================
platform darwin -- Python 3.5.1, pytest-3.0.3, py-1.4.31, pluggy-0.4.0
Django settings: dj_tg_bot.settings (from ini file)
rootdir: /Users/el/Projects/dj-tg-alpha-bot, inifile: tox.ini
plugins: django-3.0.0, cov-2.4.0
collected 8 items
tests/test_commands.py ....F
tests/test_logger.py .
tests/test_simple.py ..
==================================== FAILURES =====================================
__________________________ TestSimpleCommands.test_start __________________________
self = <tests.test_commands.TestSimpleCommands testMethod=test_start>
def test_start(self,):
"""
Test bot accept normally command /start and replies as it should.
"""
> self._test_message_ok(self.start)
self = <tests.test_commands.TestSimpleCommands testMethod=test_start>
tests/test_commands.py:56:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tests.test_commands.TestSimpleCommands testMethod=test_start>
action = {'in': ' /start', 'out': {'parse_mode': 'Markdown', 'reply_markup': '', 'text': 'Welcome'}}
update = <telegram.update.Update object at 0x113e16cf8>, number = 1
def _test_message_ok(self, action, update=None, number=1):
if not update:
update = self.update
with mock.patch("telegram.bot.Bot.sendMessage", callable=mock.MagicMock()) as mock_send:
if 'in' in action:
update.message.text = action['in']
response = self.client.post(self.webhook_url, update.to_json(), **self.kwargs)
# Check response 200 OK
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Check
> self.assertBotResponse(mock_send, action)
action = {'in': ' /start', 'out': {'parse_mode': 'Markdown', 'reply_markup': '', 'text': 'Welcome'}}
mock_send = <MagicMock name='sendMessage' id='4619939344'>
number = 1
response = <Response status_code=200, "application/json">
self = <tests.test_commands.TestSimpleCommands testMethod=test_start>
update = <telegram.update.Update object at 0x113e16cf8>
../../.pyenv/versions/3.5.1/lib/python3.5/site-packages/telegrambot/test/testcases.py:83:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tests.test_commands.TestSimpleCommands testMethod=test_start>
mock_send = <MagicMock name='sendMessage' id='4619939344'>
command = {'in': ' /start', 'out': {'parse_mode': 'Markdown', 'reply_markup': '', 'text': 'Welcome'}}
def assertBotResponse(self, mock_send, command):
> args, kwargs = mock_send.call_args
E TypeError: 'NoneType' object is not iterable
command = {'in': ' /start', 'out': {'parse_mode': 'Markdown', 'reply_markup': '', 'text': 'Welcome'}}
mock_send = <MagicMock name='sendMessage' id='4619939344'>
self = <tests.test_commands.TestSimpleCommands testMethod=test_start>
../../.pyenv/versions/3.5.1/lib/python3.5/site-packages/telegrambot/test/testcases.py:61: TypeError
------------------------------ Captured stderr call -------------------------------
Handler not found for {'message': {'from': {'username': 'username_4', 'last_name': 'last_name_4', 'id': 5, 'first_name': 'first_name_4'}, 'chat': {'username': 'username_4', 'last_name': 'last_name_4', 'first_name': 'first_name_4', 'title': 'title_4', 'type': 'private', 'id': 5}, 'text': ' /start', 'message_id': 5, 'date': 1482500826}, 'update_id': 5}
======================= 1 failed, 7 passed in 2.29 seconds ========================
(.env) ✘-1 ~/Projects/dj-tg-alpha-bot [master|✚ 1…8⚑ 12]
16:47 $
What about cgitb? You just need to import this module in your code.
import cgitb

# Emit plain-text tracebacks that include per-frame source context and locals.
# (Note: the cgitb module is deprecated since Python 3.11 and removed in 3.13.)
cgitb.enable(format='text')


def f():
    """Deliberately divide by zero so cgitb can display the failing frame."""
    a = 1
    b = 2
    c = 3
    x = 0
    d = a * b * c / x
    return d


if __name__ == "__main__":
    f()
Gives:
ZeroDivisionError
Python 3.5.2: /usr/bin/python3
Mon Dec 19 17:42:34 2016
A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.
/home/user1/123.py in <module>()
10 d = a * b * c / x
11 return x
12
13 if __name__ == "__main__":
14 f()
f = <function f>
/home/user1/123.py in f()
8 c = 3
9 x = 0
10 d = a * b * c / x
11 return x
12
d undefined
a = 1
b = 2
c = 3
x = 0
ZeroDivisionError: division by zero
...
The above is a description of an error in a Python program. Here is
the original traceback:
Traceback (most recent call last):
File "123.py", line 14, in <module>
f()
File "123.py", line 10, in f
d = a * b * c / x
ZeroDivisionError: division by zero
Since I could not find a solution, I wrote this myself:
with trace_function_calls():
self.assertFalse(check_perm(request, some_object))
Implementation of trace_function_calls():
class trace_function_calls(object):
    """Context manager that logs every executed line, like shell ``set -x``.

    Installs a trace function via sys.settrace() for the duration of the
    ``with`` block and restores the previous trace function on exit.
    """

    depth_symbol = '+'

    def __init__(self, write_method=None, log_lines=True):
        '''
        write_method: A method which gets called for every executed line. Defaults to logger.info

        # Simple example:
        with debugutils.trace_function_calls():
            method_you_want_to_trace()
        '''
        if write_method is None:
            write_method = logger.info
        self.write_method = write_method
        self.log_lines = log_lines

    def __enter__(self):
        # Remember any pre-existing trace function so it can be restored.
        self.old = sys.gettrace()
        self.depth = 0
        sys.settrace(self.trace_callback)

    def __exit__(self, type, value, traceback):
        sys.settrace(self.old)

    def trace_callback(self, frame, event, arg):
        # from http://pymotw.com/2/sys/tracing.html#tracing-function-calls
        if event == 'return':
            self.depth -= 1
            return self.trace_callback
        if event == 'line':
            if not self.log_lines:
                return self.trace_callback
        elif event == 'call':
            self.depth += 1
        else:
            # self.write_method('unknown: %s' % event)
            return self.trace_callback
        msg = []
        # One depth symbol per call-stack level, set -x style.
        msg.append(self.depth_symbol * self.depth)
        co = frame.f_code
        func_name = co.co_name
        func_line_no = frame.f_lineno
        func_filename = co.co_filename
        # Skip frames that are not part of the project's own code.
        if not is_python_file_from_my_codebase(func_filename):
            return self.trace_callback
        code_line = linecache.getline(func_filename, func_line_no).rstrip()
        msg.append('%s: %s %r on line %s of %s' % (
            event, func_name, code_line, func_line_no, func_filename))
        self.write_method(' '.join(msg))
        return self.trace_callback
PS: This is open source software. If you want to create a python package, do it, tell me, it would make me glad.
The trace module has bourne compatible shell set -x like feature. The trace parameter of trace.Trace class enables line execution tracing. This class takes an ignoredirs parameter which is used to ignore tracing modules or packages located below the specified directory. I use it here to keep the tracer from tracing the unittest module.
test_has_perm_in_foobar.py
import sys
import trace
import unittest

from app import check_perm

# Trace every executed line, but ignore modules installed under the
# interpreter prefixes (i.e. the stdlib and site-packages).
tracer = trace.Trace(trace=1, ignoredirs=(sys.prefix, sys.exec_prefix))


class Test(unittest.TestCase):
    def test_one(self):
        # runctx traces the statement while evaluating it in this scope.
        tracer.runctx('self.assertFalse(check_perm("dummy", 3))', globals(), locals())


if __name__ == '__main__':
    unittest.main()
app.py
def sub_check1(some_object):
if some_object * 10 == 20:
return True
def sub_check2(some_object):
if some_object * 10 == 30:
return True
def check_perm(request, some_object):
if some_object == 1:
return True
if some_object == 2:
return sub_check1(some_object)
if some_object == 3:
return sub_check2(some_object)
Test;
$ python test_has_perm_in_foobar.py
--- modulename: test_has_perm_in_foobar, funcname: <module>
<string>(1): --- modulename: app, funcname: check_perm
app.py(10): if some_object == 1:
app.py(12): if some_object == 2:
app.py(14): if some_object == 3:
app.py(15): return sub_check2(some_object)
--- modulename: app, funcname: sub_check2
app.py(6): if some_object * 10 == 30:
app.py(7): return True
F
======================================================================
FAIL: test_one (__main__.Test)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_has_perm_in_foobar.py", line 23, in test_one
tracer.runctx('self.assertFalse(check_perm("dummy", 3))', globals(), locals())
File "/usr/lib/python2.7/trace.py", line 513, in runctx
exec cmd in globals, locals
File "<string>", line 1, in <module>
AssertionError: True is not false
----------------------------------------------------------------------
Ran 1 test in 0.006s
FAILED (failures=1)
To make the code and the output even more shorter, just trace the required function alone.
import trace
import unittest

from app import check_perm

# No ignoredirs needed: runfunc below traces only the target function.
tracer = trace.Trace(trace=1)


class Test(unittest.TestCase):
    def test_one(self):
        # runfunc traces just check_perm (and its callees) and returns its result.
        self.assertFalse(tracer.runfunc(check_perm, 'dummy', 3))


if __name__ == '__main__':
    unittest.main()
Test;
$ python test_has_perm_in_foobar.py
--- modulename: app, funcname: check_perm
app.py(10): if some_object == 1:
app.py(12): if some_object == 2:
app.py(14): if some_object == 3:
app.py(15): return sub_check2(some_object)
--- modulename: app, funcname: sub_check2
app.py(6): if some_object * 10 == 30:
app.py(7): return True
F
======================================================================
FAIL: test_one (__main__.Test)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_has_perm_in_foobar.py", line 19, in test_one
self.assertFalse(tracer.runfunc(check_perm, 'dummy', 3))
AssertionError: True is not false
----------------------------------------------------------------------
Ran 1 test in 0.005s
FAILED (failures=1)
Have you considered the following workflow? I read your BTW but hard rules sometimes stop us from solving our problems(especially if you are in an XY trap) so I'm going to suggest you use the debugger anyway. I run into tests that fail all the time. When a full stack trace is critical to solving the problem, I use a combination of pdb and py.test to get the whole shebang. Considering the following program...
import pytest


# The mark must be applied with the @ decorator syntax to take effect.
@pytest.mark.A
def test_add():
    """Intentionally failing: add() asserts a > b and receives a=1, b=2."""
    a = 1
    b = 2
    add(a, b)


def add(a, b):
    assert a > b
    return a + b


def main():
    add(1, 2)  # raises AssertionError (1 > 2 is False)
    add(2, 1)


if __name__ == "__main__":
    # execute only if run as a script
    main()
Running the command py.test -v -tb=short -m A code.py results in the following output...
art#macky ~/src/python/so-answer-stacktrace: py.test -v --tb=short -m A code.py
============================= test session starts ==============================
platform darwin -- Python 2.7.5 -- pytest-2.5.0 -- /Users/art/.pyenv/versions/2.7.5/bin/python
collected 1 items
code.py:3: test_add FAILED
=================================== FAILURES ===================================
___________________________________ test_add ___________________________________
code.py:9: in test_add
> add(a,b)
code.py:12: in add
> assert a>b
E assert 1 > 2
=========================== 1 failed in 0.01 seconds ===========================
One simple way to investigate the stack trace is to drop a pdb debug point in the test, Mark the individual test with a pytest mark, invoke that test, and inspect the stack inside the debugger. like so...
def add(a, b):
from pdb import set_trace;set_trace()
assert a>b
return a+b
Now when I run the same test command again I get a suspended pdb debugger. Like so...
art#macky ~/src/python/so-answer-stacktrace: py.test -v --tb=short -m A code.py
=========================================================================================== test session starts ============================================================================================
platform darwin -- Python 2.7.5 -- pytest-2.5.0 -- /Users/art/.pyenv/versions/2.7.5/bin/python
collected 1 items
code.py:3: test_add
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> PDB set_trace (IO-capturing turned off) >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
> /Users/art/src/python/so-answer-stacktrace/code.py(13)add()
-> assert a>b
(Pdb)
If at this point I type the magic "w" (for "where") and hit enter, I see the full stack trace in all its glory...
(Pdb) w
/Users/art/.pyenv/versions/2.7.5/bin/py.test(9)<module>()
-> load_entry_point('pytest==2.5.0', 'console_scripts', 'py.test')()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/config.py(19)main()
-> return config.hook.pytest_cmdline_main(config=config)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(376)__call__()
-> return self._docall(methods, kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(387)_docall()
-> res = mc.execute()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(288)execute()
-> res = method(**kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/main.py(111)pytest_cmdline_main()
-> return wrap_session(config, _main)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/main.py(81)wrap_session()
-> doit(config, session)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/main.py(117)_main()
-> config.hook.pytest_runtestloop(session=session)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(376)__call__()
-> return self._docall(methods, kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(387)_docall()
-> res = mc.execute()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(288)execute()
-> res = method(**kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/main.py(137)pytest_runtestloop()
-> item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(376)__call__()
-> return self._docall(methods, kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(387)_docall()
-> res = mc.execute()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(288)execute()
-> res = method(**kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/runner.py(62)pytest_runtest_protocol()
-> runtestprotocol(item, nextitem=nextitem)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/runner.py(72)runtestprotocol()
-> reports.append(call_and_report(item, "call", log))
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/runner.py(106)call_and_report()
-> call = call_runtest_hook(item, when, **kwds)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/runner.py(124)call_runtest_hook()
-> return CallInfo(lambda: ihook(item=item, **kwds), when=when)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/runner.py(137)__init__()
-> self.result = func()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/runner.py(124)<lambda>()
-> return CallInfo(lambda: ihook(item=item, **kwds), when=when)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/main.py(161)call_matching_hooks()
-> return hookmethod.pcall(plugins, **kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(380)pcall()
-> return self._docall(methods, kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(387)_docall()
-> res = mc.execute()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(288)execute()
-> res = method(**kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/runner.py(86)pytest_runtest_call()
-> item.runtest()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/python.py(1076)runtest()
-> self.ihook.pytest_pyfunc_call(pyfuncitem=self)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/main.py(161)call_matching_hooks()
-> return hookmethod.pcall(plugins, **kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(380)pcall()
-> return self._docall(methods, kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(387)_docall()
-> res = mc.execute()
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/core.py(288)execute()
-> res = method(**kwargs)
/Users/art/.pyenv/versions/2.7.5/lib/python2.7/site-packages/pytest-2.5.0-py2.7.egg/_pytest/python.py(188)pytest_pyfunc_call()
-> testfunction(**testargs)
/Users/art/src/python/so-answer-stacktrace/code.py(9)test_add()
-> add(a,b)
> /Users/art/src/python/so-answer-stacktrace/code.py(13)add()
-> assert a>b
(Pdb)
I do a lot of work in frameworks. pdb + "where" gives you everything up to the actual entry point of the program. You can see buried in there my functions as well as the test runner's frames. If this were Django or Flask, I would see all the stack frames involved in the internals of those frameworks. It's my full stopgap for when things go really wrong.
If you have a test with lots of iterations or conditionals, you might find yourself getting hung up on the same lines again and again. The solution is to be clever about where you choose to instrument with pdb: nest it inside a conditional, or guard an iteration/recursion with a conditional (essentially saying "when this becomes True, suspend so I can inspect what is going on"). Furthermore, pdb lets you look at all the runtime context (assignments, state, etc.).
For your case it looks like a creative instrumentation of check_perm is in order.