Capture assertion message in Pytest - python

I am trying to capture the return value of a pytest test. I am running these tests programmatically, and I want to return relevant information when a test fails.
I thought I could perhaps return the value of kernel as follows, so that I can print that information later when listing failed tests:
def test_eval(test_input, expected):
    kernel = os.system("uname -r")
    assert eval(test_input) == expected, kernel
This doesn't work. When I later loop through the generated TestReports, there is no way to access any return information. The only information available in the TestReport is the name of the test and a pass/fail outcome.
For example one of the test reports looks as follows:
<TestReport 'test_simulation.py::test_host_has_correct_kernel_version[simulation-host]' when='call' outcome='failed'>
Is there a way to return a value after the assert fails, back to the TestReport? I have tried doing this with PyTest plugins but have been unsuccessful.
Here is the code I am using to run the tests programmatically. You can see where I am trying to access the return value.
import pytest
from util import bcolors


class Plugin:
    def __init__(self):
        self.passed_tests = set()
        self.skipped_tests = set()
        self.failed_tests = set()
        self.unknown_tests = set()

    def pytest_runtest_logreport(self, report):
        print(report)
        if report.passed:
            self.passed_tests.add(report)
        elif report.skipped:
            self.skipped_tests.add(report)
        elif report.failed:
            self.failed_tests.add(report)
        else:
            self.unknown_tests.add(report)


if __name__ == "__main__":
    plugin = Plugin()
    pytest.main(["-s", "-p", "no:terminal"], plugins=[plugin])

    for passed in plugin.passed_tests:
        result = passed.nodeid
        print(bcolors.OKGREEN + "[OK]\t" + bcolors.ENDC + result)

    for skipped in plugin.skipped_tests:
        result = skipped.nodeid
        print(bcolors.OKBLUE + "[SKIPPED]\t" + bcolors.ENDC + result)

    for failed in plugin.failed_tests:
        result = failed.nodeid
        print(bcolors.FAIL + "[FAIL]\t" + bcolors.ENDC + result)

    for unknown in plugin.unknown_tests:
        result = unknown.nodeid
        print(bcolors.FAIL + "[FAIL]\t" + bcolors.ENDC + result)
The goal is to be able to print out "extra context information" when printing the FAILED tests, so that there is information immediately available to help debug why the test is failing.

You can extract failure details from the raised AssertionError in a custom pytest_exception_interact hookimpl. Example:
# conftest.py

def pytest_exception_interact(node, call, report):
    # the assertion message should be parsed here,
    # because pytest rewrites assert statements in bytecode
    message = call.excinfo.value.args[0]
    lines = message.split()
    kernel = lines[0]
    report.sections.append((
        'Kernels reported in assert failures:',
        f'{report.nodeid} reported {kernel}'
    ))
Running a test module
import subprocess


def test_bacon():
    assert True


def test_eggs():
    kernel = subprocess.run(
        ["uname", "-r"],
        stdout=subprocess.PIPE,
        text=True
    ).stdout
    assert 0 == 1, kernel
yields:
test_spam.py::test_bacon PASSED [ 50%]
test_spam.py::test_eggs FAILED [100%]
=================================== FAILURES ===================================
__________________________________ test_eggs ___________________________________
def test_eggs():
kernel = subprocess.run(
["uname", "-r"],
stdout=subprocess.PIPE,
text=True
).stdout
> assert 0 == 1, kernel
E AssertionError: 5.5.15-200.fc31.x86_64
E
E assert 0 == 1
E +0
E -1
test_spam.py:12: AssertionError
--------------------- Kernels reported in assert failures: ---------------------
test_spam.py::test_eggs reported 5.5.15-200.fc31.x86_64
=========================== short test summary info ============================
FAILED test_spam.py::test_eggs - AssertionError: 5.5.15-200.fc31.x86_64
========================= 1 failed, 1 passed in 0.05s ==========================
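When the tests are run programmatically as in the question, those appended sections travel with the TestReport, so the failure loop can print them. A minimal sketch of how the question's [FAIL] loop could surface them (assuming the conftest.py hook above is in place; report.sections is a list of (title, content) tuples):

    for failed in plugin.failed_tests:
        result = failed.nodeid
        # pull out the context added by pytest_exception_interact
        extra = "; ".join(content for title, content in failed.sections)
        print(bcolors.FAIL + "[FAIL]\t" + bcolors.ENDC + result + " " + extra)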

Related

pytest-4.x.x: How to report SKIPPED tests like XFAILED?

When a test is xfailed, the printed reason includes the test file, test class, and test case, while a skipped test case reports only the test file and the line where skip is called.
Here is a test example:
#!/usr/bin/env pytest
import pytest


@pytest.mark.xfail(reason="Reason of failure")
def test_1():
    pytest.fail("This will fail here")


@pytest.mark.skip(reason="Reason of skipping")
def test_2():
    pytest.fail("This will fail here")
This is the actual result:
pytest test_file.py -rsx
============================= test session starts =============================
platform linux -- Python 3.5.2, pytest-4.4.1, py-1.7.0, pluggy-0.9.0
rootdir: /home/ashot/questions
collected 2 items
test_file.py xs [100%]
=========================== short test summary info ===========================
SKIPPED [1] test_file.py:9: Reason of skipping
XFAIL test_file.py::test_1
Reason of failure
==================== 1 skipped, 1 xfailed in 0.05 seconds =====================
But I would expect to get something like:
pytest test_file.py -rsx
============================= test session starts =============================
platform linux -- Python 3.5.2, pytest-4.4.1, py-1.7.0, pluggy-0.9.0
rootdir: /home/ashot/questions
collected 2 items
test_file.py xs [100%]
=========================== short test summary info ===========================
XFAIL test_file.py::test_1: Reason of failure
SKIPPED test_file.py::test_2: Reason of skipping
==================== 1 skipped, 1 xfailed in 0.05 seconds =====================
You have two possible ways to achieve this. The quick and dirty way: just redefine _pytest.skipping.show_xfailed in your test_file.py:
import _pytest


def custom_show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            s = "XFAIL %s" % (pos,)
            if reason:
                s += ": " + str(reason)
            lines.append(s)


# show_xfailed_bkp = _pytest.skipping.show_xfailed
_pytest.skipping.show_xfailed = custom_show_xfailed

# ... your tests
The (not so) clean way: create a conftest.py file in the same directory as your test_file.py, and add a hook:
import pytest
import _pytest


def custom_show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            s = "XFAIL %s" % (pos,)
            if reason:
                s += ": " + str(reason)
            lines.append(s)


@pytest.hookimpl(tryfirst=True)
def pytest_terminal_summary(terminalreporter):
    tr = terminalreporter
    if not tr.reportchars:
        return
    lines = []
    for char in tr.reportchars:
        if char == "x":
            custom_show_xfailed(terminalreporter, lines)
        elif char == "X":
            _pytest.skipping.show_xpassed(terminalreporter, lines)
        elif char in "fF":
            _pytest.skipping.show_simple(terminalreporter, lines, 'failed', "FAIL %s")
        elif char in "sS":
            _pytest.skipping.show_skipped(terminalreporter, lines)
        elif char == "E":
            _pytest.skipping.show_simple(terminalreporter, lines, 'error', "ERROR %s")
        elif char == 'p':
            _pytest.skipping.show_simple(terminalreporter, lines, 'passed', "PASSED %s")
    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)
    tr.reportchars = []  # to avoid further output
The second method is overkill, because you have to redefine the whole pytest_terminal_summary.
Thanks to this answer I've found the following solution that works perfectly for me.
I've created a conftest.py file in the root of my test suite with the following content:
import _pytest.skipping as s


def show_xfailed(tr, lines):
    for rep in tr.stats.get("xfailed", []):
        pos = tr.config.cwd_relative_nodeid(rep.nodeid)
        reason = rep.wasxfail
        s = "XFAIL\t%s" % pos
        if reason:
            s += ": " + str(reason)
        lines.append(s)

s.REPORTCHAR_ACTIONS["x"] = show_xfailed


def show_skipped(tr, lines):
    for rep in tr.stats.get("skipped", []):
        pos = tr.config.cwd_relative_nodeid(rep.nodeid)
        reason = rep.longrepr[-1]
        if reason.startswith("Skipped: "):
            reason = reason[9:]
        verbose_word = s._get_report_str(tr.config, report=rep)
        lines.append("%s\t%s: %s" % (verbose_word, pos, reason))

s.REPORTCHAR_ACTIONS["s"] = show_skipped
s.REPORTCHAR_ACTIONS["S"] = show_skipped
And now I'm getting the following output:
./test_file.py -rsx
============================= test session starts =============================
platform linux -- Python 3.5.2, pytest-4.4.1, py-1.7.0, pluggy-0.9.0
rootdir: /home/ashot/questions
collected 2 items
test_file.py xs [100%]
=========================== short test summary info ===========================
SKIPPED test_file.py::test_2: Reason of skipping
XFAIL test_file.py::test_1: Reason of failure
==================== 1 skipped, 1 xfailed in 0.05 seconds =====================
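Both variants poke at _pytest.skipping internals, which have moved around between pytest releases. A rough sketch of the same idea using only the public pytest_terminal_summary hook and terminalreporter.stats (untested against every pytest version; the section title is arbitrary):

    # conftest.py
    def pytest_terminal_summary(terminalreporter):
        tr = terminalreporter
        tr.write_sep("=", "xfail/skip reasons")
        for rep in tr.stats.get("xfailed", []):
            # rep.wasxfail carries the xfail reason
            tr.write_line("XFAIL %s: %s" % (rep.nodeid, getattr(rep, "wasxfail", "")))
        for rep in tr.stats.get("skipped", []):
            # for a skip, longrepr is (path, lineno, "Skipped: <reason>")
            reason = rep.longrepr[-1] if rep.longrepr else ""
            tr.write_line("SKIPPED %s: %s" % (rep.nodeid, reason))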

How to get a list of TestReports at the end of a py.test run?

I want to get a list of all tests (e.g. in the form of a py.test TestReport) at the end of all tests.
I know that pytest_runtest_makereport does something similar, but only for a single test. But I want to implement a hook or something in conftest.py to process the whole list of tests before the py.test application terminates.
Is there a way to do this?
Here is an example which can help you. Structure of files:
/example
    __init__.py       # empty file
    /test_pack_1
        __init__.py   # empty file
        conftest.py   # pytest hooks
        test_my.py    # a few tests for demonstration
There are 2 tests in test_my.py:
def test_one():
    assert 1 == 1
    print('1==1')


def test_two():
    assert 1 == 2
    print('1!=2')
Example of conftest.py:
import pytest
from _pytest.runner import TestReport
from _pytest.terminal import TerminalReporter


@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter):  # type: (TerminalReporter) -> generator
    yield
    # you can do anything here - I just print report info
    print('*' * 8 + 'HERE CUSTOM LOGIC' + '*' * 8)
    for failed in terminalreporter.stats.get('failed', []):  # type: TestReport
        print('failed! node_id:%s, duration: %s, details: %s' % (failed.nodeid,
                                                                 failed.duration,
                                                                 str(failed.longrepr)))
    for passed in terminalreporter.stats.get('passed', []):  # type: TestReport
        print('passed! node_id:%s, duration: %s, details: %s' % (passed.nodeid,
                                                                 passed.duration,
                                                                 str(passed.longrepr)))
The documentation says that pytest_terminal_summary also takes an exitstatus argument.
Run tests without any additional options: py.test ./example. Example of output:
example/test_pack_1/test_my.py .F
********HERE CUSTOM LOGIC********
failed! node_id:test_pack_1/test_my.py::test_two, duration: 0.000385999679565, details: def test_two():
> assert 1 == 2
E assert 1 == 2
example/test_pack_1/test_my.py:7: AssertionError
passed! node_id:test_pack_1/test_my.py::test_one, duration: 0.00019907951355, details: None
=================================== FAILURES ===================================
___________________________________ test_two ___________________________________
def test_two():
> assert 1 == 2
E assert 1 == 2
example/test_pack_1/test_my.py:7: AssertionError
====================== 1 failed, 1 passed in 0.01 seconds ======================
Hope this helps.
Note! Make sure that .pyc files were removed before running the tests.
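If you would rather not go through the terminal reporter at all, the same information can be gathered with a stateful conftest.py and processed in pytest_sessionfinish. A sketch (both hooks are standard pytest hooks; the collecting logic itself is just an illustration):

    # conftest.py
    all_reports = []

    def pytest_runtest_logreport(report):
        # called for the setup, call and teardown phase of every test
        all_reports.append(report)

    def pytest_sessionfinish(session, exitstatus):
        failed_calls = [r for r in all_reports if r.when == "call" and r.failed]
        print("collected %d reports, %d failed test calls" % (len(all_reports), len(failed_calls)))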

How to use a kernel in another language (not the default) with jupyter_client?

I have a simple jupyter_client test case (below). I would like to know what I need to change or add to be able to use another kernel such as a bash shell or IRkernel for R.
import queue
from jupyter_client.manager import start_new_kernel

kernel_manager, client = start_new_kernel()


def runCode(code):
    ### Execute the code
    client.execute(code)

    ### Get the execution status
    ### When the execution state is "idle" it is complete
    io_msg = client.get_iopub_msg(timeout=1)
    io_msg_content = io_msg['content']

    ### We're going to catch this here before we start polling
    if 'execution_state' in io_msg_content and io_msg_content['execution_state'] == 'idle':
        return "no output"

    ### Continue polling for execution to complete
    ### which is indicated by having an execution state of "idle"
    while True:
        ### Save the last message content. This will hold the solution.
        ### The next one has the idle execution state indicating the execution
        ### is complete, but not the stdout output
        temp = io_msg_content

        ### Poll the message
        try:
            io_msg = client.get_iopub_msg(timeout=1)
            io_msg_content = io_msg['content']
            if (
                'execution_state' in io_msg_content
                and io_msg_content['execution_state'] == 'idle'
            ):
                break
        except queue.Empty:
            print("timeout get_iopub_msg")
            break

    ### Check the message for various possibilities
    if 'data' in temp:  # Indicates completed operation
        out = temp['data']['text/plain']
    elif 'name' in temp and temp['name'] == "stdout":  # indicates output
        out = temp['text']
    elif 'traceback' in temp:  # Indicates error
        print("ERROR")
        out = '\n'.join(temp['traceback'])  # Put error into nice format
    else:
        out = ''

    return out


commands = [
    '!pwd',
    '!echo "hello"',
    '!ls',
    '1+1',
    'a=5',
    'b=0',
    'print()',
    'b',
    'print()',
    'print("hello there")',
    'print(a*10)',
    'c=1/b'
]

for command in commands:
    print(">>>" + command)
    out = runCode(command)
    print(out)
Output:
>>>!pwd
>>>!echo "hello"
"hello"
>>>!ls
>>>1+1
2
>>>a=5
>>>b=0
>>>print()
>>>b
0
>>>print()
>>>print("hello there")
hello there
>>>print(a*10)
50
>>>c=1/b
ERROR
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-12-47a519732db5> in <module>()
----> 1 c=1/b
ZeroDivisionError: division by zero
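For what it's worth, start_new_kernel accepts a kernel_name argument that selects any installed kernelspec, so the same polling loop can drive another language. A sketch (assumes the bash kernel from the bash_kernel package, or IRkernel registered as "ir", is already installed; not tested here):

    from jupyter_client.manager import start_new_kernel

    # start a bash kernel instead of the default python one
    # (e.g. after `pip install bash_kernel && python -m bash_kernel.install`)
    kernel_manager, client = start_new_kernel(kernel_name="bash")
    client.execute('echo "hello from bash"')

    # for R, after installing IRkernel and running IRkernel::installspec():
    # kernel_manager, client = start_new_kernel(kernel_name="ir")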

Grinder JDBC test script error: "The result of 'TestRunner()' is not callable"

I use a JDBC.py script to run performance testing. The Grinder log info:
2015-10-14 18:42:40,132 ERROR com-0 thread-24: aborting thread - {}The result of 'TestRunner()' is not callable
net.grinder.scriptengine.jython.JythonScriptExecutionException: The result of 'TestRunner()' is not callable
at net.grinder.scriptengine.jython.JythonScriptEngine.createWorkerRunnable(JythonScriptEngine.java:183) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderProcess$ThreadStarterImplementation$2.create(GrinderProcess.java:784) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderThread.run(GrinderThread.java:90) ~[grinder-core-3.11.jar:na]
at java.lang.Thread.run(Thread.java:744) [na:1.7.0_45]
2015-10-14 18:42:40,132 ERROR com-0 thread-3: aborting thread - {}The result of 'TestRunner()' is not callable
net.grinder.scriptengine.jython.JythonScriptExecutionException: The result of 'TestRunner()' is not callable
at net.grinder.scriptengine.jython.JythonScriptEngine.createWorkerRunnable(JythonScriptEngine.java:183) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderProcess$ThreadStarterImplementation$2.create(GrinderProcess.java:784) ~[grinder-core-3.11.jar:na]
at net.grinder.engine.process.GrinderThread.run(GrinderThread.java:90) ~[grinder-core-3.11.jar:na]
at java.lang.Thread.run(Thread.java:744) [na:1.7.0_45]
I modified the script, but the error persists. Please help check it.
My test script:
# The sorting test supports a configurable array length.
# It runs the JavaTest.sort method of the JavaTest class.

from net.grinder.script.Grinder import grinder
from net.grinder.script import Test
from datetime import datetime
from datetime import timedelta

from java.sql import DriverManager
from oracle.jdbc import OracleDriver

########################################
#
# main body of test script starts here
#
########################################

# Get the properties to access test configuration information
properties = grinder.getProperties()

# The description is a property (instead of a hardcoded string in this script)
#test = Test(1, properties.get("javatest.description"))
test = Test(2, properties.get("javatest.description"))

# select the method for which to collect information
# test.record(WriteMulitpleLittleFile.write)

# initialize data for compressing
# fileName = properties.get("javatest.fileToCompress")
# grinder.logger.info("data file to compress is " + fileName)
# JavaTest.initializeCompression(fileName)

# If the run mode is runOnce, the TestRunner class will
# run once. Otherwise, if the run mode is continuous,
# the TestRunner class will run the test for at least
# the specified duration (but possibly longer)
runMode = properties.get("javatest.runMode")

#WriteMulitpleLittleFile.setParameters(dir, fileSize...)
if runMode == "continuous":
    # figure out how long to run the test
    m = int(properties.getProperty("javatest.durationMinutes", "0"))
    h = int(properties.getProperty("javatest.durationHours", "0"))
    d = int(properties.getProperty("javatest.durationDays", "0"))
    duration = timedelta(minutes=m, hours=h, days=d)
    grinder.logger.info("run mode is continuous, duration is " + str(duration))
elif runMode == "runOnce":
    grinder.logger.info("run mode is run once")
    duration = timedelta(minutes=0)
else:
    grinder.logger.info("run mode not set or not recognized, default to run once")
    duration = timedelta(minutes=0)

########################################
#
# The TestRunner class is used by The Grinder to perform the test
#
########################################

#test1 = Test(1, "Database insert")
test2 = Test(2, "Database query")

# Load the Oracle JDBC driver.
DriverManager.registerDriver(OracleDriver())


def getConnection():
    return DriverManager.getConnection(
        "jdbc:oracle:thin:@den00bvr.us.oracle.com:1521:orcl", "PBPUBLIC", "PBPUBLIC")


def ensureClosed(object):
    try: object.close()
    except: pass


# One time initialisation that cleans out old data.
connection = getConnection()
statement = connection.createStatement()

#try: statement.execute("drop table grinder_test1126")
#except: pass
#statement.execute("create table grinder_test1126(thread number, run number)")

ensureClosed(statement)
ensureClosed(connection)


class TestRunner:
    def __init__(self):
        # tid = grinder.threadNumber
        # if (grinder.threadNumber % 2 == 0):
        #     Even threadNumber
        #     Do insertStatement
        # else:
        #     Odd threadNumber
        #     Do queryStatement
        # def __call__(self):
        #     self.testRunner()
        endTime = datetime.now() + duration
        notDone = True
        while notDone:
            connection = None
            insertStatement = None
            queryStatement = None
            notDone = datetime.now() < endTime
            try:
                connection = getConnection()
                # insertStatement = connection.createStatement()
                queryStatement = connection.createStatement()
                # test1.record(insertStatement)
                # insertStatement.execute("insert into grinder_test1126 values(%d, %d)" %
                #                         (grinder.threadNumber, grinder.runNumber))
                test2.record(queryStatement)
                queryStatement.execute("select * from employee")
            finally:
                # ensureClosed(insertStatement)
                ensureClosed(queryStatement)
                ensureClosed(connection)
According to the documentation:
The TestRunner instance must be callable.
A Python object is callable if it defines a __call__ method. Each
worker thread performs a number of runs of the test script, as
configured by the property grinder.runs. For each run, the worker
thread calls its TestRunner; thus the __call__ method can be thought
of as the definition of a run.
Your TestRunner class needs to define a __call__ method in order to be callable.
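For illustration, a stripped-down sketch of the shape The Grinder expects, reusing the getConnection, ensureClosed and test2 names from the question's script (only the query path; the commented-out insert logic is omitted):

    class TestRunner:
        def __call__(self):
            # The Grinder invokes __call__ once per run (grinder.runs),
            # so each run opens, queries and closes its own connection
            connection = getConnection()
            queryStatement = connection.createStatement()
            try:
                test2.record(queryStatement)
                queryStatement.execute("select * from employee")
            finally:
                ensureClosed(queryStatement)
                ensureClosed(connection)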

Accessing pytest assert message in finalizer

I am trying to generate a custom report with pytest and trying to access the assert message generated by pytest, in the case of a failure, in the finalizer of a global fixture in the conftest.py file. I am able to access the status of the test but I am not able to get the error message.
I would like to access the status message in the following way
@pytest.fixture(scope='function', autouse=True)
def logFunctionLevel(request):
    start = int(time.time() * 1000)

    def fin():
        stop = int(time.time())
        fo = open("/Users/mahesh.nayak/Desktop/logs/test1.log", "a")
        fo.write(request.cls.__name__ + "." + request.function.__name__ + " "
                 + str(start) + " " + str(stop) + "\n")
Any help to access the exception message is appreciated
Thanks
Edit: The answer by Bruno did help. Adding the lines below printed the asserts:
l = str(report.longrepr)
fo.write(l)
I'm not sure you can access the exception message from a fixture, but you can implement a custom pytest_runtest_logreport hook (untested):
def pytest_runtest_logreport(report):
    fo = open("/Users/mahesh.nayak/Desktop/logs/test1.log", "a")
    fo.write('%s (duration: %s)\n' % (report.nodeid, report.duration))
    fo.close()
Hope that helps.
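If only the failure text is needed, the same hook can filter on the call phase and write report.longrepr, which is essentially what the edit in the question ends up doing. A sketch along those lines:

    def pytest_runtest_logreport(report):
        # log only the actual test call, not setup/teardown
        if report.when == "call" and report.failed:
            with open("/Users/mahesh.nayak/Desktop/logs/test1.log", "a") as fo:
                fo.write("%s FAILED\n%s\n" % (report.nodeid, report.longrepr))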
To access the assert message from a fixture, you can follow the documentation here:
https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
Which, for your question, would look something like this (untested code):
# content of conftest.py

import pytest


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()

    # set a report attribute for each phase of a call, which can
    # be "setup", "call", "teardown"
    setattr(item, "rep_" + rep.when, rep)


@pytest.fixture
def something(request):
    yield
    # request.node is an "item" because we use the default
    # "function" scope
    error_message = ""
    rep = request.node.rep_setup  # report for the phase we ended up in
    if request.node.rep_setup.failed:
        print("setting up a test failed!", request.node.nodeid)
        error_message = request.node.rep_setup.longreprtext
    elif request.node.rep_setup.passed:
        rep = request.node.rep_call
        if request.node.rep_call.failed:
            print("executing test failed", request.node.nodeid)
            error_message = request.node.rep_call.longreprtext

    fo = open("/Users/mahesh.nayak/Desktop/logs/test1.log", "a")
    if error_message:
        fo.write('%s (duration: %s) - ERROR - %s \n' % (rep.nodeid, rep.duration,
                                                        error_message))
    else:
        fo.write('%s (duration: %s) - PASSED \n' % (rep.nodeid, rep.duration))
    fo.close()
The main difference from Bruno Oliveira's answer is that pytest_runtest_logreport is called for each phase of the test (setup/call/teardown), whereas the fixture finalizer runs only once, at the end of the test.
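For completeness, the something fixture above is not autouse, so a test has to request it for the logging to kick in, e.g.:

    # hypothetical test module using the fixture above
    def test_that_fails(something):
        assert 1 == 2, "deliberate failure so the fixture logs an ERROR line"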
