pytest - how to store which tests failed, with test names - python

I would like to record which specific tests failed, then pass that info via an API when the test class is finished.
I tried something like this:
import pytest

fails = []

@pytest.fixture(scope='function')
def something(request):
    yield
    if request.session.testsfailed:
        print("I failed")
        fails.append(request.node.name)
    print('FAILED', fails)

class TestLala:
    @pytest.mark.order(1)
    def test_test1(self, something):
        assert False

    @pytest.mark.order(2)
    def test_test2(self, something):
        assert True

    @pytest.mark.order(3)
    def test_test3(self, something):
        assert 4 == 4
but instead of only the failed tests, every test name gets added to the list.
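For context (this explanation and sketch are mine, not part of the original thread): request.session.testsfailed is a cumulative counter for the whole session, so once any test has failed it is non-zero for every later test. Snapshotting the counter before the test and comparing afterwards would make the fixture itself work:

import pytest

fails = []

@pytest.fixture(scope='function')
def something(request):
    failed_before = request.session.testsfailed  # cumulative, session-wide counter
    yield
    if request.session.testsfailed > failed_before:
        # the counter grew during this test, so this test failed
        fails.append(request.node.name)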

The pytest_runtest_makereport hook should solve it for you. Add this to your conftest.py
import pytest

pytest.failed_nodes = []

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item):
    output = yield
    report = output.get_result()
    if report.failed:
        node_id = report.nodeid
        pytest.failed_nodes.append(node_id)
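To push everything in a single API call once the run is finished (a minimal sketch of mine, assuming a hypothetical endpoint URL and the requests library as transport), a pytest_sessionfinish hook can go in the same conftest.py:

import pytest
import requests  # assumed transport; any HTTP client works

def pytest_sessionfinish(session, exitstatus):
    # Runs once after the whole test session has finished.
    if pytest.failed_nodes:
        requests.post("https://example.com/api/failures",  # placeholder URL
                      json={"failed": pytest.failed_nodes})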

##EDIT##
The answer @Teejay Bruno gave shows how you can avoid having the push to your API run after every function. Just append the data to a list of dicts after every test, then call send_data once the run is over to push everything to your API.
I have done something similar in my work. By using conftest.py with pytest_runtest_makereport you are able to capture the test results and other metadata, then do as you please with that data (like sending it to a database or an API).
test.py
import pytest

class TestLala:
    @pytest.mark.order(1)
    def test_test1(self):
        assert False

    @pytest.mark.order(2)
    def test_test2(self):
        assert True

    @pytest.mark.order(3)
    def test_test3(self):
        assert 4 == 4
conftest.py
import pytest

test = None
status_tag = None
line = None
duration = None
exception = None

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    global test, status_tag, line, duration, exception
    report = yield
    result = report.get_result()
    if result.when == 'call':
        (filename, line, name) = item.location
        test = item.nodeid
        status_tag = result.outcome
        duration = call.duration
        exception = call.excinfo

@pytest.fixture(scope='function', autouse=True)
def send_data(pytestconfig):
    global test, status_tag, line, duration, exception
    yield
    # This is where you can send the data to your API.
    # This will run after every test, so if you don't want to send the data
    # as it comes in, you will need to change how this fixture and the hook
    # above work a little.
    print(f"TEST: {test}")
    print(f"STATUS_TAG: {status_tag}")
    print(f"LINE: {line}")
    print(f"DURATION: {duration}")
    print(f"EXCEPTION: {exception}")
    test = None
    status_tag = None
    line = None
    duration = None
    exception = None
If you have not worked with conftest.py before, see the link below:
https://docs.pytest.org/en/6.2.x/fixture.html
Search for the section titled -> "conftest.py: sharing fixtures across multiple files"
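As the edit above describes, the per-test globals can be replaced by a list of dicts that is flushed once at the end of the run. A minimal sketch (my adaptation, assuming a single send at session end is acceptable; send_data stands in for your real API call):

import pytest

results = []

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    result = outcome.get_result()
    if result.when == 'call':
        results.append({
            'test': item.nodeid,
            'status_tag': result.outcome,
            'line': item.location[1],
            'duration': call.duration,
            'exception': repr(call.excinfo) if call.excinfo else None,
        })

def pytest_sessionfinish(session, exitstatus):
    # Replace print with your API call, e.g. send_data(results)
    print(results)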

Related

How to delete test files when python unittest fails

I'm using Python's unittest for functions that write data to JSON. I use tearDownClass to delete the output test files so they don't clutter the local repo. Ground truths are also stored as JSON files.
I do want to keep the output test files when tests fail, so it's easier to troubleshoot.
My current implementation uses a global boolean keep_files = False. When a unittest assertion fails, it sets keep_files = True, and tearDownClass only deletes the files when keep_files == False. I don't like the idea of modifying global variables, nor the try/except block around each assert.
import json
import os
import unittest

from src.mymodule import foo1, foo2

# These are defined outside the class on purpose so the classmethods can access them
FILE_1 = "unittest.file1.json"
EXPECTED_FILE_1 = "expected.file1.json"
FILE_2 = "unittest.file2.json"
EXPECTED_FILE_2 = "expected.file2.json"
keep_files = False

class TestRhaPostPayload(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.get_file1()
        cls.get_file2()

    @classmethod
    def get_file1(cls):
        output1 = foo1()
        with open(FILE_1, "w") as f:
            f.write(output1)

    @classmethod
    def get_file2(cls):
        output2 = foo2()
        with open(FILE_2, "w") as f:
            f.write(output2)

    @classmethod
    def tearDownClass(cls):
        if not keep_files:
            os.remove(FILE_1)
            os.remove(FILE_2)

    def test_foo1(self):
        # code that reads in file1 and expected_file_1
        try:
            self.assertEqual(expected_output1, output1)
        except AssertionError:
            global keep_files
            keep_files = True
            raise

    def test_foo2(self):
        # code that reads in file2 and expected_file_2
        try:
            self.assertEqual(expected_output2, output2)
        except AssertionError:
            global keep_files
            keep_files = True
            raise
You could simply check during teardown whether there were any errors/failures in your test case, and only delete the files if there were none.
How to perform this check is explained in this post.
The check is done on a TestCase instance, so tearDownClass won't work. But since you are using different files in different tests anyway, you might as well use the normal setUp/tearDown to remove the current file.
Here is a working example:
from pathlib import Path
from typing import Optional
from unittest import TestCase

class Test(TestCase):
    def all_tests_passed(self) -> bool:
        """Returns `True` if no errors/failures occurred at the time of calling."""
        outcome = getattr(self, "_outcome")
        if hasattr(outcome, "errors"):  # Python <=3.10
            result = self.defaultTestResult()
            getattr(self, "_feedErrorsToResult")(result, outcome.errors)
        else:  # Python >=3.11
            result = outcome.result
        return all(test != self for test, _ in result.errors + result.failures)

    def setUp(self) -> None:
        super().setUp()
        self.test_file: Optional[Path] = None

    def tearDown(self) -> None:
        super().tearDown()
        if self.test_file and self.all_tests_passed():
            self.test_file.unlink()

    def test_foo(self) -> None:
        self.test_file = Path("foo.txt")
        self.test_file.touch()
        self.assertTrue(True)

    def test_bar(self) -> None:
        self.test_file = Path("bar.txt")
        self.test_file.touch()
        self.assertTrue(False)
Running this test case leaves bar.txt in the current working directory, whereas foo.txt is gone.

pytest fixture to know if a test was skipped

I define a fixture with teardown as below:
@pytest.fixture
def create_docker_image():
    image_name = get_image_name_for_local_test()
    yield image_name
    delete_result = delete_docker(image_name)
I have a test that uses this fixture:
def test_upload_artifact_to_docker_repo(repo_name, create_docker_image):
    http_port, _ = get_docker_http_port(repo_name, "hosted")
    if http_port is None:
        pytest.skip("the repository defined without http port")
    upload_result = upload_image_to_remote_repo(repo_name)
    assert upload_result == 0
In some use cases the test can be skipped.
If the test was skipped, I need the ability to also skip the call to the delete_docker method in the fixture.
How can the fixture know whether the test was skipped?
Have a look at Making test result information available in fixtures in the docs
Implement pytest_runtest_makereport in conftest.py:
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    setattr(item, "rep_" + rep.when, rep)
And check the result in the fixture using request:
@pytest.fixture
def create_docker_image(request):
    image_name = get_image_name_for_local_test()
    yield image_name
    if not request.node.rep_call.skipped:
        delete_result = delete_docker(image_name)
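One caveat (my addition, not from the original answer): if setup fails, the call phase never runs and rep_call is never set, so the teardown above would raise an AttributeError. A defensive variant:

@pytest.fixture
def create_docker_image(request):
    image_name = get_image_name_for_local_test()
    yield image_name
    rep_call = getattr(request.node, "rep_call", None)  # missing if setup failed
    if rep_call is not None and not rep_call.skipped:
        delete_docker(image_name)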

How to access test suite properties when using record_testsuite_property fixture in pytest?

According to the Pytest documentation, we can use the record_testsuite_property fixture to record properties specific to the test suite.
So I'm using that fixture like this:
import pytest

class TestSuite:
    @pytest.fixture(scope="class")
    def init(self, record_testsuite_property):
        record_testsuite_property("suite_name", "Test Suite #1")

    def test_example(self, record_property):
        record_property('test_id', 'ABC-123')
        record_property('test_name', 'Example Test #1')
        assert True
I would like to access the value of suite_name when I am generating the report like so:
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    if item.user_properties:
        test_properties = {prop[0]: prop[1] for prop in item.user_properties}
        # These are added via the `record_property` fixture and I am able to
        # access them with no issue.
        report.test_id = test_properties["test_id"]
        report.test_name = test_properties["test_name"]
        # Not able to get the suite_name from here.
        # report.suite_name = test_properties["suite_name"]
    setattr(report, "duration_formatter", "%M:%S")
I was able to figure it out.
The whole idea is that I want suite_name to be a property attached to each item, so that I can include it in the report.
So I realized that I could still use the record_property fixture here and have it requested automatically (using autouse=True) at function scope.
import pytest

class TestSuite:
    @pytest.fixture(scope="function", autouse=True)
    def init(self, record_property):
        record_property("suite_name", "Test Suite #1")

    def test_example(self, record_property):
        record_property('test_id', 'ABC-123')
        record_property('test_name', 'Example Test #1')
        assert True
And now I can access suite_name here:
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    if item.user_properties:
        test_properties = {prop[0]: prop[1] for prop in item.user_properties}
        report.test_id = test_properties["test_id"]
        report.test_name = test_properties["test_name"]
        report.suite_name = test_properties["suite_name"]
    setattr(report, "duration_formatter", "%M:%S")
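This works because record_testsuite_property records at the suite/session level rather than on individual test items, so it never appears in item.user_properties. As an alternative to the autouse fixture (a sketch of mine, not from the original answer), a constant property can also be stamped onto every collected item from conftest.py:

def pytest_collection_modifyitems(items):
    # user_properties is the same list the record_property fixture appends to
    for item in items:
        item.user_properties.append(("suite_name", "Test Suite #1"))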

Reset class and class variables for each test in Python via pytest

I created a class to make my life easier while doing some integration tests involving workers and their contracts. The code looks like this:
class ContractID(str):
    contract_counter = 0
    contract_list = list()

    def __new__(cls):
        cls.contract_counter += 1
        new_entry = super().__new__(cls, f'Some_internal_name-{cls.contract_counter:010d}')
        cls.contract_list.append(new_entry)
        return new_entry

    @classmethod
    def get_contract_no(cls, worker_number):
        return cls.contract_list[worker_number - 1]  # -1 so WORKER1 has contract #1 and not #0 etc.
When I'm unit-testing the class, I'm using the following code:
import pytest

from test_helpers import ContractID

@pytest.fixture
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    return test_string_1, test_string_2, test_string_3

def test_contract_id(get_contract_numbers):
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'
    assert get_contract_numbers[1] == 'Some_internal_name-0000000002'
    assert get_contract_numbers[2] == 'Some_internal_name-0000000003'

def test_contract_id_get_contract_no(get_contract_numbers):
    assert ContractID.get_contract_no(1) == 'Some_internal_name-0000000001'
    assert ContractID.get_contract_no(2) == 'Some_internal_name-0000000002'
    assert ContractID.get_contract_no(3) == 'Some_internal_name-0000000003'
    with pytest.raises(IndexError) as py_e:
        ContractID.get_contract_no(4)
    assert py_e.type == IndexError
However, when I run these tests, the second one (test_contract_id_get_contract_no) fails, because it does not raise the error: by then there are more than three values in the list. Furthermore, when I run all the tests in my test/ folder, even the first test (test_contract_id) fails, probably because the class is also used in other tests that run before it.
After reading this book, my understanding of fixtures was that they provide objects as if they had never been created before, which is obviously not the case here. Is there a way to tell the tests to use the class as if it hadn't been used anywhere else before?
If I understand correctly, you want to run the fixture as setup code so that your class has exactly three instances. If the fixture is function-scoped (the default), it is indeed run before each test, and it will create three new instances each time. If you want to reset your class after a test, you have to do that yourself - there is no way pytest can guess what you want to do here.
So, a working solution would be something like this:
@pytest.fixture(autouse=True)
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id():
    ...
Note that I did not yield the test strings, as you don't need them in the tests shown - if you do need them, you can of course yield them. I also added autouse=True, which makes sense if you need the fixture in all tests, so you don't have to reference it in each test.
Another possibility would be to use a session-scoped fixture. In this case the setup would be done only once. If that is what you need, you can use this instead:
@pytest.fixture(autouse=True, scope="session")
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
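A variation (my sketch, not from the original answer): the built-in monkeypatch fixture can reset the class attributes before each test and restore the originals afterwards, avoiding hand-written cleanup:

import pytest

@pytest.fixture(autouse=True)
def fresh_contract_state(monkeypatch):
    # monkeypatch undoes these attribute swaps automatically after each test
    monkeypatch.setattr(ContractID, "contract_counter", 0)
    monkeypatch.setattr(ContractID, "contract_list", [])
    yield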

Overriding Python Unit Test module for custom output?

Aim:
I want to rewrite Python's unittest module so that when I call it I get the following JSON output on the stdout stream. For example:
{
    "errors": 0,
    "failures": 1,
    "ran": 3,
    "skipped": 0,
    "successful": 2,
    "test_data": [
        {
            "index": 0,
            "result": 1
        },
        {
            "index": 1,
            "result": 1
        },
        {
            "index": 2,
            "result": -1
        }
    ]
}
Problem:
I've written some code to generate these test results, but I'm facing problems writing the code for the test_data attribute of the JSON output. I've overridden the TestCase, TextTestResult and TextTestRunner classes, but I can't seem to figure out how to get the result from getTestsReport():
#!/usr/bin/python
import unittest
import sys, os
import json


class dataHolder(object):
    x = None


class MyTestRunner(unittest.TextTestRunner):
    def _makeResult(self):
        obj = MyTestResult(self.stream, self.descriptions, self.verbosity)
        dataHolder.x = obj.getTestsReport()
        return obj


class MyTestResult(unittest._TextTestResult):
    """
    Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False

    def __init__(self, stream=None, descriptions=None, verbosity=None):
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False
        # List containing all the run tests, their index and their result.
        # This is the new line of code.
        self.tests_run = []

    def getTestsReport(self):
        """Returns the run tests as a list of the form [test_description, test_index, result]"""
        return self.tests_run

    ###
    ### Modified the functions so that we add the test case to the tests run list.
    ### -1 means failure. 0 means error. 1 means success.
    ###
    def addError(self, test, err):
        """
        Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
        self.tests_run.append([test.shortDescription(), self.testsRun, 0])
        unittest.TestResult.addError(self, test, err)

    def addFailure(self, test, err):
        """
        Called when a failure has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
        self.tests_run.append([test.shortDescription(), self.testsRun, -1])
        unittest.TestResult.addFailure(self, test, err)

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        self.tests_run.append([test.shortDescription(), self.testsRun, 1])
        unittest.TestResult.addSuccess(self, test)


class MyTest(unittest.TestCase):
    currentResult = None  # holds last result object passed to run method
    results = []  # holds all results so we can report back to the CCC backend

    @classmethod
    def setResult(cls, amount, errors, failures, skipped):
        cls.amount, cls.errors, cls.failures, cls.skipped = \
            amount, errors, failures, skipped

    def tearDown(self):
        amount = self.currentResult.testsRun
        errors = self.currentResult.errors
        failures = self.currentResult.failures
        skipped = self.currentResult.skipped
        self.setResult(amount, errors, failures, skipped)

    @classmethod
    def tearDownClass(cls):
        print json.dumps(
            {
                'ran': cls.amount,
                'errors': len(cls.errors),
                'failures': len(cls.failures),
                'succeeded': cls.amount - len(cls.errors) - len(cls.failures),
                'skipped': len(cls.skipped),
                'test_data': dataHolder.x
            },
            sort_keys=True, indent=4, separators=(',', ': ')
        )
        return

    def run(self, result=None):
        self.currentResult = result  # remember result for use in tearDown
        unittest.TestCase.run(self, result)  # call superclass's run() method

    # Tests are defined below.
    def test_something(self):
        self.assertEqual(1, 2)


if __name__ == '__main__':
    MyTestRunner(stream=None, descriptions=0, verbosity=0)
    unittest.main(exit=False)
Why isn't the value of x updating after calling getTestsReport()?
Edit:
Okay, I think unittest.main(exit=False) was calling the wrong function! I'm now trying to rewrite the code in my main, but I'm still facing a bunch of issues:
#!/usr/bin/python
import unittest
import sys, os
import json
import string


class MyTestRunner(unittest.TextTestRunner):
    def _makeResult(self):
        return MyTestResult(self.stream, self.descriptions, self.verbosity)


# dataHolder is gone in this version; MyTestResult and MyTest are otherwise
# unchanged from the first listing above.


if __name__ == '__main__':
    module = __import__('__main__')
    testRunner = MyTestRunner(verbosity=0)
    test = unittest.defaultTestLoader.loadTestsFromModule(module)
    print test
    test2 = unittest.defaultTestLoader.loadTestsFromNames(test, module)
    result = unittest.testRunner.run(test2)
Can anybody help me out here?
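For reference, a minimal wiring sketch (my suggestion, not an answer from the thread): unittest.main accepts a testRunner argument, so the custom runner can be handed over directly instead of driving the loader by hand:

if __name__ == '__main__':
    # unittest.main builds the suite from this module and uses the given
    # runner, so _makeResult() and MyTestResult are actually exercised.
    unittest.main(testRunner=MyTestRunner(verbosity=0), exit=False)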
