pytest fixture to know if test was skipped - python

I define the teardown fixture as below:
@pytest.fixture
def create_docker_image():
    image_name = get_image_name_for_local_test()
    yield image_name
    delete_result = delete_docker(image_name)
I have a test that uses this fixture:
def test_upload_artifact_to_docker_repo(repo_name, create_docker_image):
    http_port, _ = get_docker_http_port(repo_name, "hosted")
    if http_port is None:
        pytest.skip("the repository defined without http port")
    upload_result = upload_image_to_remote_repo(repo_name)
    assert upload_result == 0
For some use cases the test can be skipped.
If the test was skipped, I also need to skip the call to the delete_docker method in the fixture.
How can the fixture know whether the test was skipped?

Have a look at "Making test result information available in fixtures" in the docs.
Implement pytest_runtest_makereport in conftest.py:
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    # Attach the report for each phase (setup/call/teardown) to the test item.
    setattr(item, "rep_" + rep.when, rep)
And check the results in the fixture using request:
@pytest.fixture
def create_docker_image(request):
    image_name = get_image_name_for_local_test()
    yield image_name
    # rep_call was attached by the hook above; skip cleanup for skipped tests.
    if not request.node.rep_call.skipped:
        delete_result = delete_docker(image_name)
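Note that rep_call only exists if the test actually reached its call phase. If another fixture raises pytest.skip() during setup, the teardown above would hit an AttributeError on request.node.rep_call. A more defensive variant that checks both phase reports, as a sketch (not part of the original answer):
@pytest.fixture
def create_docker_image(request):
    image_name = get_image_name_for_local_test()
    yield image_name
    # rep_call exists only if the test reached its call phase; a skip raised
    # while a later fixture was being set up leaves only rep_setup behind.
    rep_setup = getattr(request.node, "rep_setup", None)
    rep_call = getattr(request.node, "rep_call", None)
    skipped = (rep_setup is not None and rep_setup.skipped) or (
        rep_call is not None and rep_call.skipped)
    if not skipped:
        delete_docker(image_name)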

Related

Python unable to mock function call from test class

I am trying to mock a bigtable call in my unit test by declaring fixtures like so:
@pytest.fixture()
def bigtableMock():
    bigtableMock = Mock(spec=google.cloud.bigtable.table.Table)
    yield bigtableMock

@pytest.fixture()
def bigtableInstanceMock(bigtableMock):
    bigtableInstanceMock = Mock(spec=google.cloud.bigtable.instance.Instance)
    bigtableInstanceMockAttrs = {'table': bigtableMock}
    bigtableInstanceMock.configure_mock(**bigtableInstanceMockAttrs)
    yield bigtableInstanceMock

@pytest.fixture()
def myDao(bigtableInstanceMock):
    yield MyDao(bigtableInstanceMock)
I mock the read_rows function like so:
def mockReadRowsFuncWith1Dto(testDto):
    mockTableRowData = {}
    mockTableRowData['columnFamily'] = asDict(testDto)
    rowDataMock = MagicMock()
    rowDataMock.__iter__.return_value = [mockTableRowData]
    rowDataMock.__len__.return_value = 1
    def mockReadRowsFunc(startKey, endKey, limit, end_inclusive):
        return rowDataMock
    return mockReadRowsFunc
When I call my test function:
def test_read_table(
    myDao,
    testDto,
    bigtableMock
):
    bigtableMock.read_rows = mockReadRowsFuncWith1Dto(testDto)
    samp = bigtableMock.read_rows(
        startKey="asdf",
        endKey="sadf",
        limit=1,
        end_inclusive=True
    )
    print(f"\ntest data {samp}")
    myDao.readTable(...)
Inside myDao.readTable I call read_rows like so:
tableRows: PartialRowData = self.table.read_rows(
    start_key=startKey,
    end_key=endKey,
    limit=10,
    end_inclusive=True
)
However, I do not get the MagicMock return value that I expect inside readTable. There, tableRows is <Mock name='mock.table().read_rows()' id='4378752480'>, whereas in the test function I can print out the MagicMock: test data <MagicMock id='4413191168'>. Regardless of the print statement, I can never invoke the correct mocked read_rows function. What am I doing wrong?
The problem in my case was that the bigtableMock instance seen by test_read_table was different from the one used by the myDao fixture. I modified my test cases to include the table mocking inside the bigtableMock fixture, like so:
@pytest.fixture()
def bigtableMock(read_row_data, mock_append_row):
    bigtableMock = Mock(spec=google.cloud.bigtable.table.Table)
    bigtableMock.read_rows.return_value = [read_row_data]
    bigtableMock.append_row.return_value = mock_append_row
    yield bigtableMock
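With the stubbing moved into the bigtableMock fixture, every fixture in the chain (bigtableMock → bigtableInstanceMock → myDao) shares the same mock instance within a single test, so the stub configured there is the one readTable actually calls. A rough usage sketch, with readTable's signature and return value assumed for illustration only:
def test_read_table(myDao, bigtableMock, read_row_data):
    # Hypothetical arguments; substitute readTable's real signature.
    tableRows = myDao.readTable("startKey", "endKey")
    # The DAO called the shared mock and received the stubbed row data.
    bigtableMock.read_rows.assert_called_once()
    assert tableRows == [read_row_data]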

pytest - how to store that certain test failed with test name

I would like to store which specific tests failed, then pass that info via API when the test class is finished.
I tried something like this:
fails = []

@pytest.fixture(scope='function')
def something(request):
    yield
    if request.session.testsfailed:
        print("I failed")
        fails.append(request.node.name)
        print('FAILED', fails)
class TestLala:
    @pytest.mark.order(1)
    def test_test1(self, something):
        assert False

    @pytest.mark.order(3)
    def test_test2(self, something):
        assert True

    @pytest.mark.order(3)
    def test_test3(self, something):
        assert 4 == 4
but instead of only the failed tests, I am still getting every test name added to the list.
The pytest_runtest_makereport hook should solve it for you (request.session.testsfailed is a session-wide failure counter, so once any test has failed it is truthy for every test that follows). Add this to your conftest.py:
import pytest

pytest.failed_nodes = []

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item):
    output = yield
    report = output.get_result()
    if report.failed:
        node_id = report.nodeid
        pytest.failed_nodes.append(node_id)
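One caveat: the hook fires once per phase (setup, call, teardown), so a test that fails in more than one phase would be appended more than once; narrowing the check to report.when == 'call' avoids that. To then push the collected ids in a single API call at the end of the run, a pytest_sessionfinish hook could sit next to it; a minimal sketch (send_to_api is a hypothetical helper, not a pytest API):
def pytest_sessionfinish(session, exitstatus):
    # Runs once after all tests; pytest.failed_nodes was filled by the
    # makereport hook above.
    if pytest.failed_nodes:
        send_to_api(pytest.failed_nodes)  # hypothetical API client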
EDIT:
The answer @Teejay Bruno gave shows how you can avoid pushing to your API after every function: append the data for each test to a list of dicts, then call send_data once the run is over to send everything to your API.
I have done something similar in my work. By using conftest with the makereport hook, you are able to capture the test results and other metadata, then do as you please with that data (like send it to a database or API).
test.py
import pytest

class TestLala:
    @pytest.mark.order(1)
    def test_test1(self):
        assert False

    @pytest.mark.order(3)
    def test_test2(self):
        assert True

    @pytest.mark.order(3)
    def test_test3(self):
        assert 4 == 4
conftest.py
import pytest

test = None
status_tag = None
line = None
duration = None
exception = None

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    global test, status_tag, line, duration, exception
    report = yield
    result = report.get_result()
    if result.when == 'call':
        (filename, line, name) = item.location
        test = item.nodeid
        status_tag = result.outcome
        duration = call.duration
        exception = call.excinfo

@pytest.fixture(scope='function', autouse=True)
def send_data(pytestconfig):
    yield
    global test, status_tag, line, duration, exception
    # This is where you can send the data to your API.
    # This runs after every test, so if you don't want to send the data as it
    # comes in, you will need to change how this fixture and the hook above
    # work a little.
    print(f"TEST: {test}")
    print(f"STATUS_TAG: {status_tag}")
    print(f"LINE: {line}")
    print(f"DURATION: {duration}")
    print(f"EXCEPTION: {exception}")
    test = None
    status_tag = None
    line = None
    duration = None
    exception = None
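Following the EDIT above, here is a sketch of the batching variant: collect one dict per test and push everything in a single call when the session ends (send_data_to_api is a hypothetical helper, not a pytest API):
import pytest

results = []

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    result = outcome.get_result()
    if result.when == 'call':
        results.append({
            'test': item.nodeid,
            'status_tag': result.outcome,
            'line': item.location[1],
            'duration': call.duration,
            'exception': repr(call.excinfo) if call.excinfo else None,
        })

def pytest_sessionfinish(session, exitstatus):
    # One API call at the end of the run instead of one per test.
    send_data_to_api(results)  # hypothetical API client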
If you have not worked with conftest before, see the pytest fixture docs:
https://docs.pytest.org/en/6.2.x/fixture.html
Search for the section titled "conftest.py: sharing fixtures across multiple files".

How to access test suite properties when using record_testsuite_property fixture in pytest?

According to the Pytest documentation, we can use the record_testsuite_property fixture to record properties specific to the test suite.
So I'm using that fixture like this:
import pytest

class TestSuite:
    @pytest.fixture(scope="class")
    def init(self, record_testsuite_property):
        record_testsuite_property("suite_name", "Test Suite #1")

    def test_example(self, record_property):
        record_property('test_id', 'ABC-123')
        record_property('test_name', 'Example Test #1')
        assert True
I would like to access the value of suite_name when I am generating the report like so:
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    if item.user_properties:
        test_properties = {prop[0]: prop[1] for prop in item.user_properties}
        # These are added via the record_property fixture and I am able to
        # access them with no issue.
        report.test_id = test_properties["test_id"]
        report.test_name = test_properties["test_name"]
        # Not able to get the suite_name from here:
        # report.suite_name = test_properties["suite_name"]
    setattr(report, "duration_formatter", "%M:%S")
I was able to figure it out.
The whole idea is that I want suite_name to be a property attached to each test item so that I can include it in the report. The catch is that record_testsuite_property records properties at the session level (they end up on the <testsuite> element of the JUnit XML), so they never appear in item.user_properties.
So I realized that I would still use the record_property fixture here and have it automatically requested (using autouse=True) at function scope.
import pytest

class TestSuite:
    @pytest.fixture(scope="function", autouse=True)
    def init(self, record_property):
        record_property("suite_name", "Test Suite #1")

    def test_example(self, record_property):
        record_property('test_id', 'ABC-123')
        record_property('test_name', 'Example Test #1')
        assert True
And now I can access suite_name here:
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    if item.user_properties:
        test_properties = {prop[0]: prop[1] for prop in item.user_properties}
        report.test_id = test_properties["test_id"]
        report.test_name = test_properties["test_name"]
        report.suite_name = test_properties["suite_name"]
    setattr(report, "duration_formatter", "%M:%S")
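For completeness: if the suite-level property is only needed in the JUnit XML output rather than in per-item reports, record_testsuite_property still fits, since it is a session-scoped fixture. A minimal sketch (run with pytest --junitxml=report.xml):
import pytest

@pytest.fixture(scope="session", autouse=True)
def suite_properties(record_testsuite_property):
    # Written once to the <testsuite> element of the JUnit XML report,
    # not attached to individual test items.
    record_testsuite_property("suite_name", "Test Suite #1")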

Reset class and class variables for each test in Python via pytest

I created a class to make my life easier while doing some integration tests involving workers and their contracts. The code looks like this:
class ContractID(str):
    contract_counter = 0
    contract_list = list()

    def __new__(cls):
        cls.contract_counter += 1
        new_entry = super().__new__(cls, f'Some_internal_name-{cls.contract_counter:010d}')
        cls.contract_list.append(new_entry)
        return new_entry

    @classmethod
    def get_contract_no(cls, worker_number):
        return cls.contract_list[worker_number - 1]  # -1 so WORKER1 has contract #1 and not #0 etc.
When I'm unit-testing the class, I'm using the following code:
import pytest

from test_helpers import ContractID

@pytest.fixture
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    return test_string_1, test_string_2, test_string_3

def test_contract_id(get_contract_numbers):
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'
    assert get_contract_numbers[1] == 'Some_internal_name-0000000002'
    assert get_contract_numbers[2] == 'Some_internal_name-0000000003'

def test_contract_id_get_contract_no(get_contract_numbers):
    assert ContractID.get_contract_no(1) == 'Some_internal_name-0000000001'
    assert ContractID.get_contract_no(2) == 'Some_internal_name-0000000002'
    assert ContractID.get_contract_no(3) == 'Some_internal_name-0000000003'
    with pytest.raises(IndexError) as py_e:
        ContractID.get_contract_no(4)
    assert py_e.type == IndexError
However, when I run these tests, the second one (test_contract_id_get_contract_no) fails because it does not raise the error: by then there are more than three values in the list. Furthermore, when I run all the tests in my test/ folder, even the first test (test_contract_id) fails, probably because other tests that run before it also use the class.
After reading this book, my understanding of fixtures was that they provide objects as if they had never been created before, which is obviously not the case here. Is there a way to tell the tests to use the class as if it had not been used anywhere else before?
If I understand that correctly, you want to run the fixture as setup code, so that your class has exactly three instances. If the fixture is function-scoped (the default), it is indeed run before each test, each time creating three new instances of your class. If you want to reset your class after the test, you have to do this yourself; there is no way pytest can guess what you want to do here.
So, a working solution would be something like this:
@pytest.fixture(autouse=True)
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
    # Teardown: reset the class-level state after each test.
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id():
    ...
Note that I did not yield the test strings, as you don't need them in the shown tests - if you need them, you can yield them, of course. I also added autouse=True, which makes sense if you need this for all tests, so you don't have to reference the fixture in each test.
Another possibility would be to use a session-scoped fixture. In this case the setup would be done only once. If that is what you need, you can use this instead:
@pytest.fixture(autouse=True, scope="session")
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
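An alternative sketch for the per-test reset, using pytest's built-in monkeypatch fixture so the original class attributes are restored automatically on teardown (assuming each test should start from a clean counter and list):
import pytest

from test_helpers import ContractID

@pytest.fixture(autouse=True)
def isolated_contract_state(monkeypatch):
    # monkeypatch undoes these assignments after each test automatically.
    monkeypatch.setattr(ContractID, "contract_counter", 0)
    monkeypatch.setattr(ContractID, "contract_list", [])
    # Recreate the three instances the original fixture provided.
    test_strings = tuple(ContractID() for _ in range(3))
    yield test_strings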

Pytest fixture: setup, teardown and code running between each test

I am trying to use pytest to test a module I am writing. The module is a wrapper around a process with a LONG startup time, so I want proper setup/teardown logic to make sure the initialization does not happen more than once.
My current code looks like this:
import pytest
import leelaZeroWrapper

@pytest.fixture(scope='module')
def leela(request):
    leela = leelaZeroWrapper.LeelaWrapper()

    def quit():
        leela.quit()

    request.addfinalizer(quit)
    return leela

def test_single_play(leela):
    leela.reset()
    result = leela.play('b', 'a11')
    assert result == [{'color': 'black', 'loc': 'a11'}]

def test_single_play_uppercase(leela):
    leela.reset()
    result = leela.play('WHITE', 'A11')
    assert result == [{'color': 'white', 'loc': 'a11'}]

def test_reset(leela):
    leela.reset()
    leela.play('b', 'a11')
    leela.play('w', 'a13')
    leela.reset()
    assert leela.current_board == []
    assert leela.current_move == 0
I notice ALL my tests start with a call to reset the module! Is there any way to add this to the fixture, or how would you solve it?
Pseudocode for what I want:
@pytest.fixture(scope='module')
def leela(request):
    leela = leelaZeroWrapper.LeelaWrapper()

    def quit():
        leela.quit()

    def reset():
        leela.reset()

    request.addfinalizer(quit)
    request.add_between_test_resetter(reset)  # wished-for API; no such method exists
    return leela
If the initial setup were not so heavy, I would just skip this and let the class initialize for every test. I have tried to look through the documentation for the request object but cannot find a suitable method, so I assume this must be solved some other way. Any idea?
Just introduce another fixture that performs the reset. Make it an autouse fixture so it runs automatically before each test:
@pytest.fixture(scope='module')
def leela():
    leela = leelaZeroWrapper.LeelaWrapper()
    yield leela
    leela.quit()

@pytest.fixture(autouse=True)
def reset(leela):
    leela.reset()

def test_single_play(leela):
    result = leela.play('b', 'a11')
    assert result == [{'color': 'black', 'loc': 'a11'}]
The default fixture scope is function, so the reset fixture will rerun before each test in the module.
