py.test multiple tests per fixture - python

I have the following.
@pytest.fixture
def patch_socket(monkeypatch):
    def gethostname():
        return 'web01-east.domain.com'
    monkeypatch.setattr(socket, 'gethostname', gethostname)

def test__get_pod(patch_socket):
    assert __get_pod() == 'east'
What is the correct way if I want to test for the following hostnames:
web01-east.domain.com
redis01-master-east.domain.com
web01.domain.com
Should I have a new fixture for each or is there a way to pass in a hostname in the test itself?

Use this code:
@pytest.fixture(params=['web01-east.domain.com', 'redis01-master-east.domain.com', 'web01.domain.com'])
def patch_socket(request, monkeypatch):
    def gethostname():
        return request.param
    monkeypatch.setattr(socket, 'gethostname', gethostname)

def test__get_pod(patch_socket):
    assert __get_pod() == 'east'
This will create 3 tests on the fly. If you run with -vv you will see something like:
<FILE>::test__get_pod[web01-east.domain.com] PASSED
<FILE>::test__get_pod[redis01-master-east.domain.com] PASSED
<FILE>::test__get_pod[web01.domain.com] PASSED
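If you would rather choose the hostnames in the test itself instead of in the fixture, indirect parametrization is one possible alternative. The sketch below reuses the patch_socket fixture and the __get_pod function from the question; it is only an illustration, not the answer's code:

import socket
import pytest

@pytest.fixture
def patch_socket(request, monkeypatch):
    # request.param is supplied by the test's parametrize marker (indirect=True)
    hostname = request.param
    monkeypatch.setattr(socket, 'gethostname', lambda: hostname)

@pytest.mark.parametrize(
    'patch_socket',
    ['web01-east.domain.com', 'redis01-master-east.domain.com'],
    indirect=True,
)
def test__get_pod(patch_socket):
    assert __get_pod() == 'east'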

Related

How to access marks list outside test script

I have a mark, let's say specific_case = pytest.mark.skipif(<CONDITION>), which I need to apply to some test cases. I want the value property to return a different value when the mark is applied. This is my simplified code:
module.py:
import pytest

class A():
    @property
    def value(self):
        _marks = pytest.mark._markers  # current code to get applied marks list
        if 'specific_case' in _marks:
            return 1
        else:
            return 2
test_1.py:
import pytest
from module import A

pytestmark = [pytest.mark.test_id.TC_1, pytest.mark.specific_case]

def test_1():
    a = A()
    assert a.value == 1
But that doesn't work, as pytest.mark._markers returns set(['TC_1', 'skipif']) rather than the exact pytestmark list (I expect set(['TC_1', 'specific_case']), or at least pytestmark as it is: [pytest.mark.test_id.TC_1, pytest.mark.specific_case]).
So is there any way I can access the exact pytestmark list outside the test function?
P.S. I also found some tips on how to get the mark list using fixtures, but I have to stick to the current implementation of module.py and test_1.py, so I cannot use a fixture.
Also, there are many other marks with skip conditions (specific_case_2 = pytest.mark.skipif(<CONDITION_2>), specific_case_3 = pytest.mark.skipif(<CONDITION_3>), ...), so I cannot simply check if 'skipif' is in _marks.
Since your module.py accesses pytest marks, it is safe to assume that it is part of the test code.
With that said, in case you are open to changing the class property A.value into a pytest fixture, this alternative solution might work for you. Otherwise, it won't suffice.
Alternative Solution
Instead of using pytest.mark._markers to retrieve the marks list, use request.keywords.
From the pytest docs for class FixtureRequest:
keywords
    Keywords/markers dictionary for the underlying node.
import pytest

# Data
class A():
    @property
    def value(self):
        _marks = pytest.mark._markers  # Current code to get applied marks list
        print("Using class property A.value:", list(_marks))
        if 'specific_case' in _marks:
            return 1
        else:
            return 2

@pytest.fixture
def a_value(request):  # This fixture can be in conftest.py so all test files can see it. Or use pytest_plugins to include the file containing this.
    _marks = request.keywords  # Alternative style of getting applied marks list
    print("Using pytest fixture a_value:", list(_marks))
    if 'specific_case' in _marks:
        return 1
    else:
        return 2

# Tests
pytestmark = [pytest.mark.test_id, pytest.mark.specific_case]

def test_first():
    a = A()
    assert a.value != 1  # 'specific_case' was not recognized as a marker

def test_second(a_value):
    assert a_value == 1  # 'specific_case' was recognized as a marker
Output:
pytest -q -rP --disable-pytest-warnings
.. [100%]
================================================================================================= PASSES ==================================================================================================
_______________________________________________________________________________________________ test_first ________________________________________________________________________________________________
------------------------------------------------------------------------------------------ Captured stdout call -------------------------------------------------------------------------------------------
Using class property A.value: ['parametrize', 'skipif', 'skip', 'trylast', 'filterwarnings', 'tryfirst', 'usefixtures', 'xfail']
_______________________________________________________________________________________________ test_second _______________________________________________________________________________________________
------------------------------------------------------------------------------------------ Captured stdout setup ------------------------------------------------------------------------------------------
Using pytest fixture a_value: ['specific_case', '2', 'test_1.py', 'test_second', 'test_id']
2 passed, 2 warnings in 0.01s
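Another option, shown here only as a rough sketch (it is not part of the answer above), is to ask the test's node for the mark directly: request.node.get_closest_marker returns the mark object if it is applied, or None otherwise.

import pytest

@pytest.fixture
def a_value(request):
    # Return 1 only if the 'specific_case' mark is applied to the requesting test
    if request.node.get_closest_marker('specific_case') is not None:
        return 1
    return 2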

Reset class and class variables for each test in Python via pytest

I created a class to make my life easier while doing some integration tests involving workers and their contracts. The code looks like this:
class ContractID(str):
    contract_counter = 0
    contract_list = list()

    def __new__(cls):
        cls.contract_counter += 1
        new_entry = super().__new__(cls, f'Some_internal_name-{cls.contract_counter:010d}')
        cls.contract_list.append(new_entry)
        return new_entry

    @classmethod
    def get_contract_no(cls, worker_number):
        return cls.contract_list[worker_number-1]  # -1 so WORKER1 has contract #1 and not #0 etc.
When I'm unit-testing the class, I'm using the following code:
import pytest
from test_helpers import ContractID

@pytest.fixture
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    return test_string_1, test_string_2, test_string_3

def test_contract_id(get_contract_numbers):
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'
    assert get_contract_numbers[1] == 'Some_internal_name-0000000002'
    assert get_contract_numbers[2] == 'Some_internal_name-0000000003'

def test_contract_id_get_contract_no(get_contract_numbers):
    assert ContractID.get_contract_no(1) == 'Some_internal_name-0000000001'
    assert ContractID.get_contract_no(2) == 'Some_internal_name-0000000002'
    assert ContractID.get_contract_no(3) == 'Some_internal_name-0000000003'
    with pytest.raises(IndexError) as py_e:
        ContractID.get_contract_no(4)
    assert py_e.type == IndexError
However, when I try to run these tests, the second one (test_contract_id_get_contract_no) fails because it does not raise the error: there are more than three values in the list. Furthermore, when I try to run all my tests in my test/ folder, even the first test (test_contract_id) fails, which is probably because I use this class in other tests that run before it.
After reading this book, my understanding of fixtures was that they provide objects as if they had never been created before, which is obviously not the case here. Is there a way to tell the tests to use the class as if it hadn't been used anywhere else before?
If I understand correctly, you want to run the fixture as setup code so that your class has exactly 3 instances. If the fixture is function-scoped (the default), it is indeed run before each test, and each run creates 3 new instances of your class. If you want to reset your class after the test, you have to do this yourself - there is no way pytest can guess what you want to do here.
So, a working solution would be something like this:
@pytest.fixture(autouse=True)
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id():
    ...
Note that I did not yield the test strings, as you don't need them in the shown tests - if you need them, you can yield them, of course. I also added autouse=True, which makes sense if you need this for all tests, so you don't have to reference the fixture in each test.
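If you do need the instances in a test, a small variant of the same fixture (sketched below under the same assumptions) can yield them and still reset the class in its teardown part:

@pytest.fixture(autouse=True)
def get_contract_numbers():
    contracts = (ContractID(), ContractID(), ContractID())
    yield contracts
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id(get_contract_numbers):
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'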
Another possibility would be to use a session-scoped fixture. In this case the setup would be done only once. If that is what you need, you can use this instead:
@pytest.fixture(autouse=True, scope="session")
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield

Is it possible to globally set a default `ids` function for pytest's parametrize?

pytest.mark.parametrize accepts an ids argument which can be a callable, like this:
def test_id_builder(arg):
    if isinstance(arg, int):
        return str(arg)
    ...  # more logic

@pytest.mark.parametrize('value', [1, 2], ids=test_id_builder)
def test_whatever(value):
    assert value > 0
This will generate two test cases, with the ids "1" and "2" respectively. The problem is that I have a lot of tests, organized in multiple classes and files. Because of that, I'd like to globally set test_id_builder as the ids function for all parametrized tests in my project. Is there a way to do this?
Simply implement a custom pytest_make_parametrize_id hook. In your conftest.py:
def pytest_make_parametrize_id(config, val, argname):
    if isinstance(val, int):
        return f'{argname}={val}'
    if isinstance(val, str):
        return f'text is {val}'
    # return None to let pytest handle the formatting
    return None
Example tests:
import pytest

@pytest.mark.parametrize('n', range(3))
def test_int(n):
    assert True

@pytest.mark.parametrize('s', ('fizz', 'buzz'))
def test_str(s):
    assert True

@pytest.mark.parametrize('c', (tuple(), list(), set()))
def test_unhandled(c):
    assert True
Check the test parametrization:
$ pytest -q --collect-only
test_spam.py::test_int[n=0]
test_spam.py::test_int[n=1]
test_spam.py::test_int[n=2]
test_spam.py::test_str[text is fizz]
test_spam.py::test_str[text is buzz]
test_spam.py::test_unhandled[c0]
test_spam.py::test_unhandled[c1]
test_spam.py::test_unhandled[c2]
no tests ran in 0.06 seconds
You can make your own custom parametrize wrapper:
import pytest

def id_builder(arg):
    if isinstance(arg, int):
        return str(arg) * 2

def custom_parametrize(*args, **kwargs):
    kwargs.setdefault('ids', id_builder)
    return pytest.mark.parametrize(*args, **kwargs)

@custom_parametrize('value', [1, 2])
def test_whatever(value):
    assert value > 0
And to avoid rewriting pytest.mark.parametrize to custom_parametrize everywhere use this well-known workaround:
old_parametrize = pytest.mark.parametrize

def custom_parametrize(*args, **kwargs):
    kwargs.setdefault('ids', id_builder)
    return old_parametrize(*args, **kwargs)

pytest.mark.parametrize = custom_parametrize
There is no way to globally set ids, but you can use the pytest_generate_tests hook to generate tests from some other fixture. That other fixture could be scoped to the session, which overall will mimic the intended behaviour.
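As a rough sketch of that idea (the 'hostname' argument name and the sample values are made up for illustration), a pytest_generate_tests hook in conftest.py can parametrize every test that declares a given argument and apply a default ids callable centrally:

# conftest.py
HOSTNAMES = ['web01-east.domain.com', 'web01.domain.com']  # example data

def id_builder(arg):
    if isinstance(arg, str):
        return arg.split('.')[0]
    return None

def pytest_generate_tests(metafunc):
    # Parametrize every test that declares a 'hostname' argument,
    # using id_builder as the default ids callable.
    if 'hostname' in metafunc.fixturenames:
        metafunc.parametrize('hostname', HOSTNAMES, ids=id_builder)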

How to run unittest test cases in the order they are declared

I fully realize that the order of unit tests should not matter. But these unit tests are as much for instructional use as for actual unit testing, so I would like the test output to match up with the test case source code.
I see that there is a way to set the sort order by setting the sortTestMethodsUsing attribute on the test loader. The default is a simple cmp() call to lexically compare names. So I tried writing a cmp-like function that would take two names, find their declaration line numbers, and then return the cmp()-equivalent of them:
import unittest

class TestCaseB(unittest.TestCase):
    def test(self):
        print("running test case B")

class TestCaseA(unittest.TestCase):
    def test(self):
        print("running test case A")

import inspect

def get_decl_line_no(cls_name):
    cls = globals()[cls_name]
    return inspect.getsourcelines(cls)[1]

def sgn(x):
    return -1 if x < 0 else 1 if x > 0 else 0

def cmp_class_names_by_decl_order(cls_a, cls_b):
    a = get_decl_line_no(cls_a)
    b = get_decl_line_no(cls_b)
    return sgn(a - b)

unittest.defaultTestLoader.sortTestMethodsUsing = cmp_class_names_by_decl_order

unittest.main()
When I run this, I get this output:
running test case A
.running test case B
.
----------------------------------------------------------------------
Ran 2 tests in 0.000s
OK
indicating that the test cases are not running in the declaration order.
My sort function is just not being called, so I suspect that main() is building a new test loader, which is wiping out my sort function.
The solution is to create a TestSuite explicitly, instead of letting unittest.main() follow all its default test discovery and ordering behavior. Here's how I got it to work:
import unittest

class TestCaseB(unittest.TestCase):
    def runTest(self):
        print("running test case B")

class TestCaseA(unittest.TestCase):
    def runTest(self):
        print("running test case A")

import inspect

def get_decl_line_no(cls):
    return inspect.getsourcelines(cls)[1]

# get all test cases defined in this module
test_case_classes = list(filter(lambda c: c.__name__ in globals(),
                                unittest.TestCase.__subclasses__()))

# sort them by decl line no
test_case_classes.sort(key=get_decl_line_no)

# make into a suite and run it
suite = unittest.TestSuite(cls() for cls in test_case_classes)
unittest.TextTestRunner().run(suite)
This gives the desired output:
running test case B
.running test case A
.
----------------------------------------------------------------------
Ran 2 tests in 0.000s
OK
It is important to note that the test method in each class must be named runTest.
You can manually build a TestSuite in which your TestCases, and all tests inside them, run in line-number order:
# Python 3.8.3
import unittest
import sys
import inspect

def isTestClass(x):
    return inspect.isclass(x) and issubclass(x, unittest.TestCase)

def isTestFunction(x):
    return inspect.isfunction(x) and x.__name__.startswith("test")

class TestB(unittest.TestCase):
    def test_B(self):
        print("Running test_B")
        self.assertEqual((2+2), 4)

    def test_A(self):
        print("Running test_A")
        self.assertEqual((2+2), 4)

    def setUpClass():
        print("TestB Class Setup")

class TestA(unittest.TestCase):
    def test_A(self):
        print("Running test_A")
        self.assertEqual((2+2), 4)

    def test_B(self):
        print("Running test_B")
        self.assertEqual((2+2), 4)

    def setUpClass():
        print("TestA Class Setup")

def suite():
    # get current module object
    module = sys.modules[__name__]
    # get all test className,class tuples in current module
    testClasses = [
        tup for tup in
        inspect.getmembers(module, isTestClass)
    ]
    # sort classes by line number
    testClasses.sort(key=lambda t: inspect.getsourcelines(t[1])[1])
    testSuite = unittest.TestSuite()
    for testClass in testClasses:
        # get list of testFunctionName,testFunction tuples in current class
        classTests = [
            tup for tup in
            inspect.getmembers(testClass[1], isTestFunction)
        ]
        # sort TestFunctions by line number
        classTests.sort(key=lambda t: inspect.getsourcelines(t[1])[1])
        # create TestCase instances and add to testSuite
        for test in classTests:
            testSuite.addTest(testClass[1](test[0]))
    return testSuite

if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite())
Output:
TestB Class Setup
Running test_B
.Running test_A
.TestA Class Setup
Running test_A
.Running test_B
.
----------------------------------------------------------------------
Ran 4 tests in 0.000s
OK
As stated in the name, sortTestMethodsUsing is used to sort test methods. It is not used to sort classes. (It is not used to sort methods in different classes either; separate classes are handled separately.)
If you had two test methods in the same class, sortTestMethodsUsing would be used to determine their order. (At that point, you would get an exception because your function expects class names.)
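To illustrate what sortTestMethodsUsing actually controls, here is a minimal sketch (not from the answers above) that orders the test methods of a single class by their declaration line numbers:

import inspect
import unittest

class TestOrdered(unittest.TestCase):
    def test_b(self):
        print("declared first")

    def test_a(self):
        print("declared second")

def by_decl_line(name_a, name_b):
    # cmp-style comparator over method *names* within one class
    a = inspect.getsourcelines(getattr(TestOrdered, name_a))[1]
    b = inspect.getsourcelines(getattr(TestOrdered, name_b))[1]
    return (a > b) - (a < b)

loader = unittest.TestLoader()
loader.sortTestMethodsUsing = by_decl_line
suite = loader.loadTestsFromTestCase(TestOrdered)
unittest.TextTestRunner(verbosity=2).run(suite)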

Pytest fixture: setup, teardown and code running between each test

I am trying to use pytest to test a module I am writing. The module is a wrapper around a process with a LONG startup time. I therefore want to make sure I have proper setup/teardown logic so that the initialization does not happen more than once.
My current code looks like this:
import pytest
import leelaZeroWrapper

@pytest.fixture(scope='module')
def leela(request):
    leela = leelaZeroWrapper.LeelaWrapper()

    def quit():
        leela.quit()

    request.addfinalizer(quit)
    return leela
def test_single_play(leela):
    leela.reset()
    result = leela.play('b', 'a11')
    assert result == [{'color': 'black', 'loc': 'a11'}]

def test_single_play_uppercase(leela):
    leela.reset()
    result = leela.play('WHITE', 'A11')
    assert result == [{'color': 'white', 'loc': 'a11'}]

def test_reset(leela):
    leela.reset()
    leela.play('b', 'a11')
    leela.play('w', 'a13')
    leela.reset()
    assert leela.current_board == []
    assert leela.current_move == 0
I notice ALL my tests start with a call to reset my module! Is there any way to move this into the fixture, or how would you solve it?
Pseudo code for what I want:
@pytest.fixture(scope='module')
def leela(request):
    leela = leelaZeroWrapper.LeelaWrapper()

    def quit():
        leela.quit()

    def reset():
        leela.reset()

    request.addfinalizer(quit)
    request.add_between_test_resetter(reset)
    return leela
If the initial setup were not so heavy, I would just skip this and let the class initialize for every test. I have tried to look through the documentation for the request object but cannot find a suitable method. I assume this must be solved in some other way. Any ideas?
Just introduce another fixture that performs the reset. Make it an autouse fixture so it runs automatically before each test:
@pytest.fixture(scope='module')
def leela():
    leela = leelaZeroWrapper.LeelaWrapper()
    yield leela
    leela.quit()

@pytest.fixture(autouse=True)
def reset(leela):
    leela.reset()

def test_single_play(leela):
    result = leela.play('b', 'a11')
    assert result == [{'color': 'black', 'loc': 'a11'}]
The default fixture scope is function, so the reset fixture will rerun before each test in the module.
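If the wrapper should also be reset after each test (for example, so a failing test cannot leak state into the next one), a small variant of the same autouse fixture can yield first and reset in its teardown:

@pytest.fixture(autouse=True)
def reset(leela):
    leela.reset()  # before each test
    yield
    leela.reset()  # after each test as well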
