Let's say I have an application which must work with two different backends: backend_1 and backend_2. I have two test modules.
Module test_backend_1.py:
@pytest.fixture(scope="session")
def backend_1():
    return connect_to_backend_1()

def test_contract_method_1(backend_1):
    result = run_contract_method_1()
    assert result == backend_1_method_1_get_data()

def test_contract_method_2(backend_1):
    result = run_contract_method_2()
    assert result == backend_1_method_2_get_data()
and test_backend_2.py:
@pytest.fixture(scope="session")
def backend_2():
    return connect_to_backend_2()

def test_contract_method_1(backend_2):
    result = run_contract_method_1()
    assert result == backend_2_method_1_get_data()

def test_contract_method_2(backend_2):
    result = run_contract_method_2()
    assert result == backend_2_method_2_get_data()
I want to merge the two modules, and instead provide two different versions of the fixture with the same interface, and run my tests against each one in sequence. Something like this:
@pytest.fixture(scope="session")
def backend():
    return [connect_to_backend_1(), connect_to_backend_2()]

def test_contract_method_1(backend):
    result = run_contract_method_1()
    assert result == backend_method_1_get_data()

def test_contract_method_2(backend):
    result = run_contract_method_2()
    assert result == backend_method_2_get_data()
Note: I must have N versions (one for each type of backend) of two fixtures: my application and the native backend connector. How do I do it with py.test?
I think a parametrized fixture will work very well for you:
import pytest

@pytest.fixture(scope="session")
def backends():
    """Mapping of possible backend ids to their constructor functions."""
    # must be session-scoped too, since the session-scoped 'backend' fixture
    # below depends on it (otherwise pytest raises ScopeMismatch)
    return {
        1: connect_to_backend_1,
        2: connect_to_backend_2,
    }

@pytest.fixture(scope="session", params=[1, 2])
def backend(request, backends):
    """Parametrized backend instance."""
    return backends[request.param]()
def test_contract_method_1(backend):
    result = run_contract_method_1()
    assert result == backend.get_data()

def test_contract_method_2(backend):
    result = run_contract_method_2()
    assert result == backend.get_data()
You'll get a better understanding if you read the fixture documentation fully:
https://bitbucket.org/pytest-dev/pytest/src/da9d03b1f91d63ec97a989804bacfc03204b0e12/doc/en/fixture.txt?at=default
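Since you also need N versions of a second fixture (your application), note that parametrization propagates through fixture dependencies: a fixture that depends on the parametrized backend fixture is instantiated once per backend as well. A minimal sketch, assuming a hypothetical create_app() constructor that wraps a backend connection:

@pytest.fixture(scope="session")
def application(backend):
    # create_app is a hypothetical constructor for your application;
    # because 'backend' is parametrized, this runs once per backend
    return create_app(backend)

def test_app_contract(application, backend):
    assert application.method_1() == backend.get_data()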
I think, in your case, you can use the pytest.mark.parametrize feature:
def backend1():
    return connect_to_backend_1()

def backend2():
    return connect_to_backend_2()

def backend3():
    return connect_to_backend_3()

..

def backendN():
    return connect_to_backend_N()

@pytest.mark.parametrize('backend', [
    backend1, backend2, backend3,
    ..., backendN
])
def test_something(backend):
    result = run_contract_method_2()
    # 'backend' is one of the constructor functions, so call it first
    assert result == backend().method_2_get_data()
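If you want the backend names to show up in the generated test IDs, parametrize also accepts an ids callable. A small sketch under the same assumptions (the backendN constructors above):

@pytest.mark.parametrize('backend_factory', [backend1, backend2, backend3],
                         ids=lambda f: f.__name__)
def test_something_else(backend_factory):
    backend = backend_factory()  # build the connection from its constructor
    result = run_contract_method_1()
    assert result == backend.method_1_get_data()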
I created a class to make my life easier while doing some integration tests involving workers and their contracts. The code looks like this:
class ContractID(str):
    contract_counter = 0
    contract_list = list()

    def __new__(cls):
        cls.contract_counter += 1
        new_entry = super().__new__(cls, f'Some_internal_name-{cls.contract_counter:010d}')
        cls.contract_list.append(new_entry)
        return new_entry

    @classmethod
    def get_contract_no(cls, worker_number):
        return cls.contract_list[worker_number-1]  # -1 so WORKER1 has contract #1 and not #0 etc.
When I'm unit-testing the class, I'm using the following code:
import pytest
from test_helpers import ContractID

@pytest.fixture
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    return test_string_1, test_string_2, test_string_3

def test_contract_id(get_contract_numbers):
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'
    assert get_contract_numbers[1] == 'Some_internal_name-0000000002'
    assert get_contract_numbers[2] == 'Some_internal_name-0000000003'

def test_contract_id_get_contract_no(get_contract_numbers):
    assert ContractID.get_contract_no(1) == 'Some_internal_name-0000000001'
    assert ContractID.get_contract_no(2) == 'Some_internal_name-0000000002'
    assert ContractID.get_contract_no(3) == 'Some_internal_name-0000000003'
    with pytest.raises(IndexError) as py_e:
        ContractID.get_contract_no(4)
    assert py_e.type == IndexError
However, when I run these tests, the second one (test_contract_id_get_contract_no) fails: the IndexError is not raised, because by then there are more than three values in the list. Furthermore, when I run all the tests in my test/ folder, even the first test (test_contract_id) fails, probably because other tests that run earlier also use this class.
After reading this book, my understanding of fixtures was that they provide objects as if they had never been created before, which is obviously not the case here. Is there a way to tell the tests to use the class as if it had not been used anywhere else before?
If I understand that correctly, you want to run the fixture as setup code, so that your class has exactly 3 instances. If the fixture is function-scoped (the default), it is indeed run before each test, creating 3 new instances of your class every time. If you want to reset your class after each test, you have to do this yourself - there is no way pytest can guess what you want to do here.
So, a working solution would be something like this:
@pytest.fixture(autouse=True)
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id():
    ...
Note that I did not yield the test strings, as you don't need them in the shown tests - if you need them, you can yield them, of course. I also added autouse=True, which makes sense if you need this for all tests, so you don't have to reference the fixture in each test.
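For example, if the tests do need the instances, a variant that yields them (teardown unchanged) could look like this:

@pytest.fixture(autouse=True)
def get_contract_numbers():
    contracts = (ContractID(), ContractID(), ContractID())
    yield contracts  # tests that request the fixture by name receive this tuple
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id(get_contract_numbers):
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'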
Another possibility would be to use a session-scoped fixture. In this case the setup would be done only once. If that is what you need, you can use this instead:
@pytest.fixture(autouse=True, scope="session")
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
How do you write a fixture (a method) that yields/returns parameterized test parameters?
For instance, I have a test like the following:
@pytest.mark.parametrize(
    "input,expected",
    [("hello", "hello"),
     ("world", "world")])
def test_get_message(self, input, expected):
    assert expected == MyClass.get_message(input)
Instead of having input and expected passed via @pytest.mark.parametrize, I am interested in an approach like the following:
@pytest.fixture(scope="session")
def test_messages(self):
    # what should I write here to return multiple
    # test cases with an expected value for each?
    pass

def test_get_message(self, test_messages):
    expected = test_messages["expected"]  # somehow extracted from test_messages?
    input = test_messages["input"]  # somehow extracted from test_messages?
    assert expected == MyClass.get_message(input)
To move the parameters into a fixture, you can use fixture params:
@pytest.fixture(params=[("hello", "hello"),
                        ("world", "world")], scope="session")
def test_messages(self, request):
    return request.param

def test_get_message(self, test_messages):
    input = test_messages[0]
    expected = test_messages[1]
    assert expected == MyClass.get_message(input)
You can also put the params into a separate function (same as with parametrize), e.g.
def get_test_messages():
    return [("hello", "hello"), ("world", "world")]

@pytest.fixture(params=get_test_messages(), scope="session")
def test_messages(self, request):
    return request.param
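Fixture params can also be given readable IDs, which then show up in the test report; the fixture decorator accepts an ids argument just like parametrize. A minimal sketch:

@pytest.fixture(params=get_test_messages(),
                ids=["hello", "world"],  # one id per param
                scope="session")
def test_messages(self, request):
    return request.param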
To me, it seems you want to return an array of dicts:
@pytest.fixture(scope="session")
def test_messages():
    return [
        {
            "input": "hello",
            "expected": "world"
        },
        {
            "input": "hello",
            "expected": "hello"
        }
    ]
To use it in a test case, you would need to iterate over the array:
def test_get_message(self, test_messages):
    for test_data in test_messages:
        input = test_data["input"]
        expected = test_data["expected"]
        assert expected == MyClass.get_message(input)
But I'm not sure this is the best approach, because it is still considered a single test case, and so it will show up as only one test case in the output/report.
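If each dict should be reported as its own test case, you can combine this layout with the fixture params shown above, so pytest generates one test per dict. A sketch (message_case is a name introduced here for illustration):

@pytest.fixture(params=[
    {"input": "hello", "expected": "world"},
    {"input": "hello", "expected": "hello"},
], scope="session")
def message_case(self, request):
    return request.param

def test_get_message(self, message_case):
    assert message_case["expected"] == MyClass.get_message(message_case["input"])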
pytest.mark.parametrize accepts an ids argument which can be a callable, like this:
def test_id_builder(arg):
    if isinstance(arg, int):
        return str(arg)
    ...  # more logic

@pytest.mark.parametrize('value', [1, 2], ids=test_id_builder)
def test_whatever(value):
    assert value > 0
This will generate two test cases, with the ids "1" and "2" respectively. The problem is that I have a lot of tests, organized in multiple classes and files. Because of that, I'd like to globally set test_id_builder as the ids function for all parametrized tests in my project. Is there a way to do this?
Simply implement a custom pytest_make_parametrize_id hook. In your conftest.py:
def pytest_make_parametrize_id(config, val, argname):
    if isinstance(val, int):
        return f'{argname}={val}'
    if isinstance(val, str):
        return f'text is {val}'
    # return None to let pytest handle the formatting
    return None
Example tests:
import pytest

@pytest.mark.parametrize('n', range(3))
def test_int(n):
    assert True

@pytest.mark.parametrize('s', ('fizz', 'buzz'))
def test_str(s):
    assert True

@pytest.mark.parametrize('c', (tuple(), list(), set()))
def test_unhandled(c):
    assert True
Check the test parametrization:
$ pytest -q --collect-only
test_spam.py::test_int[n=0]
test_spam.py::test_int[n=1]
test_spam.py::test_int[n=2]
test_spam.py::test_str[text is fizz]
test_spam.py::test_str[text is buzz]
test_spam.py::test_unhandled[c0]
test_spam.py::test_unhandled[c1]
test_spam.py::test_unhandled[c2]
no tests ran in 0.06 seconds
You can make your own custom parametrize:
import pytest

def id_builder(arg):
    if isinstance(arg, int):
        return str(arg) * 2

def custom_parametrize(*args, **kwargs):
    kwargs.setdefault('ids', id_builder)
    return pytest.mark.parametrize(*args, **kwargs)

@custom_parametrize('value', [1, 2])
def test_whatever(value):
    assert value > 0
And to avoid rewriting pytest.mark.parametrize to custom_parametrize everywhere, use this well-known workaround:
old_parametrize = pytest.mark.parametrize

def custom_parametrize(*args, **kwargs):
    kwargs.setdefault('ids', id_builder)
    return old_parametrize(*args, **kwargs)

pytest.mark.parametrize = custom_parametrize
There is no way to globally set ids, but you can use the pytest_generate_tests hook to generate tests from some other fixture. That other fixture could be session-scoped, which overall will mimic the intended behaviour.
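For completeness, a minimal sketch of the pytest_generate_tests approach in conftest.py, assuming a hypothetical backend_name argument shared by several tests:

# conftest.py
def pytest_generate_tests(metafunc):
    if "backend_name" in metafunc.fixturenames:
        # the ids given here apply to every test that uses 'backend_name'
        metafunc.parametrize("backend_name",
                             ["backend_1", "backend_2"],
                             ids=["b1", "b2"])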
I am trying to learn how to use pytest.mark.parametrize in my tests and found an example like the following.
import pytest

@pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)])
def test_eval(test_input, expected):
    assert eval(test_input) == expected
However, how can I replace a pair like 3+5 and 8 with some function's return value, like return_param below?
import pytest

@pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42), return_param])
def test_eval(test_input, expected):
    assert eval(test_input) == expected

def return_param(self):
    return 7*8, 56
It seems straightforward:
import pytest

def return_param():
    return "7*8", 56

@pytest.mark.parametrize("test_input,expected", [
    ("3+5", 8),
    ("2+4", 6),
    ("6*9", 42),
    return_param(),
])
def test_eval(test_input, expected):
    assert eval(test_input) == expected
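Note that return_param() is evaluated once, at import time, when the decorator's argument list is built, so by the time tests are collected the parametrize list simply contains the tuple ("7*8", 56) like any other entry.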
As far as the function's return value is concerned, you can use it directly, as shown above. But since you said the return method will live in a setup class, add your return logic to the setup class itself instead of adding an extra return_param(). Note: the return value from setup_class will be a generator/instance type.
I'm writing py.test tests for my code that uses the Tornado library. How can I use Hypothesis in tests that involve coroutines and the IOLoop? I've been able to write yield-based tests without Hypothesis by using pytest-tornado's @pytest.mark.gen_test, but when I try to combine it with @given, I receive the following error:
FailedHealthCheck: Tests run under @given should return None, but test_both returned <generator object test_both at 0x7fc4464525f0> instead.
See http://hypothesis.readthedocs.org/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.return_value to the suppress_health_check settings for this test.
I'm pretty confident that this is a real problem and not just a question of disabling the health check, considering that the Hypothesis docs say
yield based tests simply won’t work.
Here's code that demonstrates my situation:
import pytest
from hypothesis import given, strategies
from tornado import gen
from tornado.web import Application, RequestHandler

class MyHandler(RequestHandler):
    @gen.coroutine
    def get(self, x):
        yield gen.moment
        self.write(str(int(x) + 1))
        self.finish()

@pytest.fixture
def app():
    return Application([(r'/([0-9]+)', MyHandler)])

@given(x=strategies.integers(min_value=0))
def test_hypothesis(x):
    assert int(str(x)) == x

@pytest.mark.gen_test
def test_tornado(app, http_client, base_url):
    x = 123
    response = yield http_client.fetch('%s/%i' % (base_url, x))
    assert int(response.body) == x + 1

@pytest.mark.gen_test
@given(x=strategies.integers(min_value=0))
def test_both(x, app, http_client, base_url):
    response = yield http_client.fetch('%s/%i' % (base_url, x))
    assert int(response.body) == x + 1
test_hypothesis and test_tornado work fine, but I get the error with test_both because I'm using yield and Hypothesis together.
Changing the order of the decorators didn't change anything, probably because the gen_test decorator is simply an attribute mark.
Can I write tests of my Tornado-based code that use Hypothesis? How?
You can accomplish this by calling run_sync() on the io_loop py.test fixture provided by pytest-tornado. This can be used in place of yield:
@given(x=strategies.integers(min_value=0))
def test_solution(x, app, http_client, base_url, io_loop):
    response = io_loop.run_sync(
        lambda: http_client.fetch('%s/%i' % (base_url, x)))
    assert int(response.body) == x + 1
Or you can place the body of your test in a coroutine, so that it can continue to use yield, and call this coroutine with run_sync():
@given(x=strategies.integers(min_value=0))
def test_solution_general(x, app, http_client, base_url, io_loop):
    @gen.coroutine
    def test_gen():
        response = yield http_client.fetch('%s/%i' % (base_url, x))
        assert int(response.body) == x + 1
    io_loop.run_sync(test_gen)