How do you write a fixture (a method) that yields/returns parameterized test parameters?
For instance, I have a test as the following:
@pytest.mark.parametrize(
    "input,expected",
    [("hello", "hello"),
     ("world", "world")])
def test_get_message(self, input, expected):
    assert expected == MyClass.get_message(input)
Instead of having input and expected passed via @pytest.mark.parametrize, I am interested in an approach like the following:
@pytest.fixture(scope="session")
def test_messages(self):
    # what should I write here to return multiple
    # test cases with an expected value for each?
    pass

def test_get_message(self, test_messages):
    expected = test_messages["expected"]  # somehow extracted from test_messages?
    input = test_messages["input"]  # somehow extracted from test_messages?
    assert expected == MyClass.get_message(input)
To move the parameters into a fixture, you can use fixture params:
@pytest.fixture(params=[("hello", "hello"),
                        ("world", "world")], scope="session")
def test_messages(self, request):
    return request.param

def test_get_message(self, test_messages):
    input = test_messages[0]
    expected = test_messages[1]
    assert expected == MyClass.get_message(input)
You can also put the params into a separate function (same as with parametrize), e.g.
def get_test_messages():
    return [("hello", "hello"), ("world", "world")]

@pytest.fixture(params=get_test_messages(), scope="session")
def test_messages(self, request):
    return request.param
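As a side note: if you want more readable test IDs than the autogenerated ones, the ids argument works on fixture params too. A minimal sketch (the labels are illustrative):

import pytest

# "ids" labels each param case in the report, e.g.
# test_get_message[hello-case] instead of test_get_message[test_messages0]
@pytest.fixture(params=[("hello", "hello"), ("world", "world")],
                ids=["hello-case", "world-case"],
                scope="session")
def test_messages(request):
    return request.param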
To me, it seems you want to return a list of dicts:
@pytest.fixture(scope="session")
def test_messages():
    return [
        {
            "input": "hello",
            "expected": "hello"
        },
        {
            "input": "world",
            "expected": "world"
        }
    ]
To use it in a test case, you would need to iterate over the list:
def test_get_message(self, test_messages):
    for test_data in test_messages:
        input = test_data["input"]
        expected = test_data["expected"]
        assert input == expected
But I'm not sure this is the best approach, because it is still a single test case, and so it will show up as only one test case in the output/report.
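If you want each dict to show up as a separate test case in the report, one option is to feed the dicts in as fixture params instead of returning the whole list (same idea as the answer above, just with dicts). A minimal sketch, using MyClass.get_message from the question:

import pytest

# each dict becomes its own parametrized test case in the report
@pytest.fixture(params=[
    {"input": "hello", "expected": "hello"},
    {"input": "world", "expected": "world"},
], scope="session")
def test_message(request):
    return request.param

def test_get_message(test_message):
    assert test_message["expected"] == MyClass.get_message(test_message["input"])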
I have a function that fetches the latest data from a URL based on given inputs. How do I write a unit test for this function, given that the output is not fixed and might change every day?
def get_data_from_url(param1, param2):
    url = f"https://some-service/get_data/param1={param1}&param2={param2}"
    resp = requests.get(url, headers={"Content-Type": "application/json"})
    something = resp.json()['something']
    if something.match(pattern):
        ...
        return some_value
    elif something.match(some_other_pattern):
        ...
        return some_other_value
    else:
        return something
I'm using pytest for my unit tests.
You should use a mock: either a standard mock from the standard library or a requests_mock:
import collections
import unittest
from unittest import mock

import requests
import requests_mock

class Test(unittest.TestCase):
    def test_with_simple_mock(self):
        # fake response object exposing just the attributes the code uses
        fake_result = collections.namedtuple("Response", ("status_code", "json"))(
            200, lambda: {"something": "somevalue"}
        )
        with mock.patch("requests.get", return_value=fake_result):
            self.assertEqual(
                requests.get("https://some-service/get_data/").json()["something"],
                "somevalue",
            )

    def test_with_requests_mock(self):
        with requests_mock.Mocker() as m:
            m.get("https://some-service/get_data/", text='{"something": "somevalue"}')
            self.assertEqual(
                requests.get("https://some-service/get_data/").json()["something"],
                "somevalue",
            )
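Since you mentioned you are using pytest, the same idea also works without unittest via pytest's built-in monkeypatch fixture. A minimal sketch, reusing the fake-response shape from above:

import collections

import requests

FakeResponse = collections.namedtuple("Response", ("status_code", "json"))

def test_with_monkeypatch(monkeypatch):
    fake_result = FakeResponse(200, lambda: {"something": "somevalue"})
    # replace requests.get for the duration of this test only
    monkeypatch.setattr(requests, "get", lambda *args, **kwargs: fake_result)
    assert requests.get("https://some-service/get_data/").json()["something"] == "somevalue"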
I am trying to teach myself Pytest and am trying to understand the differences between parametrizing test data with @pytest.fixture(params=[]) and @pytest.mark.parametrize(). I've set up the code below to see how both work, and they both return the same result. However, I'm not sure if there are use cases where one method is preferred over the other. Are there benefits to using one over the other?
import pytest

@pytest.fixture(params=["first parameter", "second parameter", "third parameter"])
def param_fixture(request):
    return request.param

def parametrize_list():
    return ["first parameter", "second parameter", "third parameter"]

def test_using_fixture_params(param_fixture):
    """Tests parametrization with fixture(params=[])"""
    assert "parameter" in param_fixture

@pytest.mark.parametrize("param", parametrize_list())
def test_using_mark_parametrize(param):
    """Tests parametrization with mark.parametrize()"""
    assert "parameter" in param
The above code has the following result.
test_parametrization.py::test_using_fixture_params[first parameter] PASSED
test_parametrization.py::test_using_fixture_params[second parameter] PASSED
test_parametrization.py::test_using_fixture_params[third parameter] PASSED
test_parametrization.py::test_using_mark_parametrize[first parameter] PASSED
test_parametrization.py::test_using_mark_parametrize[second parameter] PASSED
test_parametrization.py::test_using_mark_parametrize[third parameter] PASSED
Fixtures are typically used to load data or resources and pass them into test functions. @pytest.mark.parametrize is the preferred way to run many iterations of a test with different inputs (as you have above).
This was a handy resource when starting: https://realpython.com/pytest-python-testing/
import pytest

from data_module import Data

class TestData:
    """
    Load and test data
    """
    @pytest.fixture(autouse=True)
    def data(self):
        return Data()

    def test_fixture(self, data):
        result = do_test(data)
        assert result

    @pytest.mark.parametrize('options', ['option1', 'option2', 'option3'])
    def test_with_parametrisation(self, data, options):
        result = result_with_parametrisation(data, options)
        assert result
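A point where the two approaches meet: @pytest.mark.parametrize can route its values through a fixture with indirect=True, giving you per-test parameters plus fixture setup/teardown in one place. A minimal sketch (the fixture name and values are illustrative):

import pytest

@pytest.fixture
def backend(request):
    # request.param carries the value supplied by the parametrize mark
    return f"connected-to-{request.param}"

# indirect=True routes the values through the fixture instead of
# passing them straight into the test function
@pytest.mark.parametrize("backend", ["sqlite", "postgres"], indirect=True)
def test_backend_name(backend):
    assert backend.startswith("connected-to-")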
I am trying to learn how to use pytest.mark.parametrize in my tests and found an example like the following.
import pytest

@pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)])
def test_eval(test_input, expected):
    assert eval(test_input) == expected
However, how can I replace 3+5, 2+4, 6*9 with some function's return value, like return_param below?
import pytest

@pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42), return_param])
def test_eval(test_input, expected):
    assert eval(test_input) == expected

def return_param(self):
    return 7*8, 56
It seems straightforward:
import pytest

def return_param():
    return "7*8", 56

@pytest.mark.parametrize("test_input,expected", [
    ("3+5", 8),
    ("2+4", 6),
    ("6*9", 42),
    return_param(),
])
def test_eval(test_input, expected):
    assert eval(test_input) == expected
As far as the function's return value is concerned, you can use it directly, as shown above.
But since you said the return method will live in a setup class, add your return logic to the setup class itself instead of adding an extra return_param().
Note: the return value from setup_class will be a generator/instance type.
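If the goal is simply to compute the parameter list instead of hard-coding it, any expression works inside the decorator, e.g. a helper that builds the cases. A minimal sketch:

import pytest

def make_params(n):
    # build ("i+i", 2*i) cases programmatically
    return [(f"{i}+{i}", 2 * i) for i in range(1, n + 1)]

@pytest.mark.parametrize("test_input,expected", make_params(3))
def test_eval(test_input, expected):
    assert eval(test_input) == expected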
The purpose of @pytest.mark.incremental is that if one test fails, the tests after it are marked as expected to fail.
However, when I use this in conjunction with parametrization, I get undesired behavior.
For example, in the case of this fake code:
# conftest.py:
import pytest

def pytest_generate_tests(metafunc):
    metafunc.parametrize("input", [True, False, None, False, True])

def pytest_runtest_makereport(item, call):
    if "incremental" in item.keywords:
        if call.excinfo is not None:
            parent = item.parent
            parent._previousfailed = item

def pytest_runtest_setup(item):
    if "incremental" in item.keywords:
        previousfailed = getattr(item.parent, "_previousfailed", None)
        if previousfailed is not None:
            pytest.xfail("previous test failed (%s)" % previousfailed.name)
# test.py:
import pytest

@pytest.mark.incremental
class TestClass:
    def test_input(self, input):
        assert input is not None

    def test_correct(self, input):
        assert input == True
I'd expect the test class to run
test_input on True,
followed by test_correct on True,
followed by test_input on False,
followed by test_correct on False,
followed by test_input on None,
followed by (xfailed) test_correct on None, etc etc.
Instead, what happens is that the test class
runs test_input on True,
then runs test_input on False,
then runs test_input on None,
then marks everything from that point onwards as xfailed (including the test_corrects).
What I assume is happening is that parametrization takes priority over proceeding through the functions in a class. The question is whether it is possible to override this behaviour or work around it somehow, as the current situation makes marking a class as incremental completely useless to me.
(is the only way to handle this to copy-paste the code for the class over and over, each time with different parameters? The thought is repulsive to me)
The solution to this is described in https://docs.pytest.org/en/latest/example/parametrize.html under the header A quick port of “testscenarios”
This is the code listed there. The hook in conftest.py looks for a scenarios variable in the test class; when it finds it, it iterates over each item of scenarios, expecting an id string with which to label the test and a dictionary of argnames: argvalues.
# content of conftest.py
def pytest_generate_tests(metafunc):
    idlist = []
    argvalues = []
    for scenario in metafunc.cls.scenarios:
        idlist.append(scenario[0])
        items = scenario[1].items()
        argnames = [x[0] for x in items]
        argvalues.append([x[1] for x in items])
    metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")

# content of test_scenarios.py
scenario1 = ('basic', {'attribute': 'value'})
scenario2 = ('advanced', {'attribute': 'value2'})

class TestSampleWithScenarios(object):
    scenarios = [scenario1, scenario2]

    def test_demo1(self, attribute):
        assert isinstance(attribute, str)

    def test_demo2(self, attribute):
        assert isinstance(attribute, str)
You can also modify pytest_generate_tests to accept different datatype inputs. For example, if you have a list that you usually pass to
@pytest.mark.parametrize("varname", varval_list)
you can use that same list in the following way:
# content of conftest.py
def pytest_generate_tests(metafunc):
    idlist = []
    argvalues = []
    argnames = metafunc.cls.scenario_keys
    for idx, scenario in enumerate(metafunc.cls.scenario_parameters):
        idlist.append(str(idx))
        argvalues.append([scenario])
    metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")

# content of test_scenarios.py
varval_list = ['a', 'b', 'c', 'd']

class TestSampleWithScenarios(object):
    scenario_parameters = varval_list
    scenario_keys = ['varname']

    def test_demo1(self, varname):
        assert isinstance(varname, str)

    def test_demo2(self, varname):
        assert isinstance(varname, str)
The id will be an autogenerated number (you can change that to something you specify), and this implementation won't handle multiple parametrization variables, so you have to compile those into a single list (or adapt pytest_generate_tests to handle that for you).
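For completeness, here is a sketch of one way to extend pytest_generate_tests to multiple variables, treating each scenario entry as a tuple with one value per name in scenario_keys (the names are illustrative):

# content of conftest.py (sketch)
def pytest_generate_tests(metafunc):
    if not hasattr(metafunc.cls, "scenario_parameters"):
        return
    argnames = metafunc.cls.scenario_keys  # e.g. ['varname', 'expected']
    idlist, argvalues = [], []
    for idx, scenario in enumerate(metafunc.cls.scenario_parameters):
        idlist.append(str(idx))
        # each scenario is a tuple with one value per name in scenario_keys
        argvalues.append(list(scenario))
    metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")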
The following solution does not require changing your test class:
import pytest
from collections import defaultdict

_test_failed_incremental = defaultdict(dict)

def pytest_runtest_makereport(item, call):
    if "incremental" in item.keywords:
        if call.excinfo is not None and call.excinfo.typename != "Skipped":
            param = tuple(item.callspec.indices.values()) if hasattr(item, "callspec") else ()
            _test_failed_incremental[str(item.cls)].setdefault(param, item.originalname or item.name)

def pytest_runtest_setup(item):
    if "incremental" in item.keywords:
        param = tuple(item.callspec.indices.values()) if hasattr(item, "callspec") else ()
        originalname = _test_failed_incremental[str(item.cls)].get(param)
        if originalname:
            pytest.xfail("previous test failed ({})".format(originalname))
It works by keeping a dictionary of failed tests, keyed per class and per index of the parametrized input, with the name of the test method that failed as the value.
In your example, the dictionary _test_failed_incremental will be
defaultdict(<class 'dict'>, {"<class 'test.TestClass'>": {(2,): 'test_input'}})
showing that the 3rd run (index=2) has failed for the class test.TestClass.
Before running a test method in the class for a given parameter, it checks whether any previous test method in the class has failed for that parameter and, if so, xfails the test with the name of the method that first failed.
Not 100% tested but in use and working for my needs.
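One practical note: recent pytest versions warn about unknown marks, so if you use a custom incremental marker like this, register it. A minimal sketch using the conftest.py hook for that:

# conftest.py
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "incremental: xfail the remaining tests in a class after one fails"
    )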
Let's say I have an application which must work with two different backends: backend_1 and backend_2. I have two test modules.
Module test_backend_1.py:
@pytest.fixture(scope="session")
def backend_1():
    return connect_to_backend_1()

def test_contract_method_1(backend_1):
    result = run_contract_method_1()
    assert result == backend_1_method_1_get_data()

def test_contract_method_2(backend_1):
    result = run_contract_method_2()
    assert result == backend_1_method_2_get_data()
and test_backend_2.py:
@pytest.fixture(scope="session")
def backend_2():
    return connect_to_backend_2()

def test_contract_method_1(backend_2):
    result = run_contract_method_1()
    assert result == backend_2_method_1_get_data()

def test_contract_method_2(backend_2):
    result = run_contract_method_2()
    assert result == backend_2_method_2_get_data()
I want to merge the two modules and instead provide two versions of the fixture with the same interface, running my tests against each one in turn. Something like this:
@pytest.fixture(scope="session")
def backend():
    return [connect_to_backend_1(), connect_to_backend_2()]

def test_contract_method_1(backend):
    result = run_contract_method_1()
    assert result == backend_method_1_get_data()

def test_contract_method_2(backend):
    result = run_contract_method_2()
    assert result == backend_method_2_get_data()
Note: I must have N versions (one for each type of backend) of two fixtures: my application and the native backend connector. How do I do it with py.test?
I think a parametrized fixture will work very well for you:
import pytest

@pytest.fixture(scope="session")
def backends():
    """Mapping of possible backend ids to their constructor functions."""
    # session scope so the session-scoped "backend" fixture below can depend on it
    return {
        1: connect_to_backend_1,
        2: connect_to_backend_2
    }

@pytest.fixture(scope="session", params=[1, 2])
def backend(request, backends):
    """Parametrized backend instance."""
    return backends[request.param]()

def test_contract_method_1(backend):
    result = run_contract_method_1()
    assert result == backend.get_data()

def test_contract_method_2(backend):
    result = run_contract_method_2()
    assert result == backend.get_data()
This documentation was not live yet at the time of writing, but you'll get a better understanding if you read it fully:
https://bitbucket.org/pytest-dev/pytest/src/da9d03b1f91d63ec97a989804bacfc03204b0e12/doc/en/fixture.txt?at=default
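One practical addition: if the connectors need closing, the parametrized fixture can yield and clean up once per parameter at the end of the session. A sketch, assuming each connection object exposes a close() method:

import pytest

@pytest.fixture(scope="session", params=[1, 2])
def backend(request, backends):
    """Parametrized backend instance with teardown."""
    conn = backends[request.param]()
    yield conn
    # runs once per param at the end of the session
    conn.close()  # assumes the connector exposes close()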
I think, in your case, you can use the pytest.mark.parametrize feature:
def backend1():
    return connect_to_backend_1()

def backend2():
    return connect_to_backend_2()

def backend3():
    return connect_to_backend_3()

...

def backendN():
    return connect_to_backend_N()

@pytest.mark.parametrize('backend', [
    backend1, backend2, backend3,
    ..., backendN
])
def test_something(backend):
    result = run_contract_method_2()
    # backend is the constructor function, so call it to get the connection
    assert result == backend().method_2_get_data()
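If the autogenerated test IDs are not descriptive enough, pytest.param lets you label each case explicitly. A minimal sketch:

import pytest

@pytest.mark.parametrize('backend', [
    pytest.param(backend1, id="backend-1"),
    pytest.param(backend2, id="backend-2"),
])
def test_something(backend):
    # backend is still the constructor function; call it to connect
    assert backend() is not None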