I am trying to test my code by mocking the PyGithub library.
I want to create a repository for an organization, so I first need to get the organization and then, on the returned Organization object, make another call.
It fails when trying to assert that my second method was called.
I am very new to Python, and I am guessing that there is a missing connection between the mocks, but I cannot figure out what it is.
from github import Github, GithubException


class GithubService:
    def __init__(self, token: str) -> None:
        self.__github__ = Github(token)
        self.__token__ = token

    def create_repo_extra(self, repo_name, description, organization_name, team_name):
        try:
            organization = self.__github__.get_organization(organization_name)
            repo = organization.create_repo(name=repo_name,
                                            description=description,
                                            private=True,
                                            has_issues=False,
                                            has_wiki=False,
                                            has_projects=False,
                                            allow_merge_commit=False)
            # do other things with the returned repo.....
            return True
        except GithubException as ex:
            print(ex.data)
            return False
Here is the test:
import unittest
from unittest.mock import patch, MagicMock, ANY
from github.Organization import Organization
from github.Repository import Repository
from src.github_service import GithubService
class TestGithubService(unittest.TestCase):

    @patch('github.Organization.Organization.create_repo',
           side_effect=MagicMock(return_value=Repository(ANY, {}, {}, True)))
    @patch('github.MainClass.Github.get_organization',
           return_value=MagicMock(return_value=Organization(ANY, {}, {}, True)))
    def test_create_repo_returns_true(self, get_organization, create_repo):
        sut = GithubService("token")

        actual = sut.create_repo_extra('repo-name', 'description', 'organization-name', 'team-name')

        get_organization.assert_called()  # ok
        create_repo.assert_called()       # failed
        self.assertTrue(actual)
Since you mock Github.get_organization, you can use the MagicMock it returns directly rather than trying to mock another layer.
Here I patch the same Github.get_organization, but without giving it a side effect or return value, and pass it in as an argument (like you did).
Then I create a convenience mock_organization and make it the return value of the patched Github.get_organization.
Finally, the patch is checked like you did, and through the convenience mock_organization I check that the create_repo method is called as well.
class TestGithubService(unittest.TestCase):

    @patch("github.MainClass.Github.get_organization")
    def test_create_repo_returns_true(self, mock_get_organization):
        mock_organization = MagicMock()
        mock_get_organization.return_value = mock_organization

        sut = GithubService("token")

        actual = sut.create_repo_extra(
            "repo-name", "description", "organization-name", "team-name"
        )

        mock_get_organization.assert_called()  # ok
        mock_organization.create_repo.assert_called()  # ok
        self.assertTrue(actual)
Without seeing more of your code I am not sure why patching Organization did not work, but this is simpler, cleaner and just as effective.
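If you also want to verify the arguments, a possible follow-up (a sketch, sticking with the same mock_organization) is to assert on the call itself; the keyword values below just mirror what your create_repo_extra passes:

mock_organization.create_repo.assert_called_once_with(
    name="repo-name",
    description="description",
    private=True,
    has_issues=False,
    has_wiki=False,
    has_projects=False,
    allow_merge_commit=False)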
I asked the same question on GitHub.
I learned about pytest-helpers-namespace from s0undt3ch in his very helpful answer. However, I found a use case for which I can't seem to find an obvious workaround. Here is the paste of my original question on GitHub.
How can I use the fixtures already declared in my conftest within my helper functions?
I have a large, memory-heavy configuration object (for simplicity, a dictionary) used in all tests, but I don't want to tear it down and rebuild it, so it is scoped as session and reused. Often I want to grab values from the configuration object within my test.
I know that when reusing fixtures within fixtures, you have to pass a reference:
# fixtures
@pytest.fixture(scope="session")
def return_dictionary():
    return {
        "test_key": "test_value"
    }


@pytest.fixture(scope="session")
def add_random(return_dictionary):
    _temp = return_dictionary
    _temp["test_key_random"] = "test_random_value"
    return _temp
Is it because pytest collects similar decorators and analyzes them together? I would like someone's input on this. Thanks!
Here are a few files I created to demonstrate what I was looking for and the error I am seeing.
# conftest.py
import pytest
from pprint import pprint

pytest_plugins = ["helpers_namespace"]


# fixtures
@pytest.fixture(scope="session")
def return_dictionary():
    return {
        "test_key": "test_value"
    }


# helpers
@pytest.helpers.register
def super_print(_dict):
    pprint(_dict)


@pytest.helpers.register
def super_print_always(key, _dict=return_dictionary):
    pprint(_dict[key])
# test_check.py
import pytest


def test_option_1(return_dictionary):
    print(return_dictionary)


def test_option_2(return_dictionary):
    return_dictionary["test_key_2"] = "test_value_2"
    pytest.helpers.super_print(return_dictionary)


def test_option_3():
    pytest.helpers.super_print_always('test_key')
key = 'test_key', _dict = <function return_dictionary at 0x039B6C48>

    @pytest.helpers.register
    def super_print_always(key, _dict=return_dictionary):
>       pprint(_dict[key])
E       TypeError: 'function' object is not subscriptable

conftest.py:30: TypeError
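One possible workaround (a sketch, not the plugin's documented pattern) is to stop referencing the fixture function as a default argument and instead pass the fixture's value in from the test:

# conftest.py (sketch)
@pytest.helpers.register
def super_print_always(key, _dict):
    pprint(_dict[key])

# test_check.py (sketch)
def test_option_3(return_dictionary):
    # The test requests the fixture, then hands its value to the helper.
    pytest.helpers.super_print_always('test_key', return_dictionary)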
We've built a CLI app with Python. Some parts need ncurses, so we use npyscreen. We've successfully tested most parts of the app using pytest (with the help of mock and other things), but we're stuck on how to test the ncurses part of the code.
Take this part of our ncurses code that prompts the user to answer:
"""
Generate text user interface:
example :
fields = [
{"type": "TitleText", "name": "Name", "key": "name"},
{"type": "TitlePassword", "name": "Password", "key": "password"},
{"type": "TitleSelectOne", "name": "Role",
"key": "role", "values": ["admin", "user"]},
]
form = form_generator("Form Foo", fields)
print(form["role"].value[0])
print(form["name"].value)
"""
def form_generator(form_title, fields):
def myFunction(*args):
form = npyscreen.Form(name=form_title)
result = {}
for field in fields:
t = field["type"]
k = field["key"]
del field["type"]
del field["key"]
result[k] = form.add(getattr(npyscreen, t), **field)
form.edit()
return result
return npyscreen.wrapper_basic(myFunction)
We have tried many ways, but failed:
stringIO to capture the output: failed
redirecting the output to a file: failed
hecate: failed; I think it only works if we run the whole program
pyautogui: I think it only works if we run the whole program
These are the complete steps of what I have tried.
So the last thing I tried was using patch. I patch those functions, but the downside is that the statements inside those functions remain untested, because the test just asserts the hard-coded return value.
I found the npyscreen docs for writing tests, but I don't completely understand them. There is just one example.
Thank you in advance.
I don't see it mentioned in the Python docs, but you can use the screen-dump feature of the curses library to capture information for analysis.
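As a rough sketch (run_and_capture and draw_title are hypothetical names, and this uses plain curses rather than npyscreen), you can read back what was drawn and assert on it:

import curses


def run_and_capture(draw):
    captured = {}

    def _inner(stdscr):
        draw(stdscr)
        stdscr.refresh()
        # instr() returns the characters currently on the screen as bytes.
        captured['line0'] = stdscr.instr(0, 0).strip()
        # putwin() dumps the whole window to a file for later analysis.
        with open('screen.dump', 'wb') as fh:
            stdscr.putwin(fh)

    curses.wrapper(_inner)
    return captured


def draw_title(stdscr):
    stdscr.addstr(0, 0, "Form Foo")


assert run_and_capture(draw_title)['line0'] == b"Form Foo"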
I am testing an application that has several external dependencies and I have used monkeypatching techniques to patch the functions of external libraries with a custom implementation to help my tests. It works as expected.
But the problem I currently have is that this makes my test file really messy. I have several tests and each test requires its own implementation of the patched function.
For instance, let us say I have a GET function from an external library: my test_a() needs GET() to be patched so that it returns False, and test_b() needs GET() to be patched so that it returns True.
What is the preferred way to handle such a scenario? Currently I do the following:
def test_a(monkeypatch):
    my_patcher(monkeypatch, patch_get_to_return_true=True, patch_get_to_return_false=False, patch_get_to_raise_exception=False)


def test_b(monkeypatch):
    my_patcher(monkeypatch, patch_get_to_return_true=True, patch_get_to_return_false=False, patch_get_to_raise_exception=False)


def test_c(monkeypatch):
    my_patcher(monkeypatch, patch_get_to_return_true=False, patch_get_to_return_false=False, patch_get_to_raise_exception=True)


def my_patcher(monkeypatch, patch_get_to_return_true=False, patch_get_to_return_false=False, patch_get_to_raise_exception=False):
    def patch_func_pos():
        return True

    def patch_func_neg():
        return False

    def patch_func_exception():
        raise my_exception

    if patch_get_to_return_true:
        monkeypatch.setattr(ExternalLib, 'GET', patch_func_pos)
    if patch_get_to_return_false:
        monkeypatch.setattr(ExternalLib, 'GET', patch_func_neg)
    if patch_get_to_raise_exception:
        monkeypatch.setattr(ExternalLib, 'GET', patch_func_exception)
The above sample has just three tests that patch one function. My actual test file has around 20 tests, and each test further patches several functions.
Can someone suggest a better way of handling this? Is it recommended to move the monkeypatching part to a separate file?
Without knowing further details, I would suggest splitting my_patcher into several small fixtures:
@pytest.fixture
def mocked_GET_pos(monkeypatch):
    monkeypatch.setattr(ExternalLib, 'GET', lambda: True)


@pytest.fixture
def mocked_GET_neg(monkeypatch):
    monkeypatch.setattr(ExternalLib, 'GET', lambda: False)


@pytest.fixture
def mocked_GET_raises(monkeypatch):
    def raise_():
        raise Exception()
    monkeypatch.setattr(ExternalLib, 'GET', raise_)
Now use pytest.mark.usefixtures to auto-apply the fixture in a test:
@pytest.mark.usefixtures('mocked_GET_pos')
def test_GET_pos():
    assert ExternalLib.GET()


@pytest.mark.usefixtures('mocked_GET_neg')
def test_GET_neg():
    assert not ExternalLib.GET()


@pytest.mark.usefixtures('mocked_GET_raises')
def test_GET_raises():
    with pytest.raises(Exception):
        ExternalLib.GET()
However, there is room for improvement, depending on the actual context. For example, when the test logic is the same and the only thing that varies is some test precondition (like the different patching of GET in your case), test or fixture parametrization often saves a lot of code duplication. Imagine you have your own function that calls GET internally:
# my_lib.py
def inform():
    try:
        result = ExternalLib.GET()
    except Exception:
        return 'error'
    if result:
        return 'success'
    else:
        return 'failure'
and you want to test whether it returns a valid result no matter how GET behaves:
# test_my_lib.py
def test_inform():
    assert inform() in ['success', 'failure', 'error']
Using the above approach, you would need to copy test_inform three times, the only difference between the copies being a different fixture used. This can be avoided by writing a parametrized fixture that will accept multiple patch possibilities for GET:
# raise_ must be defined at module scope so it can be used as a fixture param.
def raise_():
    raise Exception()


@pytest.fixture(params=[lambda: True,
                        lambda: False,
                        raise_],
                ids=['pos', 'neg', 'exception'])
def mocked_GET(request, monkeypatch):
    monkeypatch.setattr(ExternalLib, 'GET', request.param)
Now when applying mocked_GET to test_inform:
@pytest.mark.usefixtures('mocked_GET')
def test_inform():
    assert inform() in ['success', 'failure', 'error']
you get three tests out of one: test_inform will run three times, once with each mock listed in mocked_GET's parameters.
test_inform[pos]
test_inform[neg]
test_inform[exception]
Tests can be parametrized too (via pytest.mark.parametrize), and when applied correctly, the parametrization technique saves a lot of boilerplate code.
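For completeness, a small sketch of direct parametrization with pytest.mark.parametrize, reusing the hypothetical ExternalLib and inform names from the snippets above (their imports are assumed):

import pytest


@pytest.mark.parametrize("get_result, expected", [
    (lambda: True, 'success'),
    (lambda: False, 'failure'),
], ids=['pos', 'neg'])
def test_inform_result(monkeypatch, get_result, expected):
    # Patch GET per parameter set, then check the mapped return value.
    monkeypatch.setattr(ExternalLib, 'GET', get_result)
    assert inform() == expected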
Is there a way in Python unittest to set the order in which test cases are run?
In my current TestCase class, some testcases have side effects that set conditions for the others to run properly. Now I realize the proper way to do this is to use setUp() to do all setup related things, but I would like to implement a design where each successive test builds slightly more state that the next can use. I find this much more elegant.
class MyTest(TestCase):
    def test_setup(self):
        # Do something
        ...

    def test_thing(self):
        # Do something that depends on test_setup()
        ...
Ideally, I would like the tests to be run in the order they appear in the class. It appears that they run in alphabetical order.
Don't make them independent tests - if you want a monolithic test, write a monolithic test.
class Monolithic(TestCase):
    def step1(self):
        ...

    def step2(self):
        ...

    def _steps(self):
        for name in dir(self):  # dir() result is implicitly sorted
            if name.startswith("step"):
                yield name, getattr(self, name)

    def test_steps(self):
        for name, step in self._steps():
            try:
                step()
            except Exception as e:
                self.fail("{} failed ({}: {})".format(step, type(e), e))
If the test later starts failing and you want information on all failing steps instead of halting the test case at the first failed step, you can use the subtests feature: https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests
(The subtest feature is available via unittest2 for versions prior to Python 3.4: https://pypi.python.org/pypi/unittest2 )
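As a sketch of that subtest variant (assuming the same Monolithic class as above), test_steps could become:

class Monolithic(TestCase):
    # step1/step2/_steps as above ...

    def test_steps(self):
        for name, step in self._steps():
            with self.subTest(name):
                # Each failing step is reported separately; later steps still run.
                step()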
It's good practice to always write a monolithic test for such expectations. However, if you are a goofy dude like me, then you could simply write ugly-looking methods in alphabetical order so that they are sorted from a to b, as mentioned in the Python documentation (unittest — Unit testing framework):
Note that the order in which the various test cases will be run is determined by sorting the test function names with respect to the built-in ordering for strings.
Example
def test_a_first():
    print("1")


def test_b_next():
    print("2")


def test_c_last():
    print("3")
From unittest — Unit testing framework, section Organizing test code:
Note: The order in which the various tests will be run is determined by sorting the test method names with respect to the built-in ordering for strings.
So just make sure test_setup's name has the smallest string value.
Note that you should not rely on this behavior — different test functions are supposed to be independent of the order of execution. See ncoghlan's answer above for a solution if you explicitly need an order.
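Purely as an illustration of the naming trick (with the caveat above in mind), hypothetical method names that force the setup test to sort first could look like:

class MyTest(TestCase):
    # 'test_1_setup' sorts before 'test_2_thing' under the default
    # string ordering, so it runs first.
    def test_1_setup(self):
        ...

    def test_2_thing(self):
        ...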
Another way that I didn't see listed in any related questions: Use a TestSuite.
Another way to accomplish ordering is to add the tests to a unittest.TestSuite. This seems to respect the order in which the tests are added to the suite using suite.addTest(...). To do this:
Create one or more TestCase subclasses,
class FooTestCase(unittest.TestCase):
    def test_ten(self):
        print('Testing ten (10)...')

    def test_eleven(self):
        print('Testing eleven (11)...')


class BarTestCase(unittest.TestCase):
    def test_twelve(self):
        print('Testing twelve (12)...')

    def test_nine(self):
        print('Testing nine (09)...')
Create a callable test-suite generator that adds the tests in your desired order, adapted from the documentation and this question:
def suite():
    suite = unittest.TestSuite()
    suite.addTest(BarTestCase('test_nine'))
    suite.addTest(FooTestCase('test_ten'))
    suite.addTest(FooTestCase('test_eleven'))
    suite.addTest(BarTestCase('test_twelve'))
    return suite
Execute the test-suite, e.g.,
if __name__ == '__main__':
    runner = unittest.TextTestRunner(failfast=True)
    runner.run(suite())
For context, I had a need for this and wasn't satisfied with the other options. I settled on the above way of doing test ordering.
I didn't see this TestSuite method listed in any of the several "unit-test ordering" questions (e.g., this question and others, including execution order, or changing order, or tests order).
I ended up with a simple solution that worked for me:
class SequentialTestLoader(unittest.TestLoader):
    def getTestCaseNames(self, testCaseClass):
        test_names = super().getTestCaseNames(testCaseClass)
        testcase_methods = list(testCaseClass.__dict__.keys())
        test_names.sort(key=testcase_methods.index)
        return test_names
And then
unittest.main(testLoader=SequentialTestLoader())
A simple and flexible way is to assign a comparator function to unittest.TestLoader.sortTestMethodsUsing:
Function to be used to compare method names when sorting them in getTestCaseNames() and all the loadTestsFrom*() methods.
Minimal usage:
import unittest


class Test(unittest.TestCase):
    def test_foo(self):
        """ test foo """
        self.assertEqual(1, 1)

    def test_bar(self):
        """ test bar """
        self.assertEqual(1, 1)


if __name__ == "__main__":
    test_order = ["test_foo", "test_bar"]  # could be sys.argv
    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = lambda x, y: test_order.index(x) - test_order.index(y)
    unittest.main(testLoader=loader, verbosity=2)
Output:
test_foo (__main__.Test)
test foo ... ok
test_bar (__main__.Test)
test bar ... ok
Here's a proof of concept for running tests in source code order instead of the default lexical order (output is as above).
import inspect
import unittest


class Test(unittest.TestCase):
    def test_foo(self):
        """ test foo """
        self.assertEqual(1, 1)

    def test_bar(self):
        """ test bar """
        self.assertEqual(1, 1)


if __name__ == "__main__":
    test_src = inspect.getsource(Test)
    unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: (
        test_src.index(f"def {x}") - test_src.index(f"def {y}")
    )
    unittest.main(verbosity=2)
I used Python 3.8.0 in this post.
Tests which really depend on each other should be explicitly chained into one test.
Tests which require different levels of setup could also have their corresponding setUp() run enough setup; various approaches are thinkable.
Otherwise, unittest handles the test classes and the test methods inside the test classes in alphabetical order by default (even when loader.sortTestMethodsUsing is None); dir() is used internally, and it is guaranteed to sort.
The latter behavior can be exploited for practicality, e.g. to have the latest-work tests run first to speed up the edit-test-run cycle.
But that behavior should not be used to establish real dependencies. Consider that tests can be run individually via command-line options etc.
One approach can be to let those sub-tests not be treated as tests by the unittest module, by prefixing their names with an underscore, and then building a test case that runs these sub-operations in the right order.
This is better than relying on the sorting order of the unittest module, as that might change tomorrow, and achieving a topological sort on the order would not be very straightforward.
An example of this approach, taken from here (Disclaimer: my own module), is below.
Here, the test case still runs independent tests, such as checking that the table parameter is not set (test_table_not_set) or testing the primary key (test_primary_key), in parallel, but a CRUD test makes sense only if it is done in the right order, with state set by previous operations. Hence those tests have been made separate units, but not tests. Another test (test_CRUD) then builds the right order of those operations and tests them.
import os
import sqlite3
import unittest

from sql30 import db

DB_NAME = 'review.db'


class Reviews(db.Model):
    TABLE = 'reviews'
    PKEY = 'rid'
    DB_SCHEMA = {
        'db_name': DB_NAME,
        'tables': [
            {
                'name': TABLE,
                'fields': {
                    'rid': 'uuid',
                    'header': 'text',
                    'rating': 'int',
                    'desc': 'text'
                },
                'primary_key': PKEY
            }]
    }
    VALIDATE_BEFORE_WRITE = True


class ReviewTest(unittest.TestCase):

    def setUp(self):
        if os.path.exists(DB_NAME):
            os.remove(DB_NAME)

    def test_table_not_set(self):
        """
        Tests for raise of assertion when table is not set.
        """
        db = Reviews()
        try:
            db.read()
        except Exception as err:
            self.assertIn('No table set for operation', str(err))

    def test_primary_key(self):
        """
        Ensures, primary key is honored.
        """
        db = Reviews()
        db.table = 'reviews'
        db.write(rid=10, rating=5)
        try:
            db.write(rid=10, rating=4)
        except sqlite3.IntegrityError as err:
            self.assertIn('UNIQUE constraint failed', str(err))

    def _test_CREATE(self):
        db = Reviews()
        db.table = 'reviews'

        # backward compatibility for 'write' API
        db.write(tbl='reviews', rid=1, header='good thing', rating=5)

        # New API with 'create'
        db.create(tbl='reviews', rid=2, header='good thing', rating=5)

        # Backward compatibility for 'write' API, without tbl,
        # explicitly passed
        db.write(tbl='reviews', rid=3, header='good thing', rating=5)

        # New API with 'create', without table name explicitly passed.
        db.create(tbl='reviews', rid=4, header='good thing', rating=5)

        db.commit()  # Save the work.

    def _test_READ(self):
        db = Reviews()
        db.table = 'reviews'

        rec1 = db.read(tbl='reviews', rid=1, header='good thing', rating=5)
        rec2 = db.read(rid=1, header='good thing')
        rec3 = db.read(rid=1)

        self.assertEqual(rec1, rec2)
        self.assertEqual(rec2, rec3)

        recs = db.read()  # Read all
        self.assertEqual(len(recs), 4)

    def _test_UPDATE(self):
        db = Reviews()
        db.table = 'reviews'
        where = {'rid': 2}
        db.update(condition=where, header='average item', rating=2)
        db.commit()

        rec = db.read(rid=2)[0]
        self.assertIn('average item', rec)

    def _test_DELETE(self):
        db = Reviews()
        db.table = 'reviews'
        db.delete(rid=2)
        db.commit()
        self.assertFalse(db.read(rid=2))

    def test_CRUD(self):
        self._test_CREATE()
        self._test_READ()
        self._test_UPDATE()
        self._test_DELETE()

    def tearDown(self):
        os.remove(DB_NAME)
You can start with:
test_order = ['base']


def index_of(item, list):
    try:
        return list.index(item)
    except:
        return len(list) + 1
Second, define the order function:
def order_methods(x, y):
    x_rank = index_of(x[5:100], test_order)
    y_rank = index_of(y[5:100], test_order)
    return (x_rank > y_rank) - (x_rank < y_rank)
Third, set it in the class:
class ClassTests(unittest.TestCase):
    unittest.TestLoader.sortTestMethodsUsing = staticmethod(order_methods)
ncoghlan's answer was exactly what I was looking for when I came to this question. I ended up modifying it to allow each step-test to run, even if a previous step had already thrown an error; this helps me (and maybe you!) to discover and plan for the propagation of error in multi-threaded database-centric software.
class Monolithic(TestCase):
    def step1_testName1(self):
        ...

    def step2_testName2(self):
        ...

    def steps(self):
        '''
        Generates the step methods from their parent object
        '''
        for name in sorted(dir(self)):
            if name.startswith('step'):
                yield name, getattr(self, name)

    def test_steps(self):
        '''
        Run the individual steps associated with this test
        '''
        # Create a flag that determines whether to raise an error at
        # the end of the test
        failed = False

        # An empty string that will accumulate error messages for
        # each failing step
        fail_message = ''

        for name, step in self.steps():
            try:
                step()
            except Exception as e:
                # A step has failed, the test should continue through
                # the remaining steps, but eventually fail
                failed = True

                # Get the name of the method -- so the fail message is
                # nicer to read :)
                name = name.split('_')[1]
                # Append this step's exception to the fail message
                fail_message += "\n\nFAIL: {}\n {} failed ({}: {})".format(name,
                                                                           step,
                                                                           type(e),
                                                                           e)

        # Check if any of the steps failed
        if failed is True:
            # Fail the test with the accumulated exception message
            self.fail(fail_message)
I also wanted to specify a particular order of execution for my tests. The main differences from the other answers here are:
I wanted to preserve a more verbose test method name without replacing the whole name with step1, step2, etc.
I also wanted the printed method execution in the console to have some granularity, as opposed to using a monolithic solution as in some of the other answers.
So the execution of a monolithic test method looked like this:
test_booking (__main__.TestBooking) ... ok
I wanted:
test_create_booking__step1 (__main__.TestBooking) ... ok
test_process_booking__step2 (__main__.TestBooking) ... ok
test_delete_booking__step3 (__main__.TestBooking) ... ok
How to achieve this
I gave my method names a __step<order> suffix, for example (the order of definition is not important):
def test_create_booking__step1(self):
    [...]

def test_delete_booking__step3(self):
    [...]

def test_process_booking__step2(self):
    [...]
For the test suite, override the __iter__ function, which builds an iterator for the test methods.
class BookingTestSuite(unittest.TestSuite):
    """ Extends the functionality of the standard test suites """

    def __iter__(self):
        for suite in self._tests:
            suite._tests = sorted(
                [x for x in suite._tests if hasattr(x, '_testMethodName')],
                key=lambda x: int(x._testMethodName.split("step")[1])
            )
        return iter(self._tests)
This will sort test methods into order and execute them accordingly.
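A hypothetical way to wire it up, assuming the step methods above live on a TestBooking test case:

if __name__ == '__main__':
    loader = unittest.TestLoader()
    # Wrap the loaded TestBooking tests so BookingTestSuite.__iter__ can sort them.
    suite = BookingTestSuite([loader.loadTestsFromTestCase(TestBooking)])
    unittest.TextTestRunner(verbosity=2).run(suite)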