Overriding Python's unittest module for custom output?

Aim:
I want to customize Python's unittest module so that when I run it I get the following JSON output on the stdout stream. For example:
{
    "errors": 0,
    "failures": 1,
    "ran": 3,
    "skipped": 0,
    "successful": 2,
    "test_data": [
        {
            "index": 0,
            "result": 1
        },
        {
            "index": 1,
            "result": 1
        },
        {
            "index": 2,
            "result": -1
        }
    ]
}
Problem:
I've written some code to generate these test results, but I'm having trouble producing the test_data array in the JSON output. I've overridden the TestCase, TextTestResult and TextTestRunner classes, but I can't seem to figure out how to get the result from getTestsReport():
#!/usr/bin/python
import unittest
import sys, os
import json
class dataHolder(object):
x = None
class MyTestRunner(unittest.TextTestRunner):
def _makeResult(self):
obj = MyTestResult(self.stream, self.descriptions, self.verbosity)
dataHolder.x = obj.getTestsReport()
return obj
class MyTestResult(unittest._TextTestResult):
"""
Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
# List containing all the run tests, their index and their result. This is the new line of code.
self.tests_run = []
def getTestsReport(self):
"""Returns the run tests as a list of the form [test_description, test_index, result]"""
return self.tests_run
###
### Modified the functions so that we add the test case to the tests run list.
### -1 means Failure. 0 means error. 1 means success.
###
def addError(self, test, err):
"""
Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
self.tests_run.append([test.shortDescription(), self.testsRun, 0])
TestResult.addError(self, test, err)
def addFailure(self, test, err):
"""
Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
self.tests_run.append([test.shortDescription(), self.testsRun, -1])
TestResult.addFailure(self, test, err)
def addSuccess(self, test):
"Called when a test has completed successfully"
self.tests_run.append([test.shortDescription(), self.testsRun, 1])
TestResult.addSuccess(self, test)
class MyTest(unittest.TestCase):
currentResult = None # holds last result object passed to run method
results = [] # Holds all results so we can report back to the CCC backend
@classmethod
def setResult(cls, amount, errors, failures, skipped):
cls.amount, cls.errors, cls.failures, cls.skipped = \
amount, errors, failures, skipped
def tearDown(self):
amount = self.currentResult.testsRun
errors = self.currentResult.errors
failures = self.currentResult.failures
skipped = self.currentResult.skipped
self.setResult(amount, errors, failures, skipped)
@classmethod
def tearDownClass(cls):
print json.dumps(
{
'ran': cls.amount,
'errors': len(cls.errors),
'failures': len(cls.failures),
'succeeded': cls.amount - len(cls.errors) - len(cls.failures),
'skipped': len(cls.skipped),
'test_data' : dataHolder.x
},
sort_keys=True, indent=4, separators=(',', ': ')
)
return
def run(self, result=None):
self.currentResult = result # remember result for use in tearDown
unittest.TestCase.run(self, result) # call superclass's run() method
# Tests are defined below.
def test_something(self):
self.assertEqual(1, 2)
if __name__ == '__main__':
MyTestRunner( stream=None, descriptions=0, verbosity=0 )
unittest.main(exit=False)
Why isn't the value of x updating after calling getTestsReport()?
Edit:
Okay, I think unittest.main(exit=False) was calling the wrong function! I'm now trying to rewrite my __main__ block, but I'm still facing a bunch of issues:
#!/usr/bin/python
import unittest
import sys, os
import json
import string
class MyTestRunner(unittest.TextTestRunner):
def _makeResult(self):
return MyTestResult(self.stream, self.descriptions, self.verbosity)
class MyTestResult(unittest._TextTestResult):
"""
Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
# List containing all the run tests, their index and their result. This is the new line of code.
self.tests_run = []
def getTestsReport(self):
"""Returns the run tests as a list of the form [test_description, test_index, result]"""
return self.tests_run
###
### Modified the functions so that we add the test case to the tests run list.
### -1 means Failure. 0 means error. 1 means success.
###
def addError(self, test, err):
"""
Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
self.tests_run.append([test.shortDescription(), self.testsRun, 0])
TestResult.addError(self, test, err)
def addFailure(self, test, err):
"""
Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
self.tests_run.append([test.shortDescription(), self.testsRun, -1])
TestResult.addFailure(self, test, err)
def addSuccess(self, test):
"Called when a test has completed successfully"
self.tests_run.append([test.shortDescription(), self.testsRun, 1])
TestResult.addSuccess(self, test)
class MyTest(unittest.TestCase):
currentResult = None # holds last result object passed to run method
results = [] # Holds all results so we can report back to the CCC backend
@classmethod
def setResult(cls, amount, errors, failures, skipped):
cls.amount, cls.errors, cls.failures, cls.skipped = \
amount, errors, failures, skipped
def tearDown(self):
amount = self.currentResult.testsRun
errors = self.currentResult.errors
failures = self.currentResult.failures
skipped = self.currentResult.skipped
self.setResult(amount, errors, failures, skipped)
@classmethod
def tearDownClass(cls):
print json.dumps(
{
'ran': cls.amount,
'errors': len(cls.errors),
'failures': len(cls.failures),
'succeeded': cls.amount - len(cls.errors) - len(cls.failures),
'skipped': len(cls.skipped),
'test_data' : dataHolder.x
},
sort_keys=True, indent=4, separators=(',', ': ')
)
return
def run(self, result=None):
self.currentResult = result # remember result for use in tearDown
unittest.TestCase.run(self, result) # call superclass's run() method
# Tests are defined below.
def test_something(self):
self.assertEqual(1, 2)
if __name__ == '__main__':
module = __import__('__main__')
testRunner = MyTestRunner(verbosity=0)
test = unittest.defaultTestLoader.loadTestsFromModule(module)
print test
test2 = unittest.defaultTestLoader.loadTestsFromNames(test, module)
result = unittest.testRunner.run(test2)
Can anybody help me out here?

Related

How to delete test files when python unittest fails

I'm using Python's unittest for functions that write data to JSON. I use tearDownClass to delete the output test files so they don't clutter the local repo. Ground truths are also stored as JSON files.
I do want to keep the output test files when tests fail, though, so it's easier to troubleshoot.
My current implementation uses a global boolean, keep_files = False. When a test fails an assertion, it sets keep_files = True, and tearDownClass only deletes the files when keep_files is False. I don't like the idea of modifying global variables, or the try/except block around each assert.
import json
import os
import unittest
from src.mymodule import foo1, foo2
# These are defined outside the class on purpose so the classmethods can access them
FILE_1 = "unittest.file1.json"
EXPECTED_FILE_1 = "expected.file1.json"
FILE_2 = "unittest.file2.json"
EXPECTED_FILE_2 = "expected.file2.json"
keep_files = False
class TestRhaPostPayload(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.get_file1()
cls.get_file2()
@classmethod
def get_file1(cls):
output1 = foo1()
with open(FILE_1, "w") as f:
f.write(output1)
@classmethod
def get_file2(cls):
output2 = foo2()
with open(FILE_2, "w") as f:
f.write(output2)
@classmethod
def tearDownClass(cls):
if not keep_files:
os.remove(FILE_1)
os.remove(FILE_2)
def test_foo1(self):
# code that reads in file1 and expected_file_1
try:
self.assertEqual(expected_output1, output1)
except AssertionError:
global keep_files
keep_files = True
raise
def test_foo2(self):
# code that reads in file2 and expected_file_2
try:
self.assertEqual(expected_output2, output2)
except AssertionError:
global keep_files
keep_files = True
raise
You could simply check whether there were any errors/failures in your test case during teardown and only delete the files if there were none.
How to perform this check was explained in this post.
This check is done on a TestCase instance, so tearDownClass won't work. But you are using different files in different tests anyway, so you might as well use a normal setUp/tearDown to remove the current file.
Here is a working example:
from pathlib import Path
from typing import Optional
from unittest import TestCase

class Test(TestCase):
    def all_tests_passed(self) -> bool:
        """Returns `True` if no errors/failures occurred at the time of calling."""
        outcome = getattr(self, "_outcome")
        if hasattr(outcome, "errors"):  # Python <=3.10
            result = self.defaultTestResult()
            getattr(self, "_feedErrorsToResult")(result, outcome.errors)
        else:  # Python >=3.11
            result = outcome.result
        return all(test != self for test, _ in result.errors + result.failures)

    def setUp(self) -> None:
        super().setUp()
        self.test_file: Optional[Path] = None

    def tearDown(self) -> None:
        super().tearDown()
        if self.test_file and self.all_tests_passed():
            self.test_file.unlink()

    def test_foo(self) -> None:
        self.test_file = Path("foo.txt")
        self.test_file.touch()
        self.assertTrue(True)

    def test_bar(self) -> None:
        self.test_file = Path("bar.txt")
        self.test_file.touch()
        self.assertTrue(False)
Running this test case leaves bar.txt in the current working directory, whereas foo.txt is gone.

pytest - how to store that certain test failed with test name

I would like to record which specific tests failed, then pass that info via an API when the test class is finished.
I tried something like this:
fails = []
@pytest.fixture(scope='function')
def something(request):
yield
if request.session.testsfailed:
print("I failed")
fails.append(request.node.name)
print('FAILED', fails)
class TestLala:
@pytest.mark.order(1)
def test_test1(self, something):
assert False
@pytest.mark.order(3)
def test_test1(self, something):
assert True
@pytest.mark.order(3)
def test_test3(self, something):
assert 4 == 4
but instead of only the failed tests, I am still getting every test's name added to the list.
The pytest_runtest_makereport hook should solve it for you. (request.session.testsfailed is a running count of failures for the whole session, so once any test has failed it is truthy for every test that follows, which is why every test name ends up in your list.) Add this to your conftest.py:
import pytest

pytest.failed_nodes = []

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item):
    output = yield
    report = output.get_result()
    if report.failed:
        node_id = report.nodeid
        pytest.failed_nodes.append(node_id)
##EDIT##
The answer @Teejay Bruno gave shows how you can avoid having the push to your API run after every function: just collect the data into a list of dicts after every test, then call send_data once the test run is over to send it all to your API.
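A minimal sketch of that variation (my own, assuming a hypothetical send_to_api() helper standing in for your real API call): collect one dict per test in pytest_runtest_makereport, then push the whole list once in pytest_sessionfinish.
import pytest

collected_results = []  # one dict per finished test

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    output = yield
    report = output.get_result()
    if report.when == 'call':
        collected_results.append({
            'test': report.nodeid,
            'outcome': report.outcome,
            'duration': call.duration,
        })

def pytest_sessionfinish(session, exitstatus):
    # Runs once after the whole test session; send everything in a single call.
    send_to_api(collected_results)  # hypothetical helper, replace with your API client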
I have done something similar in my work. By using conftest.py with pytest_runtest_makereport you are able to capture the test results and other metadata, then do as you please with that data (like sending it to a database or an API).
test.py
import pytest

class TestLala:
    @pytest.mark.order(1)
    def test_test1(self):
        assert False

    @pytest.mark.order(3)
    def test_test1(self):
        assert True

    @pytest.mark.order(3)
    def test_test3(self):
        assert 4 == 4
conftest.py
import pytest

test = None
status_tag = None
line = None
duration = None
exception = None

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    global test, status_tag, line, duration, exception
    report = yield
    result = report.get_result()
    if result.when == 'call':
        (filename, line, name) = item.location
        test = item.nodeid
        status_tag = result.outcome
        line = line
        duration = call.duration
        exception = call.excinfo

@pytest.fixture(scope='function', autouse=True)
def send_data(pytestconfig):
    yield
    global test, status_tag, line, duration, exception
    # This is where you can send the data to your API.
    # This runs after every test, so if you don't want to send the data as it comes in,
    # you will need to change how this function and the one above work a little.
    print(f"TEST: {test}")
    print(f"STATUS_TAG: {status_tag}")
    print(f"LINE: {line}")
    print(f"DURATION: {duration}")
    print(f"EXCEPTION: {exception}")
    test = None
    status_tag = None
    line = None
    duration = None
    exception = None
If you have not worked with conftest.py before, see the link below:
https://docs.pytest.org/en/6.2.x/fixture.html
Search for the section titled "conftest.py: sharing fixtures across multiple files".

Python : Parallel execution of function

I would like to execute a set of tasks in parallel. I have defined a function in a class which takes a parameter and performs an operation based on it. The class structure is shown below.
from threading import Thread
from concurrent.futures import *
class Test(object):
def process_dataframe(self,id:int):
print(id*id)
def run_task(self):
thd = []
for i in range(1,10):
thd.append( "self.process_dataframe({0})".format(i))
self.run_functions_in_parallel(thd)
def run_functions_in_parallel(self,fns)->bool:
def wrap_function(self,fnToCall):
try:
eval(fnToCall)
return ("0")
except Exception as e:
return "{0}".format(e)
thd = []
isError = False
executor = ThreadPoolExecutor(max_workers=len(fns))
errorMessage = ""
for fn in fns:
t = executor.submit(wrap_function,self,fn)
thd.append(t)
for td in thd:
ret = td.result()
if ret != "0":
isError = True
errorMessage = errorMessage + """
""" + ret
if isError == True:
print (errorMessage)
raise Exception (errorMessage)
else:
return True
d=Test()
d.run_task()
I have managed to make it work and the tasks execute properly. I am wondering whether there is a better/simpler way to accomplish the same thing. I would like to keep the run_functions_in_parallel method generic so that it can be used as a common method in a module.
You don't need a wrapper, since ThreadPoolExecutor already captures exceptions for you. A function that always returns True or raises an error doesn't need a return value, but if the functions you want to call in parallel do return results, you should return them.
It is a bad idea to use a magic string as an error indicator: format(e) for a KeyError: 0 also yields "0". Better to use a unique value, such as None in this case.
Don't use eval if you don't have to. In your case, you can use partial.
Don't use too large a value for max_workers.
from functools import partial
from concurrent.futures import ThreadPoolExecutor

class Test(object):
    def process_dataframe(self, id):
        print(id*id)

    def run_task(self):
        functions = []
        for i in range(1, 10):
            functions.append(partial(self.process_dataframe, i))
        self.run_functions_in_parallel(functions)

    def run_functions_in_parallel(self, functions, max_workers=8):
        executor = ThreadPoolExecutor(max_workers=max_workers)
        futures = [
            executor.submit(function)
            for function in functions
        ]
        errors = []
        results = []
        for future in futures:
            try:
                result = future.result()
            except Exception as e:
                errors.append(e)
            else:
                results.append(result)
        if errors:
            raise Exception(errors)
        return results

d = Test()
d.run_task()
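One further refinement worth considering (a sketch of my own, not part of the answer above): creating the executor with a with statement guarantees its worker threads are shut down even if collecting the results raises.
    def run_functions_in_parallel(self, functions, max_workers=8):
        errors = []
        results = []
        # The context manager calls executor.shutdown() automatically on exit.
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(function) for function in functions]
            for future in futures:
                try:
                    results.append(future.result())
                except Exception as e:
                    errors.append(e)
        if errors:
            raise Exception(errors)
        return results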

python mock return value not correct

I'm trying out python mock, but my results are not as expected: when I print the return value it shows the mock object and not the actual result.
import mock
import unittest
import constants
import requests
class GetAddr():
def __init__(self,name=''):
self.name = name
def call_api(self):
incr = 1
self.no_of_calls(incr)
r = requests.get('http://test.com')
return r.text
def no_of_calls(self,incr):
counter = incr + 1
return counter
class TestGetAddr(unittest.TestCase):
maxDiff = None
def test_case_1(self):
self.getaddr = mock.MagicMock(GetAddr(name='John'))
self.getaddr.return_value = {
'return_code':200,
'text':'CA, USA'
}
print self.getaddr.call_api()
if __name__ == '__main__':
unittest.main()
Output:
<MagicMock name='mock.call_api()' id='4526569232'>
Expected result: to print the dictionary
{
'return_code':200,
'text':'CA, USA'
}
You made a small mistake and set return_value on the object itself, not on its call_api method.
Here is a fixed version:
class TestGetAddr(unittest.TestCase):
    maxDiff = None

    def test_case_1(self):
        self.getaddr = mock.MagicMock(GetAddr(name='John'))
        self.getaddr.call_api.return_value = {  # <-- notice call_api here
            'return_code': 200,
            'text': 'CA, USA'
        }
        print self.getaddr.call_api()
Output:
{'text': 'CA, USA', 'return_code': 200}
Updated:
As @jonrsharpe pointed out, there is another issue with these tests: they test nothing, because I mocked the very method that has to be tested.
For the current example we want to mock requests, not the call_api method, to do actual unit testing. Please pay attention to the assertions and to how the mock is set up in the updated version:
class TestGetAddr(unittest.TestCase):
    maxDiff = None

    @mock.patch('requests.get')
    def test_case_1(self, requests_get_mock):
        expected_result = 'test.com has been called'
        response = mock.MagicMock()
        response.text = expected_result
        requests_get_mock.return_value = response
        instance = GetAddr(name='John')
        result = instance.call_api()
        self.assertEquals(result, expected_result)
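As an extra check (my addition, not part of the answer above), you can also assert that the code under test hit the expected URL, using the standard Mock assertion helper at the end of test_case_1:
        requests_get_mock.assert_called_once_with('http://test.com')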

How to access the py.test capsys from inside a test?

The py.test documentation says that I should add a capsys parameter to my test methods, but in my case this doesn't seem to be possible.
class testAll(unittest.TestCase):
def setUp(self):
self.cwd = os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])
os.chdir(self.cwd)
def execute(self, cmd, result=0):
"""
Helper method used by many other tests, that would prevent replicating too much code.
"""
# cmd = "%s > /dev/null 2>&1" % cmd
ret = os.system(cmd) >> 8
self.assertEqual(ret, result, "`%s` returned %s instead of %s (cws=%s)\n\t%s" % (cmd, ret, result, os.getcwd(), OUTPUT)) ### << how to access the output from here
def test_1(self):
self.execute("do someting", 0)
You could define an autouse helper fixture in the class that stores the capsys fixture on the instance:
@pytest.fixture(autouse=True)
def capsys(self, capsys):
    self.capsys = capsys
Then use it inside the test:
out,err = self.capsys.readouterr()
assert out == 'foobar'
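Putting the pieces together, here is a minimal self-contained sketch (the class and test names are mine, made up for illustration):
import pytest
from unittest import TestCase

class TestOutput(TestCase):
    @pytest.fixture(autouse=True)
    def capsys(self, capsys):
        # Store pytest's capsys fixture on the instance so unittest-style tests can reach it.
        self.capsys = capsys

    def test_prints_foobar(self):
        print('foobar')
        out, err = self.capsys.readouterr()
        # print() appends a newline, so include it in the expectation.
        assert out == 'foobar\n'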
Kudos to Michał Krassowski for his workaround which helped me work through a similar problem.
https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790
Thomas Wright's answer is perfect. I'm just sticking this code block here for my own reference, as my search led me here and I'll likely forget this in the future! (It does a few things, so it's a useful reference for me.) If anyone sees where it can be improved, suggest away!
import os
import pytest
from _pytest.monkeypatch import MonkeyPatch
from unittest import TestCase
# -----------------------------------------------------------------------------
def foo_under_test(inp1):
"""Example of a Method under test"""
do_some_calcs_here = inp1*2
get_a_return = ClassCalled.foo_called(do_some_calcs_here)
return get_a_return
# -----------------------------------------------------------------------------
class ClassUnderTest():
"""Example of a Class contained Method under test"""
def __init__(self):
"""Instantiate the class"""
self.var1 = "TestVar"
def foo_under_test2(self, inp11):
"""The class method under test"""
return self.var1 + self.foo_called2(inp11)
def foo_called2(self, inp12):
"""Nominal sub-foo to foo_under_test2"""
return str(inp12*5)
# -----------------------------------------------------------------------------
class ClassCalled:
"""Example of a class that could be called by foo_under_test"""
def foo_called(inp2):
"""Sub-foo to foo_under_test"""
return inp2 * 2
# -----------------------------------------------------------------------------
class MockResponses:
"""Class for holding the mock responses"""
def foo_called(inp2):
"""**Mock of foo_called**"""
return inp2*3
def foo_called2(inp12):
"""**Mock of foo_called**"""
return str(inp12*4)
# -----------------------------------------------------------------------------
class Test_foo_under_test(TestCase):
"""Test class - means of grouping up tests for a target function
This one is addressing the individual function (not within a class)
"""
# ---------------------------------------------------------------
@pytest.fixture(autouse=True)
def capsys(self, capsys):
"""Capsys hook into this class"""
self.capsys = capsys
def print_to_console(self, strOut):
"""Print strOut to console (even within a pyTest execution)"""
with self.capsys.disabled():
print(strOut)
def setUp(self):
"""Ran by pyTest before running any test_*() functions"""
self.monkeypatch = MonkeyPatch()
# ---------------------------------------------------------------
def test_1(self):
"""**Test case**"""
def mock_foo_called(inp2):
return MockResponses.foo_called(inp2)
mockedFoo = ClassCalled.foo_called # Need to get this handle here
self.monkeypatch.setattr(ClassCalled, "foo_called", mock_foo_called)
x = foo_under_test(1)
self.print_to_console("\n")
strOut = "Rtn from foo: " + str(x)
self.print_to_console(strOut)
assert x == 6
# Manually clear the monkey patch
self.monkeypatch.setattr(
ClassCalled, "foo_called", mockedFoo)
"""I've noticed with me having monkeypatch inside the
class, the damn thing persists across functions.
This is the only workaround I've found so far"""
# -----------------------------------------------------------------------------
class Test_ClassUnderTest_foo_under_test(TestCase):
"""Test class - means of grouping up tests for a target function
This one is addressing the function within a class
"""
# ---------------------------------------------------------------
@pytest.fixture(autouse=True)
def capsys(self, capsys):
"""Capsys hook into this class"""
self.capsys = capsys
def print_to_console(self, strOut):
"""Print strOut to console (even within a pyTest execution)"""
with self.capsys.disabled():
print(strOut)
def setUp(self):
"""Ran by pyTest before running any test_*() functions"""
self.monkeypatch = MonkeyPatch()
# ---------------------------------------------------------------
def test_1(self):
"""**Test case**"""
def mock_foo_called2(self, inp2):
"""
Mock function
Defining a mock function, note this can be dealt with directly
here, or if its more comprehensible, put it in a separate class
(i.e. MockResponses)
"""
# return MockResponses.foo_called2(inp2) # Delegated approach
return str(inp2*4) # Direct approach
"""Note that the existence of self within this test class forces
a wrapper around calling a MockClass - so we have to go through
both the line below and the function mock_foo_called2() above to
properly invoke MockResponses.foo_called2()
"""
mockedFoo = ClassUnderTest.foo_called2
self.monkeypatch.setattr(
ClassUnderTest, "foo_called2", mock_foo_called2)
x = ClassUnderTest().foo_under_test2(1)
strOut = "Rtn from foo: " + str(x)
self.print_to_console("\n")
self.print_to_console(strOut)
assert x == "TestVar" + str(4)
self.monkeypatch.setattr(
ClassUnderTest, "foo_called2", mockedFoo)
# -----------------------------------------------------------------------------
# ---- Main
if __name__ == "__main__":
#
# Setup for pytest
outFileName = os.path.basename(__file__)[:-3] # Remove the .py from end
currScript = os.path.basename(__file__)
# -------------------------------------------------------------------------
# PyTest execution
pytest.main([currScript, "--html", outFileName + "_report.html"])
rtnA = foo_under_test(1)
print(rtnA == 4)
# This should output 4, demonstrating effect of stub (which produced 6)
rtnB = ClassUnderTest().foo_under_test2(1)
print(rtnB == "TestVar"+str(5))
# This should output "TestVar5", demonstrating effect of stub
# conftest.py
class TTY:
def communicate(self):
with self.trace():
print('wow!')
@pytest.fixture(autouse=True)
def set_capsys(capsys):
TTY.trace = capsys.disabled
@pytest.fixture
def get_tty():
def _get_tty():
return TTY()
return _get_tty
# test_wow.py
def test_wow(get_tty):
get_tty().communicate()
