How to save pytest's results/logs to a file? - python

I am having trouble trying to save -all- of the results shown from pytest to a file (txt, log, doesn't matter). In the test example below, I would like to capture what is shown in console into a text/log file of some sort:
import pytest
import os

def test_func1():
    assert True

def test_func2():
    assert 0 == 1

if __name__ == '__main__':
    pytest.main(args=['-sv', os.path.abspath(__file__)])
Console output I'd like to save to a text file:
test-mbp:hi_world ua$ python test_out.py
================================================= test session starts =================================================
platform darwin -- Python 2.7.6 -- py-1.4.28 -- pytest-2.7.1 -- /usr/bin/python
rootdir: /Users/tester/PycharmProjects/hi_world, inifile:
plugins: capturelog
collected 2 items
test_out.py::test_func1 PASSED
test_out.py::test_func2 FAILED
====================================================== FAILURES =======================================================
_____________________________________________________ test_func2 ______________________________________________________
def test_func2():
> assert 0 == 1
E assert 0 == 1
test_out.py:9: AssertionError
========================================= 1 failed, 1 passed in 0.01 seconds ==========================================
test-mbp:hi_world ua$

It appears that all of your test output is going to stdout, so you simply need to “redirect” your python invocation's output to a file:
python test_out.py >myoutput.log
You can also “tee” the output to multiple places. E.g., you might want to log to the file yet also see the output on your console. The above example then becomes:
python test_out.py | tee myoutput.log
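If you would rather keep everything inside the script instead of relying on shell redirection, a minimal sketch (assuming, as above, that the whole report goes to stdout) is to redirect stdout around the pytest.main() call:
# test_out.py -- sketch: capture the report from inside the script itself.
# 'pytest_output.log' is just an example file name.
import contextlib
import os
import pytest

def test_func1():
    assert True

if __name__ == '__main__':
    with open('pytest_output.log', 'w') as log, contextlib.redirect_stdout(log):
        pytest.main(args=['-sv', os.path.abspath(__file__)])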

I derived this from the pastebin plugin, as suggested by Bruno Oliveira:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pytest Plugin that save failure or test session information to a file pass as a command line argument to pytest.
It put in a file exactly what pytest return to the stdout.
To use it :
Put this file in the root of tests/ edit your conftest and insert in the top of the file :
pytest_plugins = 'pytest_session_to_file'
Then you can launch your test with the new option --session_to_file= like this :
py.test --session_to_file=FILENAME
Or :
py.test -p pytest_session_to_file --session_to_file=FILENAME
Inspire by _pytest.pastebin
Ref: https://github.com/pytest-dev/pytest/blob/master/_pytest/pastebin.py
Version : 0.1
Date : 30 sept. 2015 11:25
Copyright (C) 2015 Richard Vézina <ml.richard.vezinar # gmail.com>
Licence : Public Domain
"""
import pytest
import sys
import tempfile


def pytest_addoption(parser):
    group = parser.getgroup("terminal reporting")
    group._addoption('--session_to_file', action='store', metavar='path',
                     default='pytest_session.txt',
                     help="Save to file the pytest session information")


@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    tr = config.pluginmanager.getplugin('terminalreporter')
    # if no terminal reporter plugin is present, nothing we can do here;
    # this can happen when this function executes in a slave node
    # when using pytest-xdist, for example
    if tr is not None:
        config._pytestsessionfile = tempfile.TemporaryFile('w+')
        oldwrite = tr._tw.write

        def tee_write(s, **kwargs):
            oldwrite(s, **kwargs)
            config._pytestsessionfile.write(str(s))

        tr._tw.write = tee_write


def pytest_unconfigure(config):
    if hasattr(config, '_pytestsessionfile'):
        # get terminal contents and delete file
        config._pytestsessionfile.seek(0)
        sessionlog = config._pytestsessionfile.read()
        config._pytestsessionfile.close()
        del config._pytestsessionfile
        # undo our patching in the terminal reporter
        tr = config.pluginmanager.getplugin('terminalreporter')
        del tr._tw.__dict__['write']
        # write summary
        create_new_file(config=config, contents=sessionlog)


def create_new_file(config, contents):
    """
    Creates a new file with the pytest session contents.
    :contents: session contents to write to the file
    """
    # import _pytest.config
    # path = _pytest.config.option.session_to_file
    # path = 'pytest_session.txt'
    path = config.option.session_to_file
    with open(path, 'w') as f:
        f.writelines(contents)


def pytest_terminal_summary(terminalreporter):
    import _pytest.config
    tr = terminalreporter
    if 'failed' in tr.stats:
        for rep in terminalreporter.stats.get('failed'):
            try:
                msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
            except AttributeError:
                msg = tr._getfailureheadline(rep)
            tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
            rep.toterminal(tw)
            s = tw.stringio.getvalue()
            assert len(s)
            # use the session config so the --session_to_file option is available
            create_new_file(config=terminalreporter.config, contents=s)

The pastebin internal plugin does exactly that, but sends the output directly to bpaste.net. You can look at the plugin implementation to understand how to reuse it for your needs.
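If a machine-readable file is enough, note that pytest can also write a JUnit XML report on its own via the --junitxml option; a sketch in the style of the question's test_out.py (results.xml is just an example name):
# Sketch: let pytest write a JUnit XML report alongside the console output.
import os
import pytest

if __name__ == '__main__':
    pytest.main(args=['-sv', '--junitxml=results.xml', os.path.abspath(__file__)])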

Here is a fixture that lets you do this. I used pytest's cache feature so that the fixture can be passed around to multiple test files, including distributed tests (xdist), in order to collect and print test results.
conftest.py:
from _pytest.cacheprovider import Cache
from collections import defaultdict
import _pytest.cacheprovider
import pytest


@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    config.cache = Cache(config)
    config.cache.set('record_s', defaultdict(list))


@pytest.fixture(autouse=True)
def record(request):
    cache = request.config.cache
    record_s = cache.get('record_s', {})
    testname = request.node.name
    # Tried to avoid the initialization, but it throws errors.
    record_s[testname] = []
    yield record_s[testname]
    cache.set('record_s', record_s)


@pytest.hookimpl(trylast=True)
def pytest_unconfigure(config):
    print("====================================================================\n")
    print("\t\tTerminal Test Report Summary: \n")
    print("====================================================================\n")
    r_cache = config.cache.get('record_s', {})
    print(str(r_cache))
Use:
def test_foo(record):
    record.append(('PASS', "reason", {"some": "other_stuff"}))
Output:
====================================================================
Terminal Test Report Summary:
====================================================================
{u'test_foo': [[u'PASS',u'reason', { u'some': u'other_stuff' } ]]}
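If you want the summary in a file rather than only on the terminal, a small sketch of the idea (report.json is just an example name) is to dump the cached dict in pytest_unconfigure instead of printing it:
# Sketch: persist the collected records to a file instead of printing them.
import json
import pytest

@pytest.hookimpl(trylast=True)
def pytest_unconfigure(config):
    r_cache = config.cache.get('record_s', {})
    with open('report.json', 'w') as f:  # 'report.json' is an example name
        json.dump(r_cache, f, indent=2)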

Related

Run pytest markers based on command line argument

I have a python file that reads from a configuration file and initializes certain variables, followed by a number of test cases defined by pytest markers.
I run different sets of test cases in parallel by calling these markers, like this: pytest -m "markername" -n 3
The problem now is that I don't have a single configuration file anymore. There are multiple configuration files, and I need a way to tell the test cases from the command line, at execution time, which configuration file to use.
What did I try?
I wrapped the reading of the config file into a function with a conf argument.
I added a conftest.py file and added a command-line option conf using pytest_addoption.
def pytest_addoption(parser):
    parser.addoption("--conf", action="append", default=[],
                     help="Name of the configuration file to pass to test functions")

def pytest_generate_tests(metafunc):
    if 'conf' in metafunc.fixturenames:
        metafunc.parametrize("conf", metafunc.config.option.conf)
and then tried pytest -q --conf="configABC" -m "markername", in the hope that I could read that configuration file to initialize certain parameters and pass them on to the test cases containing the given marker. But nothing ever happens, and I wonder... I wonder how... I wonder why..
If I run pytest -q --conf="configABC", the config file gets read, but all the test cases run.
However, I only need to run the specific test cases that use variables initialized from the config file given on the command line. And I want to use markers because I'm also using parametrization and running tests in parallel. How do I get which configuration file to use from the command line? Am I messing this up?
Edit 1:
# contents of testcases.py
import json
import pytest
...
...
...

def getconfig(conf):
    config = open(str(conf) + '_Configuration.json', 'r')
    data = config.read()
    data_obj = json.loads(data)
    globals()['ID'] = data_obj['Id']
    globals()['Codes'] = data_obj['Codes']  # list [Code_1, Code_2, Code_3]
    globals()['Uname'] = data_obj['IM_User']
    globals()['Pwd'] = data_obj['IM_Password']
    # return ID, Codes, User, Pwd

def test_parms():
    # Returns a list of tuples [(ID, Code_1, Uname, Pwd), (ID, Code_2, Uname, Pwd), (ID, Code_3, Uname, Pwd)]
    ...
    ...
    return l

@pytest.mark.testA
@pytest.mark.parametrize("ID, Code, Uname, Pwd", test_parms())
def testA(ID, Code, Uname, Pwd):
    ....
    do something
    ....

@pytest.mark.testB
@pytest.mark.parametrize("ID, Code, Uname, Pwd", test_parms())
def testB(ID, Code, Uname, Pwd):
    ....
    do something else
    ....
You seem to be on the right track, but are missing some connections and details.
First, your option looks a bit strange - as far as I understand, you just need a string instead of a list:
conftest.py
def pytest_addoption(parser):
    parser.addoption("--conf", action="store",
                     help="Name of the configuration file"
                          " to pass to test functions")
In your test code, you read the config file, and based on your code, it contains a json dictionary of parameter lists, e.g. something like:
{
    "Id": [1, 2, 3],
    "Codes": ["a", "b", "c"],
    "IM_User": ["User1", "User2", "User3"],
    "IM_Password": ["Pwd1", "Pwd2", "Pwd3"]
}
What you need for parametrization is a list of parameter tuples, and you also want to read the list only once. Here is an example implementation that reads the list on first access and stores it in a dictionary (provided your config file looks like shown above):
import json

configs = {}

def getconfig(conf):
    if conf not in configs:
        # read the configuration if not read yet
        with open(conf + '_Configuration.json') as f:
            data_obj = json.load(f)
        ids = data_obj['Id']
        codes = data_obj['Codes']
        users = data_obj['IM_User']
        passwords = data_obj['IM_Password']
        # assume that all lists have the same length
        config = list(zip(ids, codes, users, passwords))
        configs[conf] = config
    return configs[conf]
Now you can use these parameters to parametrize your tests:
def pytest_generate_tests(metafunc):
    conf = metafunc.config.getoption("--conf")
    # only parametrize tests with the correct parameters
    if conf and metafunc.fixturenames == ["uid", "code", "name", "pwd"]:
        metafunc.parametrize("uid, code, name, pwd", getconfig(conf))


@pytest.mark.testA
def test_a(uid, code, name, pwd):
    print(uid, code, name, pwd)


@pytest.mark.testB
def test_b(uid, code, name, pwd):
    print(uid, code, name, pwd)


def test_c():
    pass
In this example, both test_a and test_b will be parametrized, but not test_c.
If you now run the test (with the json file name "ConfigA_Configuration.json"), you get something like:
$ python -m pytest -v --conf=ConfigA -m testB test_params_from_config.py
...
collected 7 items / 4 deselected / 3 selected

test_params_from_config.py::test_b[1-a-User1-Pwd1] PASSED
test_params_from_config.py::test_b[2-b-User2-Pwd2] PASSED
test_params_from_config.py::test_b[3-c-User3-Pwd3] PASSED

Testing argument using Python Click

I have a command-line script using Python Click with an argument and an option:
# console.py
import click

@click.command()
@click.version_option()
@click.argument("filepath", type=click.Path(exists=True), default=".")
@click.option(
    "-m",
    "--max-size",
    type=int,
    help="Max size in megabytes.",
    default=20,
    show_default=True,
)
def main(filepath: str, max_size: int) -> None:
    max_size_bytes = max_size * 1024 * 1024  # convert MB to bytes
    if filepath.endswith(".pdf"):
        print("success")
    else:
        print(max_size_bytes)
Both the argument and the option have default values, and the CLI behaves as expected when run from the command line. But when I try testing it following the Click documentation and debug it, it does not enter the first line:
# test_console.py
from unittest.mock import Mock

import click.testing
import pytest
from pytest_mock import MockFixture

from pdf_split_tool import console


@pytest.fixture
def runner() -> click.testing.CliRunner:
    """Fixture for invoking command-line interfaces."""
    return click.testing.CliRunner()


@pytest.fixture
def mock_pdf_splitter_pdfsplitter(mocker: MockFixture) -> Mock:
    """Fixture for mocking pdf_splitter.PdfSplitter."""
    return mocker.patch("pdf_split_tool.pdf_splitter.PdfSplitter", autospec=True)


def test_main_uses_specified_filepath(
    runner: click.testing.CliRunner,
    mock_pdf_splitter_pdfsplitter: Mock,
) -> None:
    """It uses the specified filepath."""
    result = runner.invoke(console.main, ["test.pdf"])
    assert result.exit_code == 0
I couldn't see why it was failing, since the debugger never entered the first line of main(). Any ideas what could be wrong?
The failure is due to the following error:
(Pdb) print(result.output)
"Usage: main [OPTIONS] [FILEPATH]\nTry 'main --help' for help.\n\nError: Invalid value for '[FILEPATH]': Path 'test.pdf' does not exist.\n"
This happens because of the following code in console.py, which checks whether the filepath exists:
@click.argument("filepath", type=click.Path(exists=True), default=".")
One way to test it is to create a temporary file, using afterburner's code:
# test_console.py
import click.testing

from pdf_split_tool.console import main


def test_main_uses_specified_filepath() -> None:
    runner = click.testing.CliRunner()
    with runner.isolated_filesystem():
        with open('test.pdf', 'w') as f:
            f.write('Hello World!')
        result = runner.invoke(main, ["test.pdf"])
        assert result.exit_code == 0
I've changed your test method to the following. However, this is more of an augmentation to apoorva kamath's answer.
def test_main_uses_specified_filepath() -> None:
    runner = click.testing.CliRunner()
    with runner.isolated_filesystem():
        with open('test.pdf', 'w') as f:
            f.write('Hello World!')
        result = runner.invoke(main, ["test.pdf"])
        assert result.exit_code == 0
Simply put, it creates an isolated file system that gets cleaned up after the test is executed. So any files created there are destroyed with it.
For more information, Click's Isolated Filesystem documentation might come in handy.
Alternatively, you can remove the exists=True parameter from your filepath argument.
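For completeness, a sketch of what that change looks like in console.py (the command is otherwise the same; you then have to handle missing files yourself):
# Sketch: the same command without the existence check, so the test's
# "test.pdf" no longer has to exist on disk.
import click

@click.command()
@click.argument("filepath", type=click.Path(), default=".")
@click.option("-m", "--max-size", type=int, default=20, show_default=True,
              help="Max size in megabytes.")
def main(filepath: str, max_size: int) -> None:
    max_size_bytes = max_size * 1024 * 1024  # convert MB to bytes
    print("success" if filepath.endswith(".pdf") else max_size_bytes)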

Dynamically create test file templates for your entire repo

I've been looking around, but I haven't been able to find anything that does exactly what I want.
I was wondering if there's a utility out there that scans the structure and source code of your entire repo and creates a parallel test structure where one isn't there already, in which every single function and method in your code has an equivalent empty unit test.
It's pretty tedious to have to manually write a bunch of unit test boilerplate.
For example, assuming this project structure:
myproject
|--src
   |--__init__.py
   |--a.py
   |--subpackage
      |--__init__.py
      |--b.py
      |--c.py
It should create:
myproject
|--src
|  |--__init__.py
|  |--a.py
|  |--subpackage
|     |--__init__.py
|     |--b.py
|     |--c.py
|
|--tests
   |--test_a.py
   |--subpackage
      |--test_b.py
      |--test_c.py
And if the contents of a.py are:
class Printer:
    def print_normal(self, text):
        print(text)

    def print_upper(self, text):
        print(str(text).upper())

    def print_lower(self, text):
        print(str(text).lower())


def greet():
    print("Hi!")
Then the contents of test_a.py should be something similar to this:
import pytest
from myproject.src import a


def test_Printer_print_normal():
    assert True


def test_Printer_print_upper():
    assert True


def test_Printer_print_lower():
    assert True


def test_greet():
    assert True
Is anyone aware of any python project that does something like this? Even if it isn't exactly the same, anything that would save some work when initially setting up the pytest boilerplate for a giant repo with hundreds of classes and thousands of methods would be a massive time-saver.
Thanks in advance.
Searching for test generator tools in Python myself, I could only find ones that generate unittest-style classes:
pythoscope
Installation of the latest version from Github:
$ pip2 install git+https://github.com/mkwiatkowski/pythoscope
Looks promising in theory: it generates test classes based on static code analysis of the modules, maps the project structure to a tests dir (one test module per library module), and each function gets its own test class. The problem with this project is that it's pretty much abandoned: no Python 3 support, and it fails when it encounters features backported to Python 2, thus IMO unusable nowadays. There are pull requests out there that claim to add Python 3 support, but they didn't work for me back then.
Nevertheless, here's what it would generate if your module had Python 2 syntax:
$ pythoscope --init .
$ pythoscope spam.py
$ cat tests/test_spam.py
import unittest

class TestPrinter(unittest.TestCase):
    def test_print_lower(self):
        # printer = Printer()
        # self.assertEqual(expected, printer.print_lower())
        assert False  # TODO: implement your test here

    def test_print_normal(self):
        # printer = Printer()
        # self.assertEqual(expected, printer.print_normal())
        assert False  # TODO: implement your test here

    def test_print_upper(self):
        # printer = Printer()
        # self.assertEqual(expected, printer.print_upper())
        assert False  # TODO: implement your test here

class TestGreet(unittest.TestCase):
    def test_greet(self):
        # self.assertEqual(expected, greet())
        assert False  # TODO: implement your test here

if __name__ == '__main__':
    unittest.main()
Auger
Installation from PyPI:
$ pip install auger-python
Generates tests from runtime behavior. While it may be an option for tools with a command line interface, it requires writing an entry point for libraries. Even with tools, it will only generate tests for what was explicitly exercised: if a function is not executed, no test will be generated for it. This makes it only partially usable for tools (worst case, you have to run the tool multiple times with all options activated to cover the complete code base) and hardly usable for libraries.
Nevertheless, this is what Auger would generate from an example entrypoint for your module:
# runner.py
import auger
import spam

with auger.magic([spam.Printer], verbose=True):
    p = spam.Printer()
    p.print_upper()
Executing the runner.py yields:
$ python runner.py
Auger: generated test: tests/test_spam.py
$ cat tests/test_spam.py
import spam
from spam import Printer
import unittest

class SpamTest(unittest.TestCase):
    def test_print_upper(self):
        self.assertEqual(
            Printer.print_upper(self=<spam.Printer object at 0x7f0f1b19f208>,text='fizz'),
            None
        )

if __name__ == "__main__":
    unittest.main()
Custom tool
For a one-time job, it shouldn't be hard to write your own AST visitor that generates test stubs from existing modules. The example script testgen.py below generates simple test stubs using the same idea as pythoscope. Usage example:
$ python -m testgen spam.py
class TestPrinter:
    def test_print_normal(self):
        assert False, "not implemented"

    def test_print_upper(self):
        assert False, "not implemented"

    def test_print_lower(self):
        assert False, "not implemented"


def test_greet():
    assert False, "not implemented"
Contents of testgen.py:
#!/usr/bin/env python3
import argparse
import ast
import pathlib


class TestModuleGenerator(ast.NodeVisitor):
    linesep = '\n'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.imports = set()
        self.lines = []
        self.indent = 0
        self.current_cls = None

    @property
    def code(self):
        lines = list(self.imports) + [self.linesep] + self.lines
        return self.linesep.join(lines).strip()

    def visit_FunctionDef(self, node: ast.FunctionDef):
        arg_self = 'self' if self.current_cls is not None else ''
        self.lines.extend([
            '    ' * self.indent + f'def test_{node.name}({arg_self}):',
            '    ' * (self.indent + 1) + 'assert False, "not implemented"',
            self.linesep,
        ])
        self.generic_visit(node)

    def visit_ClassDef(self, node: ast.ClassDef):
        clsdef_line = '    ' * self.indent + f'class Test{node.name}:'
        self.lines.append(clsdef_line)
        self.indent += 1
        self.current_cls = node.name
        self.generic_visit(node)
        self.current_cls = None
        if self.lines[-1] == clsdef_line:
            self.lines.extend([
                '    ' * self.indent + 'pass',
                self.linesep,
            ])
        self.indent -= 1

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef):
        self.imports.add('import pytest')
        self.lines.extend([
            '    ' * self.indent + '@pytest.mark.asyncio',
            '    ' * self.indent + f'async def test_{node.name}():',
            '    ' * (self.indent + 1) + 'assert False, "not implemented"',
            self.linesep,
        ])
        self.generic_visit(node)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'module',
        nargs='+',
        default=(),
        help='python modules to generate tests for',
        type=lambda s: pathlib.Path(s).absolute(),
    )
    modules = parser.parse_args().module
    for module in modules:
        gen = TestModuleGenerator()
        gen.visit(ast.parse(module.read_text()))
        print(gen.code)
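To get the parallel tests/ tree the question asks for, a small driver on top of testgen.py could walk the source tree and write one stub file per module. A sketch (the src/ and tests/ paths are just examples, and it assumes testgen.py is importable):
# Sketch: generate a parallel tests/ tree from src/ using the TestModuleGenerator above.
import ast
import pathlib

from testgen import TestModuleGenerator

src_root = pathlib.Path('src')      # example source root
test_root = pathlib.Path('tests')   # example output root

for py_file in src_root.rglob('*.py'):
    if py_file.name == '__init__.py':
        continue
    gen = TestModuleGenerator()
    gen.visit(ast.parse(py_file.read_text()))
    target_dir = test_root / py_file.parent.relative_to(src_root)
    target_dir.mkdir(parents=True, exist_ok=True)
    (target_dir / f'test_{py_file.name}').write_text(gen.code + '\n')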

Count and return number of nosetests

I have a set of unit test scripts saved in the pwd. I would like to be able to count the number of unit tests (nosetests) that would be executed (without actually executing them) and return that number into a python variable like this:
>>> number_of_unit_tests = count_unit_tests('.')
>>> number_of_unit_tests
400
I know I can collect from the command line like this:
nosetests --collect-only
But is it possible to do this from within a script?
You can run any nose command from a Python script, as described in basic nose usage; the only trick is extracting the number of tests. I took a look at the functional tests in nose and figured something like this should work, but you might be able to trim it down further:
import sys
import unittest
from cStringIO import StringIO

import nose
from nose.result import _TextTestResult


class TestRunner(unittest.TextTestRunner):
    def _makeResult(self):
        self.result = _TextTestResult(
            self.stream, self.descriptions, self.verbosity)
        return self.result


def count_unit_tests(module_name):
    stream = StringIO()
    runner = TestRunner(stream=stream)
    result = nose.run(
        testRunner=runner,
        argv=[sys.argv[0],
              module_name,
              '-s',
              '-v',
              '--collect-only'
              ]
    )
    return runner.result.testsRun


if __name__ == '__main__':
    print count_unit_tests('.')
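If you are on pytest rather than nose, a similar count can be obtained by passing a small plugin object to pytest.main() together with --collect-only; a sketch:
# Sketch: count collected pytest tests without running them.
import pytest


class _CollectCounter:
    def __init__(self):
        self.count = 0

    def pytest_collection_modifyitems(self, items):
        # called once after collection; items holds every collected test
        self.count = len(items)


def count_pytest_tests(path):
    counter = _CollectCounter()
    pytest.main(['--collect-only', '-q', path], plugins=[counter])
    return counter.count


if __name__ == '__main__':
    print(count_pytest_tests('.'))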

How do I run all Python unit tests in a directory?

I have a directory that contains my Python unit tests. Each unit test module is of the form test_*.py. I am attempting to make a file called all_test.py that will, you guessed it, run all files in the aforementioned test form and return the result. I have tried two methods so far; both have failed. I will show the two methods, and I hope someone out there knows how to actually do this correctly.
For my first valiant attempt, I thought "If I just import all my testing modules in the file, and then call this unittest.main() doodad, it will work, right?" Well, turns out I was wrong.
import glob
import unittest

testSuite = unittest.TestSuite()
test_file_strings = glob.glob('test_*.py')
module_strings = [str[0:len(str)-3] for str in test_file_strings]

if __name__ == "__main__":
    unittest.main()
This did not work, the result I got was:
$ python all_test.py
----------------------------------------------------------------------
Ran 0 tests in 0.000s
OK
For my second try, I thought, ok, maybe I will try to do this whole testing thing in a more "manual" fashion. So I attempted to do that below:
import glob
import unittest

testSuite = unittest.TestSuite()
test_file_strings = glob.glob('test_*.py')
module_strings = [str[0:len(str)-3] for str in test_file_strings]
[__import__(str) for str in module_strings]
suites = [unittest.TestLoader().loadTestsFromName(str) for str in module_strings]
[testSuite.addTest(suite) for suite in suites]
print testSuite

result = unittest.TestResult()
testSuite.run(result)
print result

# Ok, at this point I have a result
# How do I display it as the normal unit test command line output?
if __name__ == "__main__":
    unittest.main()
This also did not work, but it seems so close!
$ python all_test.py
<unittest.TestSuite tests=[<unittest.TestSuite tests=[<unittest.TestSuite tests=[<test_main.TestMain testMethod=test_respondes_to_get>]>]>]>
<unittest.TestResult run=1 errors=0 failures=0>
----------------------------------------------------------------------
Ran 0 tests in 0.000s
OK
I seem to have a suite of some sort, and I can execute the result. I am a little concerned about the fact that it says I have only run=1, seems like that should be run=2, but it is progress. But how do I pass and display the result to main? Or how do I basically get it working so I can just run this file, and in doing so, run all the unit tests in this directory?
With Python 2.7 and higher you don't have to write new code or use third-party tools to do this; recursive test execution via the command line is built-in. Put an __init__.py in your test directory and:
python -m unittest discover <test_directory>
# or
python -m unittest discover -s <directory> -p '*_test.py'
You can read more in the python 2.7
or python 3.x unittest documentation.
Update for 2021:
Lots of modern python projects use more advanced tools like pytest. For example, pull down matplotlib or scikit-learn and you will see they both use it.
It is important to know about these newer tools because when you have more than 7000 tests you need:
more advanced ways to summarize what passed, was skipped, warned, or errored
easy ways to see how they failed
percent complete as it is running
total run time
ways to generate a test report
etc etc
In python 3, if you're using unittest.TestCase:
You must have an empty (or otherwise) __init__.py file in your test directory (must be named test/)
Your test files inside test/ match the pattern test_*.py.
They can be inside a subdirectory under test/. Those subdirs can be named as anything, but they all need to have an __init__.py file in them
Then, you can run all the tests with:
python -m unittest
Done! A solution in less than 100 lines. Hopefully another Python beginner saves time by finding this.
You could use a test runner that would do this for you. nose is very good for example. When run, it will find tests in the current tree and run them.
Updated:
Here's some code from my pre-nose days. You probably don't want the explicit list of module names, but maybe the rest will be useful to you.
testmodules = [
    'cogapp.test_makefiles',
    'cogapp.test_whiteutils',
    'cogapp.test_cogapp',
    ]

suite = unittest.TestSuite()

for t in testmodules:
    try:
        # If the module defines a suite() function, call it to get the suite.
        mod = __import__(t, globals(), locals(), ['suite'])
        suitefn = getattr(mod, 'suite')
        suite.addTest(suitefn())
    except (ImportError, AttributeError):
        # else, just load all the test cases from the module.
        suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))

unittest.TextTestRunner().run(suite)
This is now possible directly from unittest: unittest.TestLoader.discover.
import unittest
loader = unittest.TestLoader()
start_dir = 'path/to/your/test/files'
suite = loader.discover(start_dir)
runner = unittest.TextTestRunner()
runner.run(suite)
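If your file names do not match the default test*.py naming, discover() also takes a pattern argument (and an optional top_level_dir); for example:
# Only needed when your file names deviate from the default "test*.py".
import unittest

loader = unittest.TestLoader()
suite = loader.discover('path/to/your/test/files', pattern='*_test.py')
unittest.TextTestRunner().run(suite)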
Well by studying the code above a bit (specifically using TextTestRunner and defaultTestLoader), I was able to get pretty close. Eventually I fixed my code by also just passing all test suites to a single suites constructor, rather than adding them "manually", which fixed my other problems. So here is my solution.
import glob
import unittest
test_files = glob.glob('test_*.py')
module_strings = [test_file[0:len(test_file)-3] for test_file in test_files]
suites = [unittest.defaultTestLoader.loadTestsFromName(test_file) for test_file in module_strings]
test_suite = unittest.TestSuite(suites)
test_runner = unittest.TextTestRunner().run(test_suite)
Yeah, it is probably easier to just use nose than to do this, but that is beside the point.
If you want to run all the tests from various test case classes and you're happy to specify them explicitly then you can do it like this:
from unittest import TestLoader, TextTestRunner, TestSuite

from uclid.test.test_symbols import TestSymbols
from uclid.test.test_patterns import TestPatterns

if __name__ == "__main__":
    loader = TestLoader()
    tests = [
        loader.loadTestsFromTestCase(test)
        for test in (TestSymbols, TestPatterns)
    ]
    suite = TestSuite(tests)

    runner = TextTestRunner(verbosity=2)
    runner.run(suite)
where uclid is my project and TestSymbols and TestPatterns are subclasses of TestCase.
I have used the discover method and an overloading of load_tests to achieve this result in a (minimal, I think) number of lines of code:
def load_tests(loader, tests, pattern):
    ''' Discover and load all unit tests in all files named ``*_tests.py`` in ``./src/``
    '''
    suite = TestSuite()
    for all_test_suite in unittest.defaultTestLoader.discover('src', pattern='*_tests.py'):
        for test_suite in all_test_suite:
            suite.addTests(test_suite)
    return suite

if __name__ == '__main__':
    unittest.main()
Execution gives something like:
Ran 27 tests in 0.187s
OK
I tried various approaches, but all seem flawed, or I have to make up some code; that's annoying. But there's a convenient way under Linux: simply find every test matching a certain pattern and then invoke them one by one.
find . -name 'Test*py' -exec python '{}' \;
and most importantly, it definitely works.
In the case of a packaged library or application, you don't want to do this by hand; setuptools will do it for you.
To use this command, your project’s tests must be wrapped in a unittest test suite by either a function, a TestCase class or method, or a module or package containing TestCase classes. If the named suite is a module, and the module has an additional_tests() function, it is called and the result (which must be a unittest.TestSuite) is added to the tests to be run. If the named suite is a package, any submodules and subpackages are recursively added to the overall test suite.
Just tell it where your root test package is, like:
setup(
    # ...
    test_suite='somepkg.test'
)
And run python setup.py test.
File-based discovery may be problematic in Python 3 unless you avoid relative imports in your test suite, because discover uses file import. Even though it supports an optional top_level_dir, I had some infinite recursion errors. So a simple solution for non-packaged code is to put the following in the __init__.py of your test package (see the load_tests protocol).
import unittest

from . import foo, bar


def load_tests(loader, tests, pattern):
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromModule(foo))
    suite.addTests(loader.loadTestsFromModule(bar))
    return suite
This is an old question, but what worked for me now (in 2019) is:
python -m unittest *_test.py
All my test files are in the same folder as the source files and they end with _test.
I use PyDev/LiClipse and haven't really figured out how to run all tests at once from the GUI. (Edit: you right-click the root test folder and choose Run as -> Python unit-test.)
This is my current workaround:
import unittest


def load_tests(loader, tests, pattern):
    return loader.discover('.')


if __name__ == '__main__':
    unittest.main()
I put this code in a module called all in my test directory. If I run this module as a unittest from LiClipse then all tests are run. If I ask to only repeat specific or failed tests then only those tests are run. It doesn't interfere with my commandline test runner either (nosetests) -- it's ignored.
You may need to change the arguments to discover based on your project setup.
Based on the answer of Stephen Cagle I added support for nested test modules.
import fnmatch
import os
import unittest


def all_test_modules(root_dir, pattern):
    test_file_names = all_files_in(root_dir, pattern)
    return [path_to_module(str) for str in test_file_names]


def all_files_in(root_dir, pattern):
    matches = []
    for root, dirnames, filenames in os.walk(root_dir):
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(root, filename))
    return matches


def path_to_module(py_file):
    return strip_leading_dots( \
        replace_slash_by_dot( \
            strip_extension(py_file)))


def strip_extension(py_file):
    return py_file[0:len(py_file) - len('.py')]


def replace_slash_by_dot(str):
    return str.replace('\\', '.').replace('/', '.')


def strip_leading_dots(str):
    while str.startswith('.'):
        str = str[1:len(str)]
    return str


module_names = all_test_modules('.', '*Tests.py')
suites = [unittest.defaultTestLoader.loadTestsFromName(mname) for mname
          in module_names]

testSuite = unittest.TestSuite(suites)

runner = unittest.TextTestRunner(verbosity=1)
runner.run(testSuite)
The code searches all subdirectories of . for *Tests.py files which are then loaded. It expects each *Tests.py to contain a single class *Tests(unittest.TestCase) which is loaded in turn and executed one after another.
This works with arbitrary deep nesting of directories/modules, but each directory in between needs to contain an empty __init__.py file at least. This allows the test to load the nested modules by replacing slashes (or backslashes) by dots (see replace_slash_by_dot).
I just created a discover.py file in my base test directory and added import statements for anything in my subdirectories. Then discover is able to find all my tests in those directories by running it on discover.py:
python -m unittest discover ./test -p '*.py'
# /test/discover.py
import unittest

from test.package1.mod1 import XYZTest
from test.package1.package2.mod2 import ABCTest
...

if __name__ == "__main__":
    unittest.main()
I encountered the same issue.
The solution is to add an empty __init__.py to each folder and use python -m unittest discover -s.
Project Structure
tests/
    __init__.py
    domain/
        value_object/
            __init__.py
            test_name.py
        __init__.py
    presentation/
        __init__.py
        test_app.py
And running the command
python -m unittest discover -s tests/domain
To get the expected outcome
.
----------------------------------------------------------------------
Ran 1 test in 0.007s
Because test discovery is a whole subject of its own, there are some dedicated frameworks for test discovery:
nose
Py.Test
More reading here : https://wiki.python.org/moin/PythonTestingToolsTaxonomy
This BASH script will execute the python unittest test directory from ANYWHERE in the file system, no matter what working directory you are in: its working directory will always be where that test directory is located.
ALL TESTS, independent of $PWD
The unittest Python module is sensitive to your current directory, unless you tell it where to look (using the discover -s option).
This is useful when staying in the ./src or ./example working directory and you need a quick overall unit test:
#!/bin/bash
this_program="$0"
dirname="`dirname $this_program`"
readlink="`readlink -e $dirname`"
python -m unittest discover -s "$readlink"/test -v
SELECTED TESTS, independent $PWD
I name this utility file: runone.py and use it like this:
runone.py <test-python-filename-minus-dot-py-fileextension>
#!/bin/bash
this_program="$0"
dirname="`dirname $this_program`"
readlink="`readlink -e $dirname`"
(cd "$dirname"/test; python -m unittest $1)
No need for a test/__init__.py file to burden your package/memory-overhead during production.
I have no package and, as mentioned on this page, this creates issues when using discovery. So I used the following solution. All the test results will be put in a given output folder.
RunAllUT.py:
"""
The given script is executing all the Unit Test of the project stored at the
path %relativePath2Src% currently fixed coded for the given project.
Prerequired:
- Anaconda should be install
- For the current user, an enviornment called "mtToolsEnv" should exists
- xmlrunner Library should be installed
"""
import sys
import os
import xmlrunner

from Repository import repository

relativePath2Src = "./../.."
pythonPath = r'"C:\Users\%USERNAME%\.conda\envs\YourConfig\python.exe"'
outputTestReportFolder = os.path.dirname(os.path.abspath(__file__)) + r'\test-reports'  # subfolder in current file path


class UTTesting():
    """
    Class to run all the UT of the project
    """

    def __init__(self):
        """
        Initiate instance

        Returns
        -------
        None.
        """
        self.projectRepository = repository()
        self.UTfile = []  # List all files

    def retrieveAllUT(self):
        """
        Generate the list of UT files in the project

        Returns
        -------
        None.
        """
        print(os.path.realpath(relativePath2Src))
        self.projectRepository.retriveAllFilePaths(relativePath2Src)
        # self.projectRepository.printAllFile()  # debug
        for file2scan in self.projectRepository.devfile:
            if file2scan.endswith("_UT.py"):
                self.UTfile.append(file2scan)
                print(self.projectRepository.devfilepath[file2scan] + '/' + file2scan)

    def runUT(self, UTtoRun):
        """
        Run a single UT

        Parameters
        ----------
        UTtoRun : String
            File name of the UT

        Returns
        -------
        None.
        """
        print(UTtoRun)
        if UTtoRun in self.projectRepository.devfilepath:
            UTtoRunFolderPath = os.path.realpath(os.path.join(self.projectRepository.devfilepath[UTtoRun]))
            UTtoRunPath = os.path.join(UTtoRunFolderPath, UTtoRun)
            print(UTtoRunPath)
            # set the correct execution context & run the test
            os.system(" cd " + UTtoRunFolderPath + \
                      " & " + pythonPath + " " + UTtoRunPath + " " + outputTestReportFolder)

    def runAllUT(self):
        """
        Run all the UT contained in self.
        The function "retrieveAllUT" should have been performed before.

        Returns
        -------
        None.
        """
        for UTfile in self.UTfile:
            self.runUT(UTfile)


if __name__ == "__main__":
    undertest = UTTesting()
    undertest.retrieveAllUT()
    undertest.runAllUT()
Specific to my project, I have a class that I use in other scripts. This might be overkill for your use case.
Repository.py
import os


class repository():
    """
    Class that describes the folders and files in a repository
    """

    def __init__(self):
        """
        Initiate instance

        Returns
        -------
        None.
        """
        self.devfile = []      # List all files
        self.devfilepath = {}  # List all file paths

    def retriveAllFilePaths(self, pathrepo):
        """
        Retrieve all files and their paths into the class

        Parameters
        ----------
        pathrepo : Path used for the parsing

        Returns
        -------
        None.
        """
        for path, subdirs, files in os.walk(pathrepo):
            for file_name in files:
                self.devfile.append(file_name)
                self.devfilepath[file_name] = path

    def printAllFile(self):
        """
        Display all files with their paths

        Returns
        -------
        None.
        """
        for file_loop in self.devfile:
            print(self.devfilepath[file_loop] + '/' + file_loop)
In your test files, you need to have a main like this:
if __name__ == "__main__":
    import xmlrunner
    import sys
    if len(sys.argv) > 1:
        outputFolder = sys.argv.pop()  # avoid conflict with unittest.main
    else:
        outputFolder = r'test-reports'
    print("Report will be created and stored there: " + outputFolder)
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output=outputFolder))
Here is my approach: creating a wrapper to run tests from the command line:
#!/usr/bin/env python3
import os, sys, unittest, argparse, inspect, logging

if __name__ == '__main__':
    # Parse arguments.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-?", "--help", action="help", help="show this help message and exit")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", help="increase output verbosity")
    parser.add_argument("-d", "--debug", action="store_true", dest="debug", help="show debug messages")
    parser.add_argument("-h", "--host", action="store", dest="host", help="Destination host")
    parser.add_argument("-b", "--browser", action="store", dest="browser", help="Browser driver.", choices=["Firefox", "Chrome", "IE", "Opera", "PhantomJS"])
    parser.add_argument("-r", "--reports-dir", action="store", dest="dir", help="Directory to save screenshots.", default="reports")
    parser.add_argument('files', nargs='*')
    args = parser.parse_args()

    # Load files from the arguments.
    for filename in args.files:
        exec(open(filename).read())

    # See: http://codereview.stackexchange.com/q/88655/15346
    def make_suite(tc_class):
        testloader = unittest.TestLoader()
        testnames = testloader.getTestCaseNames(tc_class)
        suite = unittest.TestSuite()
        for name in testnames:
            suite.addTest(tc_class(name, cargs=args))
        return suite

    # Add all tests.
    alltests = unittest.TestSuite()
    for name, obj in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(obj) and name.startswith("FooTest"):
            alltests.addTest(make_suite(obj))

    # Set-up logger
    verbose = bool(os.environ.get('VERBOSE', args.verbose))
    debug = bool(os.environ.get('DEBUG', args.debug))
    if verbose or debug:
        logging.basicConfig(stream=sys.stdout)
        root = logging.getLogger()
        root.setLevel(logging.INFO if verbose else logging.DEBUG)
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.INFO if verbose else logging.DEBUG)
        ch.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s'))
        root.addHandler(ch)
    else:
        logging.basicConfig(stream=sys.stderr)

    # Run tests.
    result = unittest.TextTestRunner(verbosity=2).run(alltests)
    sys.exit(not result.wasSuccessful())
For sake of simplicity, please excuse my non-PEP8 coding standards.
Then you can create a BaseTest class for components common to all your tests, so that each of your tests would simply look like:
from BaseTest import BaseTest


class FooTestPagesBasic(BaseTest):
    def test_foo(self):
        driver = self.driver
        driver.get(self.base_url + "/")
To run, you simply specify the tests as part of the command line arguments, e.g.:
./run_tests.py -h http://example.com/ tests/**/*.py
