How to Dynamically Change pytest's tmpdir Base Directory

As per the pytest documentation, it is possible to override the default temporary directory setting as follows:
py.test --basetemp=/base_dir
When the tmpdir fixture is then used in a test ...
def test_new_base_dir(tmpdir):
    print(str(tmpdir))
    assert False
... something like the following would then be printed to the screen:
/base_dir/test_new_base_dir_0
This works as intended and for certain use cases can be very useful.
However, I would like to be able to change this setting on a per-test (or perhaps I should say a "per-fixture") basis. Is such a thing possible?
I'm close to just rolling my own tmpdir based on the code for the original, but would rather not do this -- I want to build on top of existing functionality where I can, not duplicate it.
As an aside, my particular use case is that I am writing a Python module that will act on different kinds of file systems (NFS4, etc.), and it would be nice to be able to reuse the functionality of tmpdir to create fixtures like the following:
def test_nfs3_stuff(nfs3_tmpdir):
    ...  # test NFS3 functionality

def test_nfs4_stuff(nfs4_tmpdir):
    ...  # test NFS4 functionality

In the sources of TempdirFactory, .config.option.basetemp is the attribute used to store the basetemp, so you can set it directly before use:
import pytest
import time
import os

def mktemp_db(tmpdir_factory, db):
    basetemp = None
    if 'PYTEST_TMPDIR' in os.environ:
        basetemp = os.environ['PYTEST_TMPDIR']
    if basetemp:
        tmpdir_factory.config.option.basetemp = basetemp
    if db == "db1.db":
        tmpdb = tmpdir_factory.mktemp('data1_').join(db)
    elif db == "db2.db":
        tmpdb = tmpdir_factory.mktemp('data2_').join(db)
    return tmpdb

@pytest.fixture(scope='session')
def empty_db(tmpdir_factory):
    tmpdb = mktemp_db(tmpdir_factory, 'db1.db')
    print("* " + str(tmpdb))
    time.sleep(5)
    return tmpdb

@pytest.fixture(scope='session')
def empty_db2(tmpdir_factory):
    tmpdb = mktemp_db(tmpdir_factory, 'db2.db')
    print("* " + str(tmpdb))
    time.sleep(5)
    return tmpdb

def test_empty_db(empty_db):
    pass

def test_empty_db2(empty_db2):
    pass
>set PYTEST_TMPDIR=./tmp
>python.exe -m pytest -q -s test_my_db.py
* c:\tests\tmp\data1_0\db1.db
.* c:\tests\tmp\data2_0\db2.db
.
2 passed in 10.03 seconds
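Applied to the question's nfs4_tmpdir idea, the same trick might look like this (a sketch only; the fixture name and the mount point are hypothetical):

import pytest

@pytest.fixture
def nfs4_tmpdir(tmpdir_factory):
    # Hypothetical: redirect basetemp to an NFS4 mount before creating the dir
    tmpdir_factory.config.option.basetemp = '/mnt/nfs4'
    return tmpdir_factory.mktemp('nfs4_')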

There didn't appear to be a nice solution to the problem as posed in the question, so I settled on making two calls to py.test:
Passing a different --basetemp to each.
Marking (using @pytest.mark.my_mark) which tests needed the special treatment of a non-standard basetemp.
Passing -k my_mark or -k "not my_mark" into each call, as sketched below.
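A minimal sketch of that arrangement (the marker name and paths are hypothetical):

# test_fs.py
import pytest

@pytest.mark.my_mark
def test_nfs4_stuff(tmpdir):
    ...  # runs under the non-standard basetemp

def test_plain_stuff(tmpdir):
    ...  # runs under the default basetemp

py.test --basetemp=/mnt/nfs4 -k my_mark
py.test -k "not my_mark"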

Related

Single-line conditional mocking return values in pytest

I have some method in a Python module db_access called read_all_rows that takes 2 strings as parameters and returns a list:
def read_all_rows(table_name='', mode=''):
    return []  # just an example
I can mock this method unconditionally using pytest like:
mocker.patch('db_access.read_all_rows', return_value=['testRow1', 'testRow2'])
But I want to mock its return value in pytest depending on the table_name and mode parameters, so that it returns different values for different parameters and combinations of them, and I want to keep this as simple as possible.
The pseudocode of what I want:
when(db_access.read_all_rows).called_with('table_name1', any_string()).then_return(['testRow1'])
when(db_access.read_all_rows).called_with('table_name2', 'mode1').then_return(['testRow2', 'tableRow3'])
when(db_access.read_all_rows).called_with('table_name2', 'mode2').then_return(['testRow2', 'tableRow3'])
You can see that the 1st call is mocked for "any_string" placeholder.
I know that it can be achieved with side_effect like
def mock_read_all_rows(table_name='', mode=''):
    ...

mocker.patch('db_access.read_all_rows', side_effect=mock_read_all_rows)
but it is not very convenient, because you need to add an extra function, which makes the code cumbersome. Even with a lambda it is not so convenient, because you would need to handle all the conditions manually.
How could this be solved in a shorter, more readable way (ideally in a single line of code for each mock condition)?
P.S. In Java's Mockito this can easily be achieved with a single line of code for each condition, like
when(dbAccess.readAllRows(eq("tableName1"), any())).thenReturn(List.of(value1, value2));
...
but can I do this with Python's pytest mocker.patch?
There is no "off-the-shelf one-liner" like you know from the mockito library.
The pythonic way to mock based on input arguments is described in this answer.
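For reference, a minimal sketch of that side_effect-based approach (the dispatch function below is hypothetical):

def fake_read_all_rows(table_name='', mode=''):
    # any_string() case: only the table name is matched
    if table_name == 'table_name1':
        return ['testRow1']
    if table_name == 'table_name2' and mode in ('mode1', 'mode2'):
        return ['testRow2', 'tableRow3']
    raise AssertionError('unexpected arguments: %r, %r' % (table_name, mode))

mocker.patch('db_access.read_all_rows', side_effect=fake_read_all_rows)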
However, nothing stops you from creating your own single-line wrapper, and therefore your own mocking interface (or domain-specific language (DSL)).
This simple wrapper should give you an idea. Place this class in a test helper module.
# tests/helper/mocking.py

class ConditionalMock:
    def __init__(self, mocker, path):
        self.mock = mocker.patch(path, new=self._replacement)
        self._side_effects = {}
        self._default = None
        self._raise_if_not_matched = True

    def expect(self, condition, return_value):
        # Normalise a single argument into a 1-tuple (tuple('ab') would split the string)
        if not isinstance(condition, tuple):
            condition = (condition,)
        self._side_effects[condition] = return_value
        return self

    def default_return(self, default):
        self._raise_if_not_matched = False
        self._default = default
        return self

    def _replacement(self, *args):
        if args in self._side_effects:
            return self._side_effects[args]
        if self._raise_if_not_matched:
            raise AssertionError(f'Arguments {args} not expected')
        return self._default
Now import it in the tests just as usual and use the new one-line conditional mocking interface:
# tests/mocking_test.py
import time

from .helper.mocking import ConditionalMock

def test_conditional_mocker(mocker) -> None:
    ConditionalMock(mocker, 'time.strftime').expect('a', return_value='b').expect((1, 2), return_value='c')
    assert time.strftime('a') == 'b'
    assert time.strftime(1, 2) == 'c'
And result:
$ pytest tests/mocking_test.py
================================================================================================================================================= test session starts ==================================================================================================================================================
platform linux -- Python 3.8.10, pytest-6.2.5, py-1.11.0, pluggy-1.0.0
plugins: mock-3.6.1
collected 1 item
tests/mocking_test.py .
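The default_return branch is not exercised by the test above; using it would look something like this (a sketch):

ConditionalMock(mocker, 'time.strftime').expect('a', return_value='b').default_return('z')
assert time.strftime('a') == 'b'
assert time.strftime('unmatched') == 'z'  # unmatched arguments fall through to the default instead of raising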

Pytest-xdist does not honour settings.DATABASES (or I am doing something wrong)

I am trying to use pytest-xdist for running parallel tests.
It works and I see improved performance.
To improve it further, I wanted to provide it with multiple databases by using django_db_modify_db_settings_xdist_suffix.
I overrode that function in my conftest.py.
Since I have four workers, I created four databases manually. Then I use conftest.py to modify settings.DATABASES, replacing DBNAME with DBNAME_<worker suffix>.
I verified that my settings.DATABASES has changed and is correct.
But the queries are still going to old db DBNAME (which is no longer in my settings.DATABASES)
What could be going wrong?
Do I need to make any other change? Or change in conftest.py fixture is enough?
Thanks in advance for any help or direction.
EDIT:
My conftest.py has a lot of stuff in it.
At the end of django_db_modify_db_settings_xdist_suffix, if I log settings.DATABASES, it shows me the correct & expected info. But the queries still go to a different db.
conftest.py is the same in both runs (pytest -n4 or pytest). Since it depends on the xdist suffix, it modifies the settings.DATABASES value in the "-n 4" run only.
Two relevant functions which I think are important here:
@pytest.fixture(scope="session")
def django_db_setup(
    request,
    django_test_environment,
    django_db_blocker,
    django_db_use_migrations,
    django_db_keepdb,
    django_db_createdb,
    django_db_modify_db_settings,
):
    pass
And
@pytest.fixture(scope="session")
def django_db_modify_db_settings_xdist_suffix(request):
    from django.conf import settings

    default = settings.DATABASES["default"].copy()
    settings.DATABASES.clear()
    settings.DATABASES["default"] = default

    xdist_suffix = None
    xdist_worker_id = get_xdist_worker_id(request)
    if xdist_worker_id != 'master':
        xdist_suffix = xdist_worker_id

    if xdist_suffix:
        for db_settings in settings.DATABASES.values():
            test_name = db_settings.get("TEST", {}).get("NAME")
            if not test_name:
                test_name = "test_{}".format(db_settings["NAME"])
            db_settings.setdefault("TEST", {})
            db_settings["TEST"]["NAME"] = "{}_{}".format(test_name, xdist_suffix)
            db_settings["NAME"] = db_settings["TEST"]["NAME"]
I am doing something similar to what you have done, though perhaps simpler or more elegant. To my mind, the issue lies more on the Django side than with pytest-xdist.
I have used pytest-xdist to scale concurrent stress testing, and the hook that seems most relevant to your question uses the gateway id to send a setting to each remote worker, which lets the nodes be distinguished:
def pytest_configure_node(self, node: WorkerController) -> None:
    """Set something peculiar for each node."""
    node.workerinput['SPECIAL'] = get_db_for_node(node.gateway.id)
You implement get_db_for_node(gateway_id: str) -> str yourself, to suit your setup.
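A hypothetical version mapping xdist gateway ids (gw0, gw1, ...) onto four pre-created databases could be as simple as:

def get_db_for_node(gateway_id: str) -> str:
    # gateway ids are 'gw0'..'gw3' with four workers; reuse the id as a DB suffix
    return 'DBNAME_{}'.format(gateway_id)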
Then in the worker you could perhaps leverage config.workerinput to access the special value mentioned above:
@pytest.fixture(scope="session")
def special(pytestconfig: Config) -> str:
    if not hasattr(pytestconfig, 'workerinput'):
        log.exception('fixture requires xdist')
        return ''
    return pytestconfig.workerinput['SPECIAL']

Python - CherryPy testing - set session data?

When running a pytest unit test against a CherryPy server, using a cherrypy.helper.CPWebCase subclass, how can I set data for the session object? I tried just calling cherrypy.session['foo'] = 'bar' like I would if I were really inside a CherryPy request, but that just gave an "AttributeError: '_Serving' object has no attribute 'session'".
For reference, a test case might look something like this (pulled from the CherryPy Docs with minor edits):
import cherrypy
from cherrypy.test import helper

from MyApp import Root

class SimpleCPTest(helper.CPWebCase):
    def setup_server():
        cherrypy.tree.mount(Root(), "/", {'/': {'tools.sessions.on': True}})
    setup_server = staticmethod(setup_server)

    def check_two_plus_two_equals_four(self):
        # <code to set session variable to 2 here>
        # This is the question: How do I set a session variable?
        self.getPage("/")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('4')
And the handler might look something like this (or anything else, it makes no difference whatsoever):
class Root:
    @cherrypy.expose
    def test_handler(self):
        # get a random session variable and do something with it
        number_var = cherrypy.session.get('Number')
        # Add two. This will fail if the session variable has not been set,
        # or is not a number
        number_var = number_var + 2
        return str(number_var)
It's safe to assume that the config is correct, and sessions work as expected.
I could, of course, write a CherryPy page that takes a key and value as arguments, and then sets the specified session value, and call that from my test code (EDIT: I've tested this, and it does work). That, however, seems kludgy, and I'd really want to limit it to testing only somehow if I went down that road.
What you are trying to achieve is usually referred to as mocking.
While running tests you usually want to "mock" some of the resources you access with dummy objects that have the same interface (duck typing). This may be achieved with monkey patching. To simplify this process you may use unittest.mock.patch as either a context manager or a method/function decorator.
Please find below the working example with context manager option:
==> MyApp.py <==
import cherrypy

class Root:
    _cp_config = {'tools.sessions.on': True}

    @cherrypy.expose
    def test_handler(self):
        # get a random session variable and do something with it
        number_var = cherrypy.session.get('Number')
        # Add two. This will fail if the session variable has not been set,
        # or is not a number
        number_var = number_var + 2
        return str(number_var)
==> cp_test.py <==
from unittest.mock import patch

import cherrypy
from cherrypy.test import helper
from cherrypy.lib.sessions import RamSession

from MyApp import Root

class SimpleCPTest(helper.CPWebCase):
    @staticmethod
    def setup_server():
        cherrypy.tree.mount(Root(), '/', {})

    def test_check_two_plus_two_equals_four(self):
        # <code to set session variable to 2 here>
        sess_mock = RamSession()
        sess_mock['Number'] = 2
        with patch('cherrypy.session', sess_mock, create=True):
            # Inside this block all manipulations of `cherrypy.session`
            # actually access the `sess_mock` instance instead
            self.getPage("/test_handler")
            self.assertStatus('200 OK')
            self.assertHeader('Content-Type', 'text/html;charset=utf-8')
            self.assertBody('4')
Now you may safely run test as follows:
$ py.test -sv cp_test.py
============================================================================================================ test session starts =============================================================================================================
platform darwin -- Python 3.5.2, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- ~/.pyenv/versions/3.5.2/envs/cptest-pyenv-virtualenv/bin/python3.5
cachedir: .cache
rootdir: ~/src/cptest, inifile:
collected 2 items
cp_test.py::SimpleCPTest::test_check_two_plus_two_equals_four PASSED
cp_test.py::SimpleCPTest::test_gc <- ../../.pyenv/versions/3.5.2/envs/cptest-pyenv-virtualenv/lib/python3.5/site-packages/cherrypy/test/helper.py PASSED

What is Python best practice to detect then influence behavior if in development?

I'm new to Python and I want to know the pythonic way to make behavior differ between my runtime test environment and production.
My use case is that I'm using a decorator that needs different parameters in test vs production runs.
It looks like this:
# buffer_time should be 0 in test and, let's say, 5000 in prod
@retry(stop_max_attempt_number=7, wait_fixed=buffer_time)
def wait_until_volume_status_is_in_use(self, volume):
    if volume.status != 'in use':
        log_fail('Volume status is ... %s' % volume.status)
        volume.update()
    return volume.status
One solution is to use OS environment variables.
At the top of a file I can write something like this
# some variation of this
buffer_time = 0 if os.environ.get('MODE') == 'TEST' else 5000

class Guy(object):
    # Body where buffer_time is used in the decorator
    ...
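Filled in, that variation might look like the following sketch (assuming the @retry decorator comes from the retrying package, as its keyword arguments suggest):

import os
from retrying import retry  # assumption: source of the @retry decorator above

# resolved once, at import time
buffer_time = 0 if os.environ.get('MODE') == 'TEST' else 5000

class Guy(object):
    @retry(stop_max_attempt_number=7, wait_fixed=buffer_time)
    def wait_until_volume_status_is_in_use(self, volume):
        if volume.status != 'in use':
            log_fail('Volume status is ... %s' % volume.status)
            volume.update()
        return volume.status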
Another solution is to use a settings file
# settings.py
def init():
    global RETRY_BUFFER
    RETRY_BUFFER = 5000

# __init__.py
import settings
settings.init()

# test file
from my_module import settings
settings.RETRY_BUFFER = 0
from my_module.class_file import MyKlass
# Do Tests

# class file
import settings

buffer_time = settings.RETRY_BUFFER

class Guy(object):
    # Body where buffer_time is used in the decorator
    ...
Ultimately my problem with both solutions is that they both use shared state.
I would like to know what is the standard way to accomplish this.

How do I run all Python unit tests in a directory?

I have a directory that contains my Python unit tests. Each unit test module is of the form test_*.py. I am attempting to make a file called all_test.py that will, you guessed it, run all files in the aforementioned test form and return the result. I have tried two methods so far; both have failed. I will show the two methods, and I hope someone out there knows how to actually do this correctly.
For my first valiant attempt, I thought "If I just import all my testing modules in the file, and then call this unittest.main() doodad, it will work, right?" Well, turns out I was wrong.
import glob
import unittest

testSuite = unittest.TestSuite()
test_file_strings = glob.glob('test_*.py')
module_strings = [file_name[0:len(file_name) - 3] for file_name in test_file_strings]

if __name__ == "__main__":
    unittest.main()
This did not work, the result I got was:
$ python all_test.py
----------------------------------------------------------------------
Ran 0 tests in 0.000s
OK
For my second try, I thought, OK, maybe I will try to do this whole testing thing in a more "manual" fashion. So I attempted to do that below:
import glob
import unittest

testSuite = unittest.TestSuite()
test_file_strings = glob.glob('test_*.py')
module_strings = [file_name[0:len(file_name) - 3] for file_name in test_file_strings]
[__import__(file_name) for file_name in module_strings]
suites = [unittest.TestLoader().loadTestsFromName(file_name) for file_name in module_strings]
[testSuite.addTest(suite) for suite in suites]
print(testSuite)

result = unittest.TestResult()
testSuite.run(result)
print(result)

# Ok, at this point I have a result
# How do I display it as the normal unit test command line output?
if __name__ == "__main__":
    unittest.main()
This also did not work, but it seems so close!
$ python all_test.py
<unittest.TestSuite tests=[<unittest.TestSuite tests=[<unittest.TestSuite tests=[<test_main.TestMain testMethod=test_respondes_to_get>]>]>]>
<unittest.TestResult run=1 errors=0 failures=0>
----------------------------------------------------------------------
Ran 0 tests in 0.000s
OK
I seem to have a suite of some sort, and I can execute the result. I am a little concerned about the fact that it says run=1; it seems like that should be run=2, but it is progress. But how do I pass and display the result to main? Or, how do I basically get it working so I can just run this file and, in doing so, run all the unit tests in this directory?
With Python 2.7 and higher you don't have to write new code or use third-party tools to do this; recursive test execution via the command line is built-in. Put an __init__.py in your test directory and:
python -m unittest discover <test_directory>
# or
python -m unittest discover -s <directory> -p '*_test.py'
You can read more in the Python 2.7 or Python 3.x unittest documentation.
Update for 2021:
Lots of modern python projects use more advanced tools like pytest. For example, pull down matplotlib or scikit-learn and you will see they both use it.
It is important to know about these newer tools because when you have more than 7000 tests you need:
more advanced ways to summarize what passed, what was skipped, warnings, and errors
easy ways to see how they failed
percent complete as it is running
total run time
ways to generate a test report
etc etc
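For instance, pytest can produce a JUnit-style XML report with a single flag (a typical invocation, shown for illustration):

pytest --junitxml=report.xml -q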
In Python 3, if you're using unittest.TestCase:
You must have an empty (or otherwise) __init__.py file in your test directory (it must be named test/)
Your test files inside test/ must match the pattern test_*.py.
They can be inside a subdirectory under test/. Those subdirs can be named anything, but they all need to have an __init__.py file in them
Then, you can run all the tests with:
python -m unittest
Done! A solution in less than 100 lines. Hopefully another Python beginner saves time by finding this.
You could use a test runner that does this for you. nose is very good, for example. When run, it will find tests in the current tree and run them.
Updated:
Here's some code from my pre-nose days. You probably don't want the explicit list of module names, but maybe the rest will be useful to you.
import unittest

testmodules = [
    'cogapp.test_makefiles',
    'cogapp.test_whiteutils',
    'cogapp.test_cogapp',
]

suite = unittest.TestSuite()

for t in testmodules:
    try:
        # If the module defines a suite() function, call it to get the suite.
        mod = __import__(t, globals(), locals(), ['suite'])
        suitefn = getattr(mod, 'suite')
        suite.addTest(suitefn())
    except (ImportError, AttributeError):
        # else, just load all the test cases from the module.
        suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))

unittest.TextTestRunner().run(suite)
This is now possible directly from unittest: unittest.TestLoader.discover.
import unittest
loader = unittest.TestLoader()
start_dir = 'path/to/your/test/files'
suite = loader.discover(start_dir)
runner = unittest.TextTestRunner()
runner.run(suite)
Well, by studying the code above a bit (specifically using TextTestRunner and defaultTestLoader), I was able to get pretty close. Eventually I fixed my code by simply passing all the test suites to a single TestSuite constructor, rather than adding them "manually", which fixed my other problems. So here is my solution.
import glob
import unittest
test_files = glob.glob('test_*.py')
module_strings = [test_file[0:len(test_file)-3] for test_file in test_files]
suites = [unittest.defaultTestLoader.loadTestsFromName(test_file) for test_file in module_strings]
test_suite = unittest.TestSuite(suites)
test_runner = unittest.TextTestRunner().run(test_suite)
Yeah, it is probably easier to just use nose than to do this, but that is beside the point.
If you want to run all the tests from various test case classes and you're happy to specify them explicitly then you can do it like this:
from unittest import TestLoader, TextTestRunner, TestSuite
from uclid.test.test_symbols import TestSymbols
from uclid.test.test_patterns import TestPatterns
if __name__ == "__main__":
    loader = TestLoader()
    tests = [
        loader.loadTestsFromTestCase(test)
        for test in (TestSymbols, TestPatterns)
    ]
    suite = TestSuite(tests)
    runner = TextTestRunner(verbosity=2)
    runner.run(suite)
where uclid is my project and TestSymbols and TestPatterns are subclasses of TestCase.
I have used the discover method and an overloading of load_tests to achieve this result in a (minimal, I think) number of lines of code:
import unittest
from unittest import TestSuite

def load_tests(loader, tests, pattern):
    '''Discover and load all unit tests in all files named ``*_tests.py`` in ``./src/``.'''
    suite = TestSuite()
    for all_test_suite in unittest.defaultTestLoader.discover('src', pattern='*_tests.py'):
        for test_suite in all_test_suite:
            suite.addTests(test_suite)
    return suite

if __name__ == '__main__':
    unittest.main()
Execution gives something like:
Ran 27 tests in 0.187s
OK
I tried various approaches, but all seem flawed, or I have to make up some code; that's annoying. But there's a convenient way under Linux: simply find every test file through a certain pattern and then invoke them one by one.
find . -name 'Test*py' -exec python '{}' \;
and most importantly, it definitely works.
In the case of a packaged library or application, you don't want to do it. setuptools will do it for you.
To use this command, your project’s tests must be wrapped in a unittest test suite by either a function, a TestCase class or method, or a module or package containing TestCase classes. If the named suite is a module, and the module has an additional_tests() function, it is called and the result (which must be a unittest.TestSuite) is added to the tests to be run. If the named suite is a package, any submodules and subpackages are recursively added to the overall test suite.
Just tell it where your root test package is, like:
setup(
    # ...
    test_suite='somepkg.test'
)
And run python setup.py test.
File-based discovery may be problematic in Python 3 unless you avoid relative imports in your test suite, because discover uses file imports. Even though it supports an optional top_level_dir, I had some infinite recursion errors. So a simple solution for non-packaged code is to put the following in the __init__.py of your test package (see the load_tests protocol).
import unittest

from . import foo, bar

def load_tests(loader, tests, pattern):
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromModule(foo))
    suite.addTests(loader.loadTestsFromModule(bar))
    return suite
This is an old question, but what worked for me now (in 2019) is:
python -m unittest *_test.py
All my test files are in the same folder as the source files and they end with _test.
I use PyDev/LiClipse and haven't really figured out how to run all tests at once from the GUI. (Edit: you right-click the root test folder and choose Run as -> Python unit-test.)
This is my current workaround:
import unittest

def load_tests(loader, tests, pattern):
    return loader.discover('.')

if __name__ == '__main__':
    unittest.main()
I put this code in a module called all in my test directory. If I run this module as a unittest from LiClipse then all tests are run. If I ask to only repeat specific or failed tests then only those tests are run. It doesn't interfere with my commandline test runner either (nosetests) -- it's ignored.
You may need to change the arguments to discover based on your project setup.
Based on the answer of Stephen Cagle I added support for nested test modules.
import fnmatch
import os
import unittest

def all_test_modules(root_dir, pattern):
    test_file_names = all_files_in(root_dir, pattern)
    return [path_to_module(file_name) for file_name in test_file_names]

def all_files_in(root_dir, pattern):
    matches = []
    for root, dirnames, filenames in os.walk(root_dir):
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(root, filename))
    return matches

def path_to_module(py_file):
    return strip_leading_dots(replace_slash_by_dot(strip_extension(py_file)))

def strip_extension(py_file):
    return py_file[0:len(py_file) - len('.py')]

def replace_slash_by_dot(path):
    return path.replace('\\', '.').replace('/', '.')

def strip_leading_dots(name):
    while name.startswith('.'):
        name = name[1:]
    return name

module_names = all_test_modules('.', '*Tests.py')
suites = [unittest.defaultTestLoader.loadTestsFromName(mname) for mname in module_names]

testSuite = unittest.TestSuite(suites)
runner = unittest.TextTestRunner(verbosity=1)
runner.run(testSuite)
The code searches all subdirectories of . for *Tests.py files which are then loaded. It expects each *Tests.py to contain a single class *Tests(unittest.TestCase) which is loaded in turn and executed one after another.
This works with arbitrarily deep nesting of directories/modules, but each directory in between needs to contain at least an empty __init__.py file. This allows the test to load the nested modules by replacing slashes (or backslashes) with dots (see replace_slash_by_dot).
I just created a discover.py file in my base test directory and added import statements for everything in my subdirectories. Then discover is able to find all my tests in those directories by running it on discover.py:
python -m unittest discover ./test -p '*.py'

# /test/discover.py
import unittest

from test.package1.mod1 import XYZTest
from test.package1.package2.mod2 import ABCTest
...

if __name__ == "__main__":
    unittest.main()
I encountered the same issue.
The solution is to add an empty __init__.py to each folder and use python -m unittest discover -s.
Project Structure
tests/
    __init__.py
    domain/
        __init__.py
        value_object/
            __init__.py
            test_name.py
    presentation/
        __init__.py
        test_app.py
And running the command
python -m unittest discover -s tests/domain
To get the expected outcome
.
----------------------------------------------------------------------
Ran 1 test in 0.007s
Because test discovery seems to be a complete subject in itself, there are some frameworks dedicated to test discovery:
nose
py.test
More reading here: https://wiki.python.org/moin/PythonTestingToolsTaxonomy
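For instance, with pytest no runner script is needed at all; it discovers test_*.py files on its own (the directory name here is hypothetical):

pip install pytest
pytest tests/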
This bash script will execute the Python unittest test directory from ANYWHERE in the file system, no matter what working directory you are in: its working directory will always be where that test directory is located.
ALL TESTS, independent of $PWD
The unittest Python module is sensitive to your current directory, unless you tell it where (using the discover -s option).
This is useful when staying in the ./src or ./example working directory and you need a quick overall unit test:
#!/bin/bash
this_program="$0"
dirname="`dirname $this_program`"
readlink="`readlink -e $dirname`"
python -m unittest discover -s "$readlink"/test -v
SELECTED TESTS, independent of $PWD
I name this utility file: runone.py and use it like this:
runone.py <test-python-filename-minus-dot-py-fileextension>
#!/bin/bash
this_program="$0"
dirname="`dirname $this_program`"
readlink="`readlink -e $dirname`"
(cd "$dirname"/test; python -m unittest $1)
No need for a test/__init__.py file to burden your package/memory-overhead during production.
My code is not in a package and, as mentioned elsewhere on this page, that creates issues when using discovery. So I used the following solution instead; all the test results will be put in a given output folder.
RunAllUT.py:
"""
The given script is executing all the Unit Test of the project stored at the
path %relativePath2Src% currently fixed coded for the given project.
Prerequired:
- Anaconda should be install
- For the current user, an enviornment called "mtToolsEnv" should exists
- xmlrunner Library should be installed
"""
import sys
import os
import xmlrunner
from Repository import repository
relativePath2Src="./../.."
pythonPath=r'"C:\Users\%USERNAME%\.conda\envs\YourConfig\python.exe"'
outputTestReportFolder=os.path.dirname(os.path.abspath(__file__))+r'\test-reports' #subfolder in current file path
class UTTesting():
"""
Class tto run all the UT of the project
"""
def __init__(self):
"""
Initiate instance
Returns
-------
None.
"""
self.projectRepository = repository()
self.UTfile = [] #List all file
def retrieveAllUT(self):
"""
Generate the list of UT file in the project
Returns
-------
None.
"""
print(os.path.realpath(relativePath2Src))
self.projectRepository.retriveAllFilePaths(relativePath2Src)
#self.projectRepository.printAllFile() #debug
for file2scan in self.projectRepository.devfile:
if file2scan.endswith("_UT.py"):
self.UTfile.append(file2scan)
print(self.projectRepository.devfilepath[file2scan]+'/'+file2scan)
def runUT(self,UTtoRun):
"""
Run a single UT
Parameters
----------
UTtoRun : String
File Name of the UT
Returns
-------
None.
"""
print(UTtoRun)
if UTtoRun in self.projectRepository.devfilepath:
UTtoRunFolderPath=os.path.realpath(os.path.join(self.projectRepository.devfilepath[UTtoRun]))
UTtoRunPath = os.path.join(UTtoRunFolderPath, UTtoRun)
print(UTtoRunPath)
#set the correct execution context & run the test
os.system(" cd " + UTtoRunFolderPath + \
" & " + pythonPath + " " + UTtoRunPath + " " + outputTestReportFolder )
def runAllUT(self):
"""
Run all the UT contained in self
The function "retrieveAllUT" sjould ahve been performed before
Returns
-------
None.
"""
for UTfile in self.UTfile:
self.runUT(UTfile)
if __name__ == "__main__":
undertest=UTTesting()
undertest.retrieveAllUT()
undertest.runAllUT()
In my project specifically, I have a class that I use in other scripts. It might be overkill for your use case.
Repository.py
import os

class repository():
    """
    Class that describes the folders and files in a repository.
    """
    def __init__(self):
        """
        Initiate instance.

        Returns
        -------
        None.
        """
        self.devfile = []  # list of all files
        self.devfilepath = {}  # all file paths, keyed by file name

    def retriveAllFilePaths(self, pathrepo):
        """
        Retrieve all files and their paths into the class.

        Parameters
        ----------
        pathrepo : Path used for the parsing.

        Returns
        -------
        None.
        """
        for path, subdirs, files in os.walk(pathrepo):
            for file_name in files:
                self.devfile.append(file_name)
                self.devfilepath[file_name] = path

    def printAllFile(self):
        """
        Display all files with their paths.

        Returns
        -------
        None.
        """
        for file_loop in self.devfile:
            print(self.devfilepath[file_loop] + '/' + file_loop)
In your test files, you need to have a main like this:
if __name__ == "__main__":
    import xmlrunner
    import sys
    if len(sys.argv) > 1:
        outputFolder = sys.argv.pop()  # avoid conflict with unittest.main
    else:
        outputFolder = r'test-reports'
    print("Report will be created and stored there: " + outputFolder)
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output=outputFolder))
Here is my approach by creating a wrapper to run tests from the command line:
#!/usr/bin/env python3
import os, sys, unittest, argparse, inspect, logging

if __name__ == '__main__':
    # Parse arguments.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-?", "--help", action="help", help="show this help message and exit")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", help="increase output verbosity")
    parser.add_argument("-d", "--debug", action="store_true", dest="debug", help="show debug messages")
    parser.add_argument("-h", "--host", action="store", dest="host", help="Destination host")
    parser.add_argument("-b", "--browser", action="store", dest="browser", help="Browser driver.", choices=["Firefox", "Chrome", "IE", "Opera", "PhantomJS"])
    parser.add_argument("-r", "--reports-dir", action="store", dest="dir", help="Directory to save screenshots.", default="reports")
    parser.add_argument('files', nargs='*')
    args = parser.parse_args()

    # Load files from the arguments.
    for filename in args.files:
        exec(open(filename).read())

    # See: http://codereview.stackexchange.com/q/88655/15346
    def make_suite(tc_class):
        testloader = unittest.TestLoader()
        testnames = testloader.getTestCaseNames(tc_class)
        suite = unittest.TestSuite()
        for name in testnames:
            suite.addTest(tc_class(name, cargs=args))
        return suite

    # Add all tests.
    alltests = unittest.TestSuite()
    for name, obj in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(obj) and name.startswith("FooTest"):
            alltests.addTest(make_suite(obj))

    # Set up logger.
    verbose = bool(os.environ.get('VERBOSE', args.verbose))
    debug = bool(os.environ.get('DEBUG', args.debug))
    if verbose or debug:
        logging.basicConfig(stream=sys.stdout)
        root = logging.getLogger()
        root.setLevel(logging.INFO if verbose else logging.DEBUG)
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.INFO if verbose else logging.DEBUG)
        ch.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s'))
        root.addHandler(ch)
    else:
        logging.basicConfig(stream=sys.stderr)

    # Run tests.
    result = unittest.TextTestRunner(verbosity=2).run(alltests)
    sys.exit(not result.wasSuccessful())
For sake of simplicity, please excuse my non-PEP8 coding standards.
Then you can create a BaseTest class for the components common to all your tests, so that each of your tests would simply look like:
from BaseTest import BaseTest

class FooTestPagesBasic(BaseTest):
    def test_foo(self):
        driver = self.driver
        driver.get(self.base_url + "/")
To run, you simply specify the tests as part of the command line arguments, e.g.:
./run_tests.py -h http://example.com/ tests/**/*.py
