Testing an argument using Python Click

I have a command-line script built with Python Click, with an argument and an option:
# console.py
import click


@click.command()
@click.version_option()
@click.argument("filepath", type=click.Path(exists=True), default=".")
@click.option(
    "-m",
    "--max-size",
    type=int,
    help="Max size in megabytes.",
    default=20,
    show_default=True,
)
def main(filepath: str, max_size: int) -> None:
    max_size_bytes = max_size * 1024 * 1024  # convert megabytes to bytes
    if filepath.endswith(".pdf"):
        print("success")
    else:
        print(max_size_bytes)
Both the argument and the option have default values, and the command behaves as expected when run from the command line. But when I try testing it following the Click documentation and debug it, the debugger does not even enter the first line of main():
# test_console.py
from unittest.mock import Mock

import click.testing
import pytest
from pytest_mock import MockFixture

from pdf_split_tool import console


@pytest.fixture
def runner() -> click.testing.CliRunner:
    """Fixture for invoking command-line interfaces."""
    return click.testing.CliRunner()


@pytest.fixture
def mock_pdf_splitter_pdfsplitter(mocker: MockFixture) -> Mock:
    """Fixture for mocking pdf_splitter.PdfSplitter."""
    return mocker.patch("pdf_split_tool.pdf_splitter.PdfSplitter", autospec=True)


def test_main_uses_specified_filepath(
    runner: click.testing.CliRunner,
    mock_pdf_splitter_pdfsplitter: Mock,
) -> None:
    """It uses the specified filepath."""
    result = runner.invoke(console.main, ["test.pdf"])
    assert result.exit_code == 0
I couldn't see why it was failing, since the debugger never entered the first line of main(). Any ideas of what could be wrong?

The failure is due to the following error:
(Pdb) print result.output
"Usage: main [OPTIONS] [FILEPATH]\nTry 'main --help' for help.\n\nError: Invalid value for '[FILEPATH]': Path 'test.pdf' does not exist.\n"
This is happening because of the following line in console.py, which checks whether the file path exists:
@click.argument("filepath", type=click.Path(exists=True), default=".")
One way to test this is to create a temporary file, as in afterburner's code:
# test_console.py
def test_main_uses_specified_filepath() -> None:
    runner = click.testing.CliRunner()
    with runner.isolated_filesystem():
        with open('test.pdf', 'w') as f:
            f.write('Hello World!')
        result = runner.invoke(main, ["test.pdf"])
        assert result.exit_code == 0

I've changed your test method to the following. However, this is more an augmentation to apoorva kamath's answer.
def test_main_uses_specified_filepath() -> None:
    runner = click.testing.CliRunner()
    with runner.isolated_filesystem():
        with open('test.pdf', 'w') as f:
            f.write('Hello World!')
        result = runner.invoke(main, ["test.pdf"])
        assert result.exit_code == 0
Simply put, it creates an isolated file system that gets cleaned up after the test is executed, so any files created there are destroyed with it.
For more information, Click's Isolated Filesystem documentation might come in handy.
Alternatively, you can remove the exists=True parameter from your file path argument.
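For completeness, here is roughly what that alternative looks like; a minimal sketch of the question's console.py with the existence check dropped (trimmed to the argument only, so it is not the full command):
# console.py (sketch): without exists=True, Click accepts any path string,
# so the original test can pass "test.pdf" without creating the file first.
import click


@click.command()
@click.argument("filepath", type=click.Path(), default=".")
def main(filepath: str) -> None:
    click.echo(filepath)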

Related

How to get DRYer tests for simple function using python glob?

I have a function that searches for a file in the current location, then in the Downloads folder, and raises an error if it is not found. Using pytest and pytest-mock, I was able to test it, but the test code ended up much bigger than the tested code. Is there a way to make this tighter/DRYer?
Tested code:
# cei.py
import glob
import os
import sys


def get_xls_filename() -> str:
    """Returns first xls filename in current folder or Downloads folder."""
    csv_filenames = glob.glob("InfoCEI*.xls")
    if csv_filenames:
        return csv_filenames[0]
    home = os.path.expanduser("~")
    csv_filenames = glob.glob(home + "/Downloads/InfoCEI*.xls")
    if csv_filenames:
        return csv_filenames[0]
    return sys.exit(
        "Error: file not found."
    )
There are three test scenarios here: found in the current folder, found in Downloads, and not found.
Test code:
# test_cei.py
from unittest.mock import Mock

import pytest
from pytest_mock import MockFixture

import cei


@pytest.fixture
def mock_glob_glob_none(mocker: MockFixture) -> Mock:
    """Fixture for mocking glob.glob."""
    mock = mocker.patch("glob.glob")
    mock.return_value = []
    return mock


@pytest.fixture
def mock_os_path_expanduser(mocker: MockFixture) -> Mock:
    """Fixture for mocking os.path.expanduser."""
    mock = mocker.patch("os.path.expanduser")
    mock.return_value = "/home/user"
    return mock


def test_get_xls_filename_not_found(mock_glob_glob_none, mock_os_path_expanduser) -> None:
    with pytest.raises(SystemExit):
        assert cei.get_xls_filename()
    mock_glob_glob_none.assert_called()
    mock_os_path_expanduser.assert_called_once()


@pytest.fixture
def mock_glob_glob_found(mocker: MockFixture) -> Mock:
    """Fixture for mocking glob.glob."""
    mock = mocker.patch("glob.glob")
    mock.return_value = ["/my/path/InfoCEI.xls"]
    return mock


def test_get_xls_filename_current_folder(mock_glob_glob_found) -> None:
    assert cei.get_xls_filename() == "/my/path/InfoCEI.xls"
    mock_glob_glob_found.assert_called_once()


@pytest.fixture
def mock_glob_glob_found_download(mocker: MockFixture) -> Mock:
    """Fixture for mocking glob.glob."""
    values = {
        "InfoCEI*.xls": [],
        "/home/user/Downloads/InfoCEI*.xls": ["/home/user/Downloads/InfoCEI.xls"],
    }

    def side_effect(arg):
        return values[arg]

    mock = mocker.patch("glob.glob")
    mock.side_effect = side_effect
    return mock


def test_get_xls_filename_download_folder(
    mock_glob_glob_found_download, mock_os_path_expanduser
) -> None:
    assert cei.get_xls_filename() == "/home/user/Downloads/InfoCEI.xls"
    mock_os_path_expanduser.assert_called_once()
    mock_glob_glob_found_download.assert_called_with(
        "/home/user/Downloads/InfoCEI*.xls"
    )
This is obviously a bit opinion-based, but I'll try.
First, there is nothing wrong with the tests being larger than the tested code. Depending on the number of tested use cases, this can easily happen, and I wouldn't use this as a criterion for test quality.
That being said, your tests should usually exercise the API / interface, which in this case is the returned file path under different conditions. Checking whether os.path.expanduser has been called tests an internal implementation detail that may not be stable, so I would not consider that a good thing, at least in this case. You already cover the most relevant use cases where these internals are used (a test for having files in both locations might be added).
Here is what I would probably do:
import os

import pytest

from cei import get_xls_filename


@pytest.fixture
def cwd(fs, monkeypatch):
    fs.cwd = "/my/path"
    monkeypatch.setenv("HOME", "/home/user")


def test_get_xls_filename_not_found(fs, cwd) -> None:
    with pytest.raises(SystemExit):
        assert get_xls_filename()


def test_get_xls_filename_current_folder(fs, cwd) -> None:
    fs.create_file("/my/path/InfoCEI.xls")
    assert get_xls_filename() == "InfoCEI.xls"  # adapted to your implementation


def test_get_xls_filename_download_folder(fs, cwd) -> None:
    path = os.path.join("/home/user", "Downloads", "InfoCEI.xls")
    fs.create_file(path)
    assert get_xls_filename() == path
Note that I used the pyfakefs fixture fs to mock the fs (I'm a contributor to pyfakefs, so this is what I'm used to, and it makes the code a bit shorter), but this may be overkill for you.
Basically, I try to test only the API, put the common setup (here cwd and home path location) into a fixture (or in a setup method for xUnit-like tests), and add the test-specific setup (creation of the test file) to the test itself.
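As for the extra case mentioned above, a file present in both locations, a sketch reusing the same fs and cwd fixtures could look like this (it assumes the current-folder match wins, as in the implementation):
def test_get_xls_filename_both_folders(fs, cwd) -> None:
    fs.create_file("/my/path/InfoCEI.xls")
    fs.create_file("/home/user/Downloads/InfoCEI.xls")
    # The current-folder hit is returned first, so Downloads is never consulted.
    assert get_xls_filename() == "InfoCEI.xls"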

How to test click commands that expect files

I have a click command that does something with a file:
import click


@click.command()
@click.argument("file", type=click.File("r"))
def foo(file):
    print(file.read())
And I'd like to test it without creating temporary files, but I don't know which path to give the runner and can't find examples on the web. Something like this would be nice:
from click.testing import CliRunner
from magic_lib import magic_file


def test_foo():
    runner = CliRunner()
    fake_file = magic_file("Hello, world!")
    result = runner.invoke(foo, [fake_file.location])
    assert result.output == "Hello, world!\n"
Any way I can make click.File understand where I want it to look?
You can use pyfakefs. The usage depends on your testing framework; it's easiest to use with pytest because it automatically provides an fs fixture:
def test_foo(fs):
    fake_file = fs.create_file('/foo/bar', contents="Hello, world!")
    runner = CliRunner()
    result = runner.invoke(foo, [fake_file.path])
    assert result.output == "Hello, world!\n"
P.S.: Because the print in foo adds a newline, the file has to be created without \n at the end for the test to work.

How to unit test and mock the open function

I have read many articles over the last 6 hours and I still don't understand mocking and unit testing. I want to unit test a function that opens a file; how can I do this correctly?
I am also concerned because the bulk of my code uses external files for data import and manipulation. I understand that I need to mock them for testing, but I am struggling to understand how to move forward.
Some advice please. Thank you in advance.
prototype5.py
import os
import sys
import io

import pandas

pandas.set_option('display.width', None)


def openSetupConfig(a):
    """
    SUMMARY
    Read setup file
    setup file will ONLY hold the file path of the working directory
    :param a: str
    :return: contents of the file stored as str
    """
    try:
        setupConfig = open(a, "r")
        return setupConfig.read()
    except Exception as ve:
        ve = (str(ve) + "\n\nPlease ensure setup file " + str(a) + " is available")
        sys.exit(ve)


dirPath = openSetupConfig("Setup.dat")
test_prototype5.py
import unittest

import prototype5


class TEST_openSetupConfig(unittest.TestCase):
    """
    Test the openSetupConfig function from the prototype 5 library
    """

    def test_open_correct_file(self):
        result = prototype5.openSetupConfig("Setup.dat")
        self.assertTrue(result)


if __name__ == '__main__':
    unittest.main()
So the rule of thumb is to mock, stub or fake all external dependencies to the method/function under test. The point is to test the logic in isolation. So in your case you want to test that it can open a file or log an error message if it can't be opened.
import unittest
from mock import patch, mock_open
from prototype5 import openSetupConfig  # you don't want to run the whole file
import __builtin__  # needed to mock open on Python 2


class TestOpenSetupConfig(unittest.TestCase):

    def test_openSetupConfig_with_valid_file(self):
        """
        It should return file contents when passed a valid file.
        """
        expect = 'fake_contents'
        # mock_open builds a file-like mock whose read() returns read_data
        with patch('__builtin__.open', mock_open(read_data=expect)) as mocked_open:
            actual = openSetupConfig("Setup.dat")
        self.assertEqual(expect, actual)
        mocked_open.assert_called()

    @patch('prototype5.sys.exit')
    def test_openSetupConfig_with_invalid_file(self, mock_exit):
        """
        It should log an error and exit when passed an invalid file.
        """
        with patch('__builtin__.open', side_effect=IOError):
            openSetupConfig('foo')
        mock_exit.assert_called()
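On Python 3 the same idea can be written with the standard library's unittest.mock; a sketch (not from the original answer) using mock.mock_open and builtins.open:
# test_prototype5_py3.py (sketch)
import unittest
from unittest import mock

from prototype5 import openSetupConfig


class TestOpenSetupConfigPy3(unittest.TestCase):

    def test_returns_contents_for_valid_file(self):
        # mock_open builds a file-like mock whose read() returns read_data.
        with mock.patch("builtins.open", mock.mock_open(read_data="fake_contents")):
            self.assertEqual(openSetupConfig("Setup.dat"), "fake_contents")

    @mock.patch("prototype5.sys.exit")
    def test_exits_for_missing_file(self, mock_exit):
        with mock.patch("builtins.open", side_effect=FileNotFoundError):
            openSetupConfig("missing.dat")
        mock_exit.assert_called()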

How to save pytest's results/logs to a file?

I am having trouble trying to save all of the results shown from pytest to a file (txt, log, doesn't matter). In the test example below, I would like to capture what is shown in the console in a text/log file of some sort:
import pytest
import os


def test_func1():
    assert True


def test_func2():
    assert 0 == 1


if __name__ == '__main__':
    pytest.main(args=['-sv', os.path.abspath(__file__)])
Console output I'd like to save to a text file:
test-mbp:hi_world ua$ python test_out.py
================================================= test session starts =================================================
platform darwin -- Python 2.7.6 -- py-1.4.28 -- pytest-2.7.1 -- /usr/bin/python
rootdir: /Users/tester/PycharmProjects/hi_world, inifile:
plugins: capturelog
collected 2 items
test_out.py::test_func1 PASSED
test_out.py::test_func2 FAILED
====================================================== FAILURES =======================================================
_____________________________________________________ test_func2 ______________________________________________________
    def test_func2():
>       assert 0 == 1
E       assert 0 == 1
test_out.py:9: AssertionError
========================================= 1 failed, 1 passed in 0.01 seconds ==========================================
test-mbp:hi_world ua$
It appears that all of your test output is going to stdout, so you simply need to redirect your python invocation's output to a file:
python test_out.py >myoutput.log
You can also “tee” the output to multiple places. E.g., you might want to log to the file yet also see the output on your console. The above example then becomes:
python test_out.py | tee myoutput.log
I derived this from the pastebin plugin, as suggested by Bruno Oliveira:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pytest plugin that saves failure or test session information to a file passed as a command line argument to pytest.
It puts in a file exactly what pytest prints to stdout.
To use it:
Put this file in the root of tests/, edit your conftest and insert at the top of the file:
pytest_plugins = 'pytest_session_to_file'
Then you can launch your tests with the new option --session_to_file= like this:
py.test --session_to_file=FILENAME
Or:
py.test -p pytest_session_to_file --session_to_file=FILENAME
Inspired by _pytest.pastebin
Ref: https://github.com/pytest-dev/pytest/blob/master/_pytest/pastebin.py
Version : 0.1
Date : 30 sept. 2015 11:25
Copyright (C) 2015 Richard Vézina <ml.richard.vezinar@gmail.com>
Licence : Public Domain
"""

import pytest
import sys
import tempfile


def pytest_addoption(parser):
    group = parser.getgroup("terminal reporting")
    group._addoption('--session_to_file', action='store', metavar='path', default='pytest_session.txt',
                     help="Save to file the pytest session information")


@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    tr = config.pluginmanager.getplugin('terminalreporter')
    # if no terminal reporter plugin is present, nothing we can do here;
    # this can happen when this function executes in a slave node
    # when using pytest-xdist, for example
    if tr is not None:
        config._pytestsessionfile = tempfile.TemporaryFile('w+')
        oldwrite = tr._tw.write

        def tee_write(s, **kwargs):
            oldwrite(s, **kwargs)
            config._pytestsessionfile.write(str(s))

        tr._tw.write = tee_write


def pytest_unconfigure(config):
    if hasattr(config, '_pytestsessionfile'):
        # get terminal contents and delete file
        config._pytestsessionfile.seek(0)
        sessionlog = config._pytestsessionfile.read()
        config._pytestsessionfile.close()
        del config._pytestsessionfile
        # undo our patching in the terminal reporter
        tr = config.pluginmanager.getplugin('terminalreporter')
        del tr._tw.__dict__['write']
        # write summary
        create_new_file(config=config, contents=sessionlog)


def create_new_file(config, contents):
    """
    Creates a new file with pytest session contents.
    :contents: paste contents
    :returns: url to the pasted contents
    """
    # import _pytest.config
    # path = _pytest.config.option.session_to_file
    # path = 'pytest_session.txt'
    path = config.option.session_to_file
    with open(path, 'w') as f:
        f.writelines(contents)


def pytest_terminal_summary(terminalreporter):
    import _pytest.config
    tr = terminalreporter
    if 'failed' in tr.stats:
        for rep in terminalreporter.stats.get('failed'):
            try:
                msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
            except AttributeError:
                msg = tr._getfailureheadline(rep)
            tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
            rep.toterminal(tw)
            s = tw.stringio.getvalue()
            assert len(s)
            # pass the real Config object, not the _pytest.config module
            create_new_file(config=terminalreporter.config, contents=s)
The pastebin internal plugin does exactly that, but sends the output directly to bpaste.net. You can look at the plugin implementation to understand how to reuse it for your needs.
Here is a fixture that lets you do this. I used pytest's Cache feature to build a fixture that can be shared across multiple test files (including distributed tests with xdist) to collect and print test results.
conftest.py:
from _pytest.cacheprovider import Cache
from collections import defaultdict
import _pytest.cacheprovider
import pytest


@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    config.cache = Cache(config)
    config.cache.set('record_s', defaultdict(list))


@pytest.fixture(autouse=True)
def record(request):
    cache = request.config.cache
    record_s = cache.get('record_s', {})
    testname = request.node.name
    # Tried to avoid the initialization, but it throws errors.
    record_s[testname] = []
    yield record_s[testname]
    cache.set('record_s', record_s)


@pytest.hookimpl(trylast=True)
def pytest_unconfigure(config):
    print("====================================================================\n")
    print("\t\tTerminal Test Report Summary: \n")
    print("====================================================================\n")
    r_cache = config.cache.get('record_s', {})
    print(str(r_cache))
Use:
def test_foo(record):
    record.append(('PASS', "reason", {"some": "other_stuff"}))
Output:
====================================================================
Terminal Test Report Summary:
====================================================================
{u'test_foo': [[u'PASS',u'reason', { u'some': u'other_stuff' } ]]}

How do you write tests for the argparse portion of a python module?

I have a Python module that uses the argparse library. How do I write tests for that section of the code base?
You should refactor your code and move the parsing to a function:
def parse_args(args):
    parser = argparse.ArgumentParser(...)
    parser.add_argument...
    # ...Create your parser as you like...
    return parser.parse_args(args)
Then in your main function you should just call it with:
parser = parse_args(sys.argv[1:])
(where the first element of sys.argv that represents the script name is removed to not send it as an additional switch during CLI operation.)
In your tests, you can then call the parser function with whatever list of arguments you want to test it with:
def test_parser(self):
    parser = parse_args(['-l', '-m'])
    self.assertTrue(parser.long)
    # ...and so on.
This way you'll never have to execute the code of your application just to test the parser.
If you need to change and/or add options to your parser later in your application, then create a factory method:
def create_parser():
    parser = argparse.ArgumentParser(...)
    parser.add_argument...
    # ...Create your parser as you like...
    return parser
You can later manipulate it if you want, and a test could look like:
class ParserTest(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()

    def test_something(self):
        parsed = self.parser.parse_args(['--something', 'test'])
        self.assertEqual(parsed.something, 'test')
"argparse portion" is a bit vague so this answer focuses on one part: the parse_args method. This is the method that interacts with your command line and gets all the passed values. Basically, you can mock what parse_args returns so that it doesn't need to actually get values from the command line. The mock package can be installed via pip for python versions 2.6-3.2. It's part of the standard library as unittest.mock from version 3.3 onwards.
import argparse

try:
    from unittest import mock  # python 3.3+
except ImportError:
    import mock  # python 2.6-3.2


@mock.patch('argparse.ArgumentParser.parse_args',
            return_value=argparse.Namespace(kwarg1=value, kwarg2=value))
def test_command(mock_args):
    pass
You have to include all of your command method's args in the Namespace, even if they're not passed; give those args a value of None (see the docs). This style is useful for quickly testing cases where different values are passed for each method argument. If you opt to mock Namespace itself, for total argparse non-reliance in your tests, make sure it behaves similarly to the actual Namespace class.
Below is an example using the first snippet from the argparse library.
# test_mock_argparse.py
import argparse

try:
    from unittest import mock  # python 3.3+
except ImportError:
    import mock  # python 2.6-3.2


def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('integers', metavar='N', type=int, nargs='+',
                        help='an integer for the accumulator')
    parser.add_argument('--sum', dest='accumulate', action='store_const',
                        const=sum, default=max,
                        help='sum the integers (default: find the max)')
    args = parser.parse_args()
    print(args)  # NOTE: this is how you would check what the kwargs are if you're unsure
    return args.accumulate(args.integers)


@mock.patch('argparse.ArgumentParser.parse_args',
            return_value=argparse.Namespace(accumulate=sum, integers=[1, 2, 3]))
def test_command(mock_args):
    res = main()
    assert res == 6, "1 + 2 + 3 = 6"


if __name__ == "__main__":
    print(main())
Make your main() function take argv as an argument rather than letting it read from sys.argv as it will by default:
# mymodule.py
import argparse
import sys


def main(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('-a')
    process(**vars(parser.parse_args(args)))
    return 0


def process(a=None):
    pass


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
Then you can test normally.
import mock
from mymodule import main


@mock.patch('mymodule.process')
def test_main(process):
    main([])
    process.assert_called_once_with(a=None)


@mock.patch('mymodule.process')
def test_main_a(process):
    main(['-a', '1'])
    process.assert_called_once_with(a='1')
I did not want to modify the original serving script so I just mocked out the sys.argv part in argparse.
from unittest.mock import patch

with patch('argparse._sys.argv', ['python', 'serve.py']):
    ...  # your test code here
This breaks if the argparse implementation changes, but it is enough for a quick test script. Sensibility is much more important than specificity in test scripts anyway.
Populate your arg list by using sys.argv.append(), then call parse(), check the results, and repeat (see the sketch after these suggestions).
Call from a batch/bash file with your flags and a dump-args flag.
Put all your argument parsing in a separate file, call parse and dump/evaluate the results in the if __name__ == "__main__": block, then test this from a batch/bash file.
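A minimal sketch of the first suggestion (the parser and the --long flag here are illustrative, not from the original answers):
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument("-l", "--long", action="store_true")

# Append fake command-line input, parse, check, then clean up and repeat.
sys.argv.append("--long")
args = parser.parse_args()  # parse_args() reads sys.argv[1:] by default
assert args.long is True
sys.argv.remove("--long")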
parse_args raises SystemExit and prints to stderr; you can catch both of these:
import contextlib
import io
import sys


@contextlib.contextmanager
def captured_output():
    new_out, new_err = io.StringIO(), io.StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = old_out, old_err


def validate_args(args):
    # parser is the ArgumentParser under test, defined elsewhere
    with captured_output() as (out, err):
        try:
            parser.parse_args(args)
            return True
        except SystemExit:
            return False
You can inspect stderr (using err.seek(0); err.read()), but generally that granularity isn't required.
Now you can use assertTrue or whichever assertion you like:
assertTrue(validate_args(["-l", "-m"]))
Alternatively you might like to catch and rethrow a different error (instead of SystemExit):
def validate_args(args):
    with captured_output() as (out, err):
        try:
            return parser.parse_args(args)
        except SystemExit:
            err.seek(0)
            # ArgumentError expects (argument, message); None works for a parser-level error
            raise argparse.ArgumentError(None, err.read())
A simple way of testing a parser is:
parser = ...
parser.add_argument('-a',type=int)
...
argv = '-a 1 foo'.split() # or ['-a','1','foo']
args = parser.parse_args(argv)
assert(args.a == 1)
...
Another way is to modify sys.argv, and call args = parser.parse_args()
There are lots of examples of testing argparse in lib/test/test_argparse.py
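A sketch of that sys.argv route; the parser and values here are illustrative, and unittest.mock.patch.object keeps the change scoped to the with block:
import argparse
import sys
from unittest import mock

parser = argparse.ArgumentParser()
parser.add_argument('-a', type=int)

# Temporarily replace sys.argv; parse_args() with no arguments reads sys.argv[1:].
with mock.patch.object(sys, 'argv', ['prog', '-a', '1']):
    args = parser.parse_args()

assert args.a == 1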
When passing results from argparse.ArgumentParser.parse_args to a function, I sometimes use a namedtuple to mock arguments for testing.
import unittest
from collections import namedtuple
from my_module import main


class TestMyModule(unittest.TestCase):
    args_tuple = namedtuple('args', 'arg1 arg2 arg3 arg4')

    def test_arg1(self):
        args = TestMyModule.args_tuple("age > 85", None, None, None)
        res = main(args)
        assert res == ["55289-0524", "00591-3496"], 'arg1 failed'

    def test_arg2(self):
        args = TestMyModule.args_tuple(None, [42, 69], None, None)
        res = main(args)
        assert res == [], 'arg2 failed'


if __name__ == '__main__':
    unittest.main()
For testing the CLI (command-line interface) itself, and not the command output, I did something like this:
import pytest
from argparse import ArgumentParser, _StoreAction

ap = ArgumentParser(prog="cli")
ap.add_argument("cmd", choices=("spam", "ham"))
ap.add_argument("-a", "--arg", type=str, nargs="?", default=None, const=None)
...


def test_parser():
    assert isinstance(ap, ArgumentParser)
    assert isinstance(ap._actions, list)
    args = {_.dest: _ for _ in ap._actions if isinstance(_, _StoreAction)}
    assert args.keys() == {"cmd", "arg"}
    assert args["cmd"].choices == ("spam", "ham")
    assert args["arg"].type == str
    assert args["arg"].nargs == "?"
    ...
