Passing (yield) fixtures as test parameters (with a temp directory) - python

Question
Is it possible to pass yielding pytest fixtures (for setup and teardown) as parameters to test functions?
Context
I'm testing an object that reads and writes data from/to files in a single directory. The path of that directory is saved as an attribute of the object.
I'm having trouble with the following:
using a temporary directory with my test; and
ensuring that the directory is removed after each test.
Example
Consider the following (test_yieldfixtures.py):
import pytest, tempfile, os, shutil
from contextlib import contextmanager

@contextmanager
def data():
    datadir = tempfile.mkdtemp()  # setup
    yield datadir
    shutil.rmtree(datadir)  # teardown

class Thing:
    def __init__(self, datadir, errorfile):
        self.datadir = datadir
        self.errorfile = errorfile

@pytest.fixture
def thing1():
    with data() as datadir:
        errorfile = os.path.join(datadir, 'testlog1.log')
        yield Thing(datadir=datadir, errorfile=errorfile)

@pytest.fixture
def thing2():
    with data() as datadir:
        errorfile = os.path.join(datadir, 'testlog2.log')
        yield Thing(datadir=datadir, errorfile=errorfile)

@pytest.mark.parametrize('thing', [thing1, thing2])
def test_attr(thing):
    print(thing.datadir)
    assert os.path.exists(thing.datadir)
Running pytest test_yieldfixtures.py outputs the following:
================================== FAILURES ===================================
______________________________ test_attr[thing0] ______________________________
thing = <generator object thing1 at 0x0000017B50C61BF8>

    @pytest.mark.parametrize('thing', [thing1, thing2])
    def test_attr(thing):
>       print(thing.datadir)
E       AttributeError: 'function' object has no attribute 'props'

test_mod.py:39: AttributeError
OK. So fixture functions don't have the properties of my class. Fair enough.
Attempt 1
A function won't have the properties, so I tried calling those functions to actually get the objects. However, this:

@pytest.mark.parametrize('thing', [thing1(), thing2()])
def test_attr(thing):
    print(thing.props['datadir'])
    assert os.path.exists(thing.get('datadir'))
results in:
================================== FAILURES ===================================
______________________________ test_attr[thing0] ______________________________
thing = <generator object thing1 at 0x0000017B50C61BF8>

    @pytest.mark.parametrize('thing', [thing1(), thing2()])
    def test_attr(thing):
>       print(thing.datadir)
E       AttributeError: 'generator' object has no attribute 'props'

test_mod.py:39: AttributeError
Attempt 2
I also tried using return instead of yield in the thing1/thing2 fixtures, but that kicks me out of the data context manager and removes the directory:
================================== FAILURES ===================================
______________________________ test_attr[thing0] ______________________________
thing = <test_mod.Thing object at 0x000001C528F05358>

    @pytest.mark.parametrize('thing', [thing1(), thing2()])
    def test_attr(thing):
        print(thing.datadir)
>       assert os.path.exists(thing.datadir)
Closing
To restate the question: Is there any way to pass these fixtures as parameters and maintain the cleanup of the temporary directory?

Try making your data function / generator into a fixture. Then use request.getfixturevalue() to dynamically run the named fixture.
import pytest, tempfile, os, shutil

@pytest.fixture  # This works with pytest>=3.0; on pytest<3.0 use @pytest.yield_fixture
def datadir():
    datadir = tempfile.mkdtemp()  # setup
    yield datadir
    shutil.rmtree(datadir)  # teardown

class Thing:
    def __init__(self, datadir, errorfile):
        self.datadir = datadir
        self.errorfile = errorfile

@pytest.fixture
def thing1(datadir):
    errorfile = os.path.join(datadir, 'testlog1.log')
    yield Thing(datadir=datadir, errorfile=errorfile)

@pytest.fixture
def thing2(datadir):
    errorfile = os.path.join(datadir, 'testlog2.log')
    yield Thing(datadir=datadir, errorfile=errorfile)

@pytest.mark.parametrize('thing_name', ['thing1', 'thing2'])
def test_attr(request, thing_name):
    # getfixturevalue works with pytest>=3.0; on pytest<3.0 use getfuncargvalue
    thing = request.getfixturevalue(thing_name)
    print(thing.datadir)
    assert os.path.exists(thing.datadir)
Going one step further, you can parametrize the thing fixture like so:
class Thing:
    def __init__(self, datadir, errorfile):
        self.datadir = datadir
        self.errorfile = errorfile

@pytest.fixture(params=['test1.log', 'test2.log'])
def thing(request):
    with tempfile.TemporaryDirectory() as datadir:
        errorfile = os.path.join(datadir, request.param)
        yield Thing(datadir=datadir, errorfile=errorfile)

def test_thing_datadir(thing):
    assert os.path.exists(thing.datadir)

Temporary directories and files are handled by pytest using the built-in fixtures tmpdir and tmpdir_factory.
For this usage, tmpdir should be sufficient: https://docs.pytest.org/en/latest/tmpdir.html
Also, parametrized fixtures would work well for this example. These are documented here: https://docs.pytest.org/en/latest/fixture.html#fixture-parametrize
import os
import pytest

class Thing:
    def __init__(self, datadir, errorfile):
        self.datadir = datadir
        self.errorfile = errorfile

@pytest.fixture(params=(1, 2))
def thing(request, tmpdir):
    errorfile_name = 'testlog{}.log'.format(request.param)
    errorfile = tmpdir.join(errorfile_name)
    return Thing(datadir=str(tmpdir), errorfile=str(errorfile))

def test_attr(request, thing):
    assert os.path.exists(thing.datadir)
BTW, in Python Testing with pytest, parametrized fixtures are covered in ch. 3; tmpdir and other built-in fixtures are covered in ch. 4.

I see your problem but I'm not sure about the solution. The problem:
Your functions thing1 and thing2 contain yield statements. When you call a function like that, the returned value is a "generator object": an iterator, a sequence of values, which is of course not the same thing as the first yielded value or any one particular value.
Those are the objects being passed to your test_attr function. The test environment is doing that for you automagically, or at least I think that's how it works.
What you really want is the object created in your yield expression, in other words Thing(datadir=datadir, errorfile=errorfile). There are three ways to get a generator to emit its individual values: by calling next(gen), by calling gen.__next__(), or by using the generator in a for loop.
One possibility is to iterate the generator once. Like this:
def test_attr(thing):
    first_thing = next(thing)
    print(first_thing.datadir)
    assert os.path.exists(first_thing.datadir)
first_thing will be the object you want to test, i.e., Thing(datadir=datadir, errorfile=errorfile).
But this is only the first hurdle. The generator function is not finished. Its internal "program counter" is just after the yield statement. So you haven't exited the context manager and haven't deleted your temporary directory yet. To do this you must call next(thing) again and catch a StopIteration exception.
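A minimal sketch of that variant (my illustration, assuming the same parametrize('thing', [thing1(), thing2()]) setup as above):

def test_attr(thing):
    first_thing = next(thing)  # run the generator up to its yield
    assert os.path.exists(first_thing.datadir)
    try:
        next(thing)  # resume past the yield so the context manager exits
    except StopIteration:
        pass  # generator finished; the temporary directory has been removed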
Alternatively I think this will work:
def test_attr(thing):
    for a_thing in thing:
        print(a_thing.datadir)
        assert os.path.exists(a_thing.datadir)
The for loop runs through all the items in the iterator (there's only one) and exits gracefully when StopIteration occurs. The generator then exits the context manager and your work is done.
To me it's an open question whether this makes your code more or less readable and maintainable. It's a bit clumsy.


How to have a teardown function in module scope based on running test result

I want to clean up some files after all tests pass. If they fail, keep them for debug. I read https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures so I have the following in my conftest.py:
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # execute all other hooks to obtain the report object
    outcome = yield
    rep = outcome.get_result()
    # set a report attribute for each phase of a call, which can
    # be "setup", "call", "teardown"
    setattr(item, "rep_" + rep.when, rep)

@pytest.fixture(scope="module", autouse=True)
def teardown(request):
    yield
    # request.node is an "item" because we use the default
    # "function" scope
    if request.node.rep_setup.failed:
        print("setting up a test failed!", request.node.nodeid)
    elif request.node.rep_setup.passed:
        pass  # clean up my files
However, I got the error:
AttributeError: 'Module' object has no attribute 'rep_setup'
The only difference from the doc example is that my teardown has scope="module". But I have to do this because I want to clean up files after all tests pass; some files are used by all tests. If I use the default scope, which is "function" level, it will clean up after each test case rather than after the whole module. How can I fix this?
Update: before I added the hook, I already had the module-level teardown, and it worked fine, meaning it cleaned up all files after all tests ran; the only problem was that it cleaned up regardless of whether the tests passed or failed.
If you are in module scope, request.node represents the module, not a single test. If you just want to check for failed tests, you can check the session:
@pytest.fixture(scope="module", autouse=True)
def teardown(request):
    yield
    if request.session.testsfailed > 0:
        print(f"{request.session.testsfailed} test(s) failed!")
    else:
        pass  # clean up my files
I'm not sure if there is any information about setup failures in the request at this point, if you are only interested in these.
In this case you could implement a function-scoped fixture which sets a flag in case of a setup failure, and use that, something like:
SETUP_FAILED = False

@pytest.fixture(autouse=True)
def teardown_test(request):
    yield
    if request.node.rep_setup.failed:
        global SETUP_FAILED
        SETUP_FAILED = True

@pytest.fixture(scope="module", autouse=True)
def teardown_module():
    global SETUP_FAILED
    SETUP_FAILED = False
    yield
    if SETUP_FAILED:
        print("At least one test setup failed!")
    else:
        pass  # clean up my files
This is not nice, and maybe someone knows a better solution, but it will work.
You could also collect information about the tests where the setup failed if needed.
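A sketch of that idea (my own illustration; it relies on the rep_setup attribute set by the pytest_runtest_makereport hook above, and the FAILED_SETUPS name is made up):

import pytest

FAILED_SETUPS = []

@pytest.fixture(autouse=True)
def record_setup_failure(request):
    yield
    if request.node.rep_setup.failed:
        FAILED_SETUPS.append(request.node.nodeid)

@pytest.fixture(scope="module", autouse=True)
def report_failed_setups():
    FAILED_SETUPS.clear()  # start fresh for each module
    yield
    if FAILED_SETUPS:
        print("Setup failed for:", ", ".join(FAILED_SETUPS))
    else:
        pass  # clean up my files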

How to get DRYer tests for simple function using python glob?

I have a function that searches for a file in the current location, then in the Downloads folder, and raises an error if it's not found. Using pytest and pytest-mock, I was able to test it, but the test code ended up much bigger than the code under test. Is there a way to make this tighter/DRYer?
Tested code:
# cei.py
import glob
import os
import sys

def get_xls_filename() -> str:
    """Returns first xls filename in current folder or Downloads folder"""
    csv_filenames = glob.glob("InfoCEI*.xls")
    if csv_filenames:
        return csv_filenames[0]
    home = os.path.expanduser("~")
    csv_filenames = glob.glob(home + "/Downloads/InfoCEI*.xls")
    if csv_filenames:
        return csv_filenames[0]
    return sys.exit("Error: file not found.")
There are three test scenarios here: found in the current folder, found in Downloads, and not found.
Test code:
# test_cei.py
from unittest.mock import Mock

import pytest
from pytest_mock import MockFixture

import cei

@pytest.fixture
def mock_glob_glob_none(mocker: MockFixture) -> Mock:
    """Fixture for mocking glob.glob."""
    mock = mocker.patch("glob.glob")
    mock.return_value = []
    return mock

@pytest.fixture
def mock_os_path_expanduser(mocker: MockFixture) -> Mock:
    """Fixture for mocking os.path.expanduser."""
    mock = mocker.patch("os.path.expanduser")
    mock.return_value = "/home/user"
    return mock

def test_get_xls_filename_not_found(mock_glob_glob_none, mock_os_path_expanduser) -> None:
    with pytest.raises(SystemExit):
        assert cei.get_xls_filename()
    mock_glob_glob_none.assert_called()
    mock_os_path_expanduser.assert_called_once()

@pytest.fixture
def mock_glob_glob_found(mocker: MockFixture) -> Mock:
    """Fixture for mocking glob.glob."""
    mock = mocker.patch("glob.glob")
    mock.return_value = ["/my/path/InfoCEI.xls"]
    return mock

def test_get_xls_filename_current_folder(mock_glob_glob_found) -> None:
    assert cei.get_xls_filename() == "/my/path/InfoCEI.xls"
    mock_glob_glob_found.assert_called_once()

@pytest.fixture
def mock_glob_glob_found_download(mocker: MockFixture) -> Mock:
    """Fixture for mocking glob.glob."""
    values = {
        "InfoCEI*.xls": [],
        "/home/user/Downloads/InfoCEI*.xls": ["/home/user/Downloads/InfoCEI.xls"],
    }

    def side_effect(arg):
        return values[arg]

    mock = mocker.patch("glob.glob")
    mock.side_effect = side_effect
    return mock

def test_get_xls_filename_download_folder(
    mock_glob_glob_found_download, mock_os_path_expanduser
) -> None:
    assert cei.get_xls_filename() == "/home/user/Downloads/InfoCEI.xls"
    mock_os_path_expanduser.assert_called_once()
    mock_glob_glob_found_download.assert_called_with(
        "/home/user/Downloads/InfoCEI*.xls"
    )
This is obviously a bit opinion-based, but I'll try.
First, there is nothing wrong with the tests being larger than the tested code. Depending on the number of tested use cases, this can easily happen, and I wouldn't use it as a criterion for test quality.
That being said, your tests should usually test the API / interface, which in this case is the returned file path under different conditions. Testing whether os.path.expanduser has been called is testing an internal implementation detail that may not be stable; I would not consider that a good thing, at least in this case. You already test the most relevant use cases where these internals are used (a test for having files in both locations might be added; see the sketch after the example below).
Here is what I would probably do:
import os

import pytest

from cei import get_xls_filename

@pytest.fixture
def cwd(fs, monkeypatch):
    fs.cwd = "/my/path"
    monkeypatch.setenv("HOME", "/home/user")

def test_get_xls_filename_not_found(fs, cwd) -> None:
    with pytest.raises(SystemExit):
        assert get_xls_filename()

def test_get_xls_filename_current_folder(fs, cwd) -> None:
    fs.create_file("/my/path/InfoCEI.xls")
    assert get_xls_filename() == "InfoCEI.xls"  # adapted to your implementation

def test_get_xls_filename_download_folder(fs, cwd) -> None:
    path = os.path.join("/home/user", "Downloads", "InfoCEI.xls")
    fs.create_file(path)
    assert get_xls_filename() == path
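For completeness, the extra case mentioned above (a file in both locations) could look something like this sketch, under the same pyfakefs setup; it assumes the current-folder match should win:

def test_get_xls_filename_both_folders(fs, cwd) -> None:
    fs.create_file("/my/path/InfoCEI.xls")
    fs.create_file("/home/user/Downloads/InfoCEI.xls")
    # the current-folder glob is checked first, so it should win
    assert get_xls_filename() == "InfoCEI.xls"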
Note that I used the pyfakefs fixture fs to mock the fs (I'm a contributor to pyfakefs, so this is what I'm used to, and it makes the code a bit shorter), but this may be overkill for you.
Basically, I try to test only the API, put the common setup (here cwd and home path location) into a fixture (or in a setup method for xUnit-like tests), and add the test-specific setup (creation of the test file) to the test itself.

How to check if mock functions have been called?

I'm writing unit tests for a simple function that writes bytes into s3:
import s3fs

def write_bytes_as_csv_to_s3(payload, bucket, key):
    fs = s3fs.S3FileSystem()
    fname = f"{bucket}/{key}"
    print(f"writing {len(payload)} bytes to {fname}")
    with fs.open(fname, "wb") as f:
        f.write(payload)
    return fname
def test_write_bytes_as_csv_to_s3(mocker):
    s3fs_mock = mocker.patch('s3fs.S3FileSystem')
    open_mock = mocker.MagicMock()
    # write_mock = mocker.MagicMock()
    # open_mock.write.return_value = write_mock
    s3fs_mock.open.invoke.return_value = open_mock
    result = write_bytes_as_csv_to_s3('awesome'.encode(), 'random', 'key')
    assert result == 'random/key'
    s3fs_mock.assert_called_once()
    open_mock.assert_called_once()
    # write_mock.assert_called_once()
How can I check that the open and write methods have been called once? I'm not sure how to set up the mocks to cover my case.
The unit test you've written above is good and mostly covers the functionality of the methods you want to test.
pytest can also produce a coverage report that shows which lines are covered by the tests.
Install the pytest-cov plugin (if not installed) and execute the following command:
py.test --cov=<module to cover> --cov-report=html <testfile>
After that, you should find an HTML report in the htmlcov/ directory, from which you can easily see the lines covered and the coverage percentage.
The issue is understanding how each mock is created and what exactly it mocks. For example, mocker.patch('s3fs.S3FileSystem') returns a mock of s3fs.S3FileSystem itself, not the instance returned by calling s3fs.S3FileSystem(). Then, to mock with fs.open(fname, "wb") as f, you need to mock what the __enter__ dunder method returns. Hopefully the following code makes the relations clear:
def test_write_bytes_as_csv_to_s3(mocker):
    # Mock of the file object yielded by the context manager (the `f` in `as f`)
    open_cm_mock = mocker.MagicMock()

    # Mock of the object returned by fs.open()
    open_mock = mocker.MagicMock()
    open_mock.__enter__.return_value = open_cm_mock

    # Mock of the new instance returned by s3fs.S3FileSystem()
    fs_mock = mocker.MagicMock()
    fs_mock.open.return_value = open_mock

    # Patching of s3fs.S3FileSystem
    mocker.patch('s3fs.S3FileSystem').return_value = fs_mock

    # Running the tested code and making assertions
    result = write_bytes_as_csv_to_s3('awesome'.encode(), 'random', 'key')
    assert result == 'random/key'
    assert open_cm_mock.write.call_count == 1

All my test functions are loading a fixture that is in the conftest.py, even when they don't need it

I have 2 different test files and some fixtures in my conftest.py:
1)"Test_dummy.py" which contains this function:
def test_nothing():
return 1
2)"Test_file.py". which contains this function:
def test_run(excelvalidation_io):
dfInput, expectedOutput=excelvalidation_io
output=run(dfInput)
for key, df in expectedOutput.items():
expected=df.fillna(0)
real=output[key].fillna(0)
assert expected.equals(real)
3)"conftest.py" which contains these fixtures:
def pytest_generate_tests(metafunc):
inputfiles=glob.glob(DATADIR+"**_input.csv", recursive=False)
iofiles=[(ifile, getoutput(ifile)) for ifile in
inputfiles]
metafunc.parametrize("csvio", iofiles)
#pytest.fixture
def excelvalidation_io(csvio):
dfInput, expectedOutput= csvio
return(dfInput, expectedOutput)
#pytest.fixture
def client():
client = app.test_client()
return client
When I run the tests, "Test_dummy.py" also tries to load the "excelvalidation_io" fixture, and it generates an error:
In test_nothing: function uses no argument 'csvio'
I have tried placing the fixture inside "Test_file.py" only, and that solves the problem, but I read that it's good practice to locate all the fixtures in the conftest file.
The function pytest_generate_tests is a special function that is always called before executing any test, so in this case you need to check whether the test actually requests the fixture (here excelvalidation_io, which depends on csvio) and do nothing otherwise, as in:
def pytest_generate_tests(metafunc):
    if "excelvalidation_io" in metafunc.fixturenames:
        inputfiles = glob.glob(DATADIR + "**_input.csv", recursive=False)
        iofiles = [(ifile, getoutput(ifile)) for ifile in inputfiles]
        metafunc.parametrize("csvio", iofiles)

How do I mock an open used in a with statement (using the Mock framework in Python)?

How do I test the following code with unittest.mock:
def testme(filepath):
    with open(filepath) as f:
        return f.read()
Python 3
Patch builtins.open and use mock_open, which is part of the mock framework. patch used as a context manager returns the object used to replace the patched one:
from unittest.mock import patch, mock_open

with patch("builtins.open", mock_open(read_data="data")) as mock_file:
    assert open("path/to/open").read() == "data"
    mock_file.assert_called_with("path/to/open")
If you want to use patch as a decorator, using mock_open()'s result as the new= argument to patch can be a little bit weird. Instead, use patch's new_callable= argument and remember that every extra argument that patch doesn't use will be passed to the new_callable function, as described in the patch documentation:
patch() takes arbitrary keyword arguments. These will be passed to the Mock (or new_callable) on construction.
#patch("builtins.open", new_callable=mock_open, read_data="data")
def test_patch(mock_file):
assert open("path/to/open").read() == "data"
mock_file.assert_called_with("path/to/open")
Remember that in this case patch will pass the mocked object as an argument to your test function.
Python 2
You need to patch __builtin__.open instead of builtins.open, and mock is not part of unittest, so you need to pip install it and import it separately:
from mock import patch, mock_open

with patch("__builtin__.open", mock_open(read_data="data")) as mock_file:
    assert open("path/to/open").read() == "data"
    mock_file.assert_called_with("path/to/open")
The way to do this has changed in mock 0.7.0 which finally supports mocking the python protocol methods (magic methods), particularly using the MagicMock:
http://www.voidspace.org.uk/python/mock/magicmock.html
An example of mocking open as a context manager (from the examples page in the mock documentation):
>>> open_name = '%s.open' % __name__
>>> with patch(open_name, create=True) as mock_open:
...     mock_open.return_value = MagicMock(spec=file)
...
...     with open('/some/path', 'w') as f:
...         f.write('something')
...
<mock.Mock object at 0x...>
>>> file_handle = mock_open.return_value.__enter__.return_value
>>> file_handle.write.assert_called_with('something')
With the latest versions of mock, you can use the really useful mock_open helper:
mock_open(mock=None, read_data=None)
A helper function to create a mock to replace the use of open. It works for open called directly or used as a context manager.
The mock argument is the mock object to configure. If None (the default) then a MagicMock will be created for you, with the API limited to methods or attributes available on standard file handles.
read_data is a string for the read method of the file handle to return. This is an empty string by default.
>>> from mock import mock_open, patch
>>> m = mock_open()
>>> with patch('{}.open'.format(__name__), m, create=True):
...     with open('foo', 'w') as h:
...         h.write('some stuff')
>>> m.assert_called_once_with('foo', 'w')
>>> handle = m()
>>> handle.write.assert_called_once_with('some stuff')
To use mock_open for a simple file read() (the original mock_open snippet already given on this page is geared more for write):
my_text = "some text to return when read() is called on the file object"
mocked_open_function = mock.mock_open(read_data=my_text)

with mock.patch("__builtin__.open", mocked_open_function):
    with open("any_string") as f:
        print f.read()
Note that, as per the docs for mock_open, this is specifically for read(), so it won't work with common patterns like for line in f, for example.
Uses python 2.6.6 / mock 1.0.1
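If you do need iteration: on Python 3.7+ the handle produced by unittest.mock's mock_open is iterable natively; on older versions, a workaround (my sketch, not part of the answer above) is to configure __iter__ on the handle yourself:

from unittest.mock import mock_open, patch

m = mock_open(read_data="line1\nline2\n")
# On Python 3.7+ this line is unnecessary; on older versions it makes
# the mocked handle iterable:
m.return_value.__iter__.return_value = ["line1\n", "line2\n"]

with patch("builtins.open", m):
    with open("any_file") as f:
        assert list(f) == ["line1\n", "line2\n"]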
The top answer is useful but I expanded on it a bit.
If you want to set the value of your file object (the f in as f) based on the arguments passed to open() here's one way to do it:
def save_arg_return_data(*args, **kwargs):
    mm = MagicMock(spec=file)
    mm.__enter__.return_value = do_something_with_data(*args, **kwargs)
    return mm

m = MagicMock()
m.side_effect = save_arg_return_data

# if your open() call is in the file mymodule.animals
# use mymodule.animals as name_of_called_file
open_name = '%s.open' % name_of_called_file
with patch(open_name, m, create=True):
    pass  # do testing here
Basically, open() will return an object and with will call __enter__() on that object.
To mock properly, we must mock open() to return a mock object. That mock object should then mock the __enter__() call on it (MagicMock will do this for us) to return the mock data/file object we want (hence mm.__enter__.return_value). Doing this with two mocks as above allows us to capture the arguments passed to open() and pass them to our do_something_with_data method.
I passed an entire mock file as a string to open() and my do_something_with_data looked like this:
def do_something_with_data(*args, **kwargs):
    return args[0].split("\n")
This transforms the string into a list so you can do the following as you would with a normal file:
for line in file:
    ...  # do action
I might be a bit late to the game, but this worked for me when calling open in another module without having to create a new file.
test.py
import unittest
import __builtin__
from mock import Mock, patch, mock_open, call
from MyObj import MyObj

class TestObj(unittest.TestCase):
    def test_save(self):
        open_ = mock_open()
        with patch.object(__builtin__, "open", open_):
            ref = MyObj()
            ref.save("myfile.txt")
        assert open_.call_args_list == [call("myfile.txt", "wb")]
MyObj.py
class MyObj(object):
    def save(self, filename):
        with open(filename, "wb") as f:
            f.write("sample text")
By patching the open function inside the __builtin__ module to my mock_open(), I can mock writing to a file without creating one.
Note: If you are using a module that uses cython, or your program depends on cython in any way, you will need to import cython's __builtin__ module by including import __builtin__ at the top of your file. You will not be able to mock the universal __builtin__ if you are using cython.
If you don't need the mock file object any further, you can decorate the test method:

@patch('builtins.open', mock_open(read_data="data"))
def test_testme():
    result = testme("any/path")
    assert result == "data"
To patch the built-in open() function with unittest:
This worked for a patch to read a json config.
import json

class ObjectUnderTest:
    def __init__(self, filename: str):
        with open(filename, 'r') as f:
            dict_content = json.load(f)
The mocked object is the io.TextIOWrapper object returned by the open() function
#patch("<src.where.object.is.used>.open",
return_value=io.TextIOWrapper(io.BufferedReader(io.BytesIO(b'{"test_key": "test_value"}'))))
def test_object_function_under_test(self, mocker):
I'm using pytest in my case, and the good news is that in Python 3 the unittest library can also be imported and used without issue.
Here is my approach. First, I create a conftest.py file with reusable pytest fixture(s):
from functools import cache
from unittest.mock import MagicMock, mock_open

import pytest
from pytest_mock import MockerFixture

class FileMock(MagicMock):
    def __init__(self, mocker: MagicMock = None, **kwargs):
        super().__init__(**kwargs)
        if mocker:
            self.__dict__ = mocker.__dict__
        # configure mock object to replace the use of open(...)
        # note: this is useful in scenarios where data is written out
        _ = mock_open(mock=self)

    @property
    def read_data(self):
        return self.side_effect

    @read_data.setter
    def read_data(self, mock_data: str):
        """set mock data to be returned when `open(...).read()` is called."""
        self.side_effect = mock_open(read_data=mock_data)

    @property
    @cache
    def write_calls(self):
        """a list of calls made to `open().write(...)`"""
        handle = self.return_value
        write: MagicMock = handle.write
        return write.call_args_list

    @property
    def write_lines(self) -> str:
        """a list of written lines (as a string)"""
        return ''.join([c[0][0] for c in self.write_calls])

@pytest.fixture
def mock_file_open(mocker: MockerFixture) -> FileMock:
    return FileMock(mocker.patch('builtins.open'))
I decided to make read_data a property in order to be more pythonic; it can be assigned in a test function with whatever data open() needs to return.
In my test file, named something like test_it_works.py, I have the following test cases to confirm intended functionality:
from unittest.mock import call

def test_mock_file_open_and_read(mock_file_open):
    mock_file_open.read_data = 'hello\nworld!'
    with open('/my/file/here', 'r') as in_file:
        assert in_file.readlines() == ['hello\n', 'world!']
    mock_file_open.assert_called_with('/my/file/here', 'r')

def test_mock_file_open_and_write(mock_file_open):
    with open('/out/file/here', 'w') as f:
        f.write('hello\n')
        f.write('world!\n')
        f.write('--> testing 123 :-)')
    mock_file_open.assert_called_with('/out/file/here', 'w')
    assert call('world!\n') in mock_file_open.write_calls
    assert mock_file_open.write_lines == """\
hello
world!
--> testing 123 :-)
""".rstrip()
Check out the gist here.
Sourced from a GitHub snippet that patches read and write functionality in Python.
import configparser
import pytest

simpleconfig = """[section]\nkey = value\n\n"""

def test_monkeypatch_open_read(mockopen):
    filename = 'somefile.txt'
    mockopen.write(filename, simpleconfig)

    parser = configparser.ConfigParser()
    parser.read(filename)
    assert parser.sections() == ['section']

def test_monkeypatch_open_write(mockopen):
    parser = configparser.ConfigParser()
    parser.add_section('section')
    parser.set('section', 'key', 'value')

    filename = 'somefile.txt'
    parser.write(open(filename, 'wb'))
    assert mockopen.read(filename) == simpleconfig
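The mockopen fixture itself lives in the linked snippet and isn't reproduced above. A rough sketch of what such a fixture could look like (my own illustration using monkeypatch, not the snippet's actual code):

import builtins
import io

import pytest

@pytest.fixture
def mockopen(monkeypatch):
    """In-memory stand-in for open(); write()/read() seed and inspect files."""
    files = {}

    class FakeFile(io.StringIO):
        def __init__(self, name, initial=""):
            super().__init__(initial)
            self.name = name

        def write(self, s):
            n = super().write(s)
            files[self.name] = self.getvalue()  # persist on every write
            return n

    def fake_open(name, mode="r", **kwargs):
        if "w" in mode:
            return FakeFile(name)
        return FakeFile(name, files.get(name, ""))

    monkeypatch.setattr(builtins, "open", fake_open)

    class Store:  # the helper object the tests above interact with
        def write(self, name, content):
            files[name] = content

        def read(self, name):
            return files[name]

    return Store()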
SIMPLE @patch with assert
If you want to use @patch: here open() is called inside the handler and its contents are read.

@patch("builtins.open", new_callable=mock_open, read_data="data")
def test_lambda_handler(self, mock_open_file):
    lambda_handler(event, {})
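The title mentions an assert; a plausible follow-up (a sketch: lambda_handler and event come from the answer's own context, and the assertions assume the handler opens one file and reads it) would be:

@patch("builtins.open", new_callable=mock_open, read_data="data")
def test_lambda_handler(self, mock_open_file):
    lambda_handler(event, {})
    mock_open_file.assert_called_once()  # the handler opened exactly one file
    handle = mock_open_file()            # mock_open always returns the same handle
    assert handle.read.called            # and the handler read from it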
