Python mock.patch() not mocking imported class module

I have some code that looks like this (abridged):
## ./module_1/lambda_function.py
from shared.graphql_helpers import GraphQLClient

gql_client = GraphQLClient('http://host_url.test/')

def post_event_to_consumer(event):
    response = gql_client.make_query(mutation, {'data': event}, {})

def lambda_handler(event, context):
    post_event_to_consumer(event['detail'])
The imported module is as follows:
## ./shared/graphql_helpers.py
import requests

class GraphQLClient:
    def __init__(self, url):
        self.host_url = url

    def make_query(self, query, variables, headers):
        request = requests.post(self.host_url, json={'query': query, 'variables': variables}, headers=headers)
        if request.status_code != 200:
            raise Exception()
        elif request.json()['errors']:
            raise Exception()
        else:
            return request.json()['data']
The project structure is:
module_1
    __init__.py
    lambda_function.py
    lambda_function_test.py
shared
    __init__.py
    graphql_helpers.py
My problem is that I am trying to patch GraphQLClient in lambda_function_test.py but it does not seem to be working.
My unit test looks like this:
## ./module_1/lambda_function_test.py
import os
from unittest import mock, TestCase

from module_1.lambda_function import post_event_to_consumer


class TestLambdaFunction(TestCase):
    @mock.patch('module_1.lambda_function.GraphQLClient')
    def test_post_event_to_complex(self, mock_gql_client):
        mock_gql_client = mock.Mock()
        mock_gql_client.make_query.return_value = {}
        result = post_event_to_consumer(json_data['detail'])
        assert result == None
From what I read online, patch mocks the import in the SUT's namespace; however, when I run the test it still uses the original GraphQLClient and throws a ConnectionError. I also tried removing module_1 from the patch path, but that didn't work either.
Not sure what's incorrect here, but any help would be appreciated.
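For reference, this is the kind of test I expected to end up with, based on that "patch where it is looked up" rule: a simplified, untested sketch that targets the module-level gql_client instance instead of the class (it assumes mutation is defined in the full, non-abridged module):

## sketch only, not my real test
from unittest import mock, TestCase

from module_1.lambda_function import post_event_to_consumer


class TestLambdaFunction(TestCase):
    # gql_client is created when lambda_function is imported, so the sketch
    # patches that instance rather than the GraphQLClient class.
    @mock.patch('module_1.lambda_function.gql_client')
    def test_post_event_to_consumer(self, mock_gql_client):
        mock_gql_client.make_query.return_value = {}
        result = post_event_to_consumer({'some': 'event'})
        assert result is None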

Related

Unit test for Python AWS Lambda: mock function before module is imported

I'm trying to write unit tests for my AWS Lambda function written in Python 3.9. I tried different things to mock the get_object function that makes calls to S3. I wanted to focus only on the calculate method, to verify that I'm getting correct calculation results.
When I try to run the following approach I'm getting credential errors about boto3
python -m unittest tests/app-test.py
...
botocore.exceptions.NoCredentialsError: Unable to locate credentials
Is there a way to import the calculate method from app.py and mock the call to the get_object function?
directory:
functions:
- __init__.py
- app.py
tests:
- __init__.py
- app-test.py
lambda function app.py:
import json
import boto3

def get_object():
    s3 = boto3.client('s3')
    response = s3.get_object(Bucket='mybucket', Key='object.json')
    content = response['Body'].read().decode('utf-8')
    return json.loads(content)

stops = get_object()

def lambda_handler(event, context):
    params = event['queryStringParameters']
    a = int(params['a'])
    b = int(params['b'])
    result = calculate(a, b)
    return {
        'statusCode': 200,
        'body': json.dumps(result)
    }

def calculate(a, b):
    return a + b
unit test app-test.py:
import unittest
from unittest import mock

with mock.patch('functions.app.get_object', return_value={}):
    from functions.app import calculate

class TestCalculation(unittest.TestCase):
    def test_should_return_correct_calculation(self):
        # when
        result = calculate(1, 2)
        # then
        self.assertEqual(3, result)
I was able to fix the issue. The biggest obstacle was mocking boto3 inside app.py: patching functions.app.get_object still has to import functions.app first to find the attribute, so the module-level get_object() call hits the real boto3 before the patch can take effect. I solved it by mocking the whole boto3 module before app.py is imported. Here's the code of app-test.py:
import sys
from io import BytesIO
from json import dumps
from unittest import TestCase, main
from unittest.mock import Mock

from botocore.stub import Stubber
from botocore.session import get_session
from botocore.response import StreamingBody

# prepare mocks for boto3
stubbed_client = get_session().create_client('s3')
stubber = Stubber(stubbed_client)

# mock response from S3
body_encoded = dumps({'name': 'hello world'}).encode()
body = StreamingBody(BytesIO(body_encoded), len(body_encoded))
stubber.add_response('get_object', {'Body': body})
stubber.activate()

# add mocks to the real module
sys.modules['boto3'] = Mock()
sys.modules['boto3'].client = Mock(return_value=stubbed_client)

# Import the module that will be tested.
# boto3 should now be mocked inside app.py.
from functions.app import calculate

class TestCalculation(TestCase):
    def test_should_return_correct_calculation(self):
        # when
        result = calculate(1, 2)
        # then
        self.assertEqual(3, result)
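A small variation on the same idea, for what it's worth: the sys.modules substitution can be wrapped in mock.patch.dict so the fake boto3 is removed again once the import is done, instead of staying in place for every other test module collected in the same run. This is only a sketch built on the same assumptions as the code above (same functions/app.py layout, same stubbed S3 client):

import sys
from io import BytesIO
from json import dumps
from unittest import TestCase
from unittest.mock import Mock, patch

from botocore.response import StreamingBody
from botocore.session import get_session
from botocore.stub import Stubber

# same stubbed S3 client as above
stubbed_client = get_session().create_client('s3')
stubber = Stubber(stubbed_client)
body_encoded = dumps({'name': 'hello world'}).encode()
stubber.add_response('get_object', {'Body': StreamingBody(BytesIO(body_encoded), len(body_encoded))})
stubber.activate()

# patch.dict restores sys.modules when the block exits, so the fake boto3
# only exists while functions.app (and its module-level get_object call) is imported
with patch.dict(sys.modules, {'boto3': Mock(client=Mock(return_value=stubbed_client))}):
    from functions.app import calculate


class TestCalculation(TestCase):
    def test_should_return_correct_calculation(self):
        self.assertEqual(3, calculate(1, 2))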

How to reference local import when patching - proper python testing structure

I have a Starlette app and I'm attempting to perform end-to-end testing on the entire application. A function defined in a.py is called by a function in b.py through a local import, and for testing purposes I would like to replace that function with one I define myself.
Like others, I'm running into issues getting the path string to work. After looking at existing questions on Stack Overflow, I think I'm supposed to be patching the reference in b.py, but I run into the following error: AttributeError: <function b at 0x7f5298087af0> does not have the attribute 'a'
Here's the relevant structure and code
Project_folder
- app
- lib
+ __init__.py
+ a.py
+ b.py
- handle
+ __init__.py
- test
+ test.py
+ main.py
# a.py
def a(id):
    return stuff

# b.py
from .a import a

def b():
    return a(some_stuff)

# lib/__init__.py
from .a import a
from .b import b
routes refers to an async function which calls b somewhere in the code
#main.py
from starlette.applications import Starlette
from handle import routes
app = Starlette(routes=routes)
My attempt at test.py
from starlette.testclient import TestClient
from main import app
import pytest
from mock import patch

client = TestClient(app)

def mock_a(id):
    return 'some value'

@patch('app.lib.b.a', new=mock_a)
def test_app(request):
    response = client.post('/route', request)
    assert response.status_code == 200
I'm very new to mocks and patching, and would appreciate any advice on how I should be setting this up
You import "a" and "b" as functions in init.py. So patch libs, not libs.a or libs.b. if you want to patch inside the module itself the take the imports out of init.py and the you can do something like this:
import app.lib.b

app.lib.b.a = mock_a  # rebind the 'a' that b() looks up at call time
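If you would rather leave __init__.py as it is, another option (just a sketch, assuming the package really is importable as app.lib and that the /route handler ends up calling b; the request payload here is a placeholder) is to fetch the real b module through importlib, which sidesteps the shadowed attribute, and patch its 'a' binding with patch.object:

import importlib
from unittest.mock import patch

from starlette.testclient import TestClient
from main import app

client = TestClient(app)

def mock_a(id):
    return 'some value'

# app.lib.b as an attribute is the re-exported function b, but the real
# submodule is still reachable under its full dotted name.
b_module = importlib.import_module('app.lib.b')

@patch.object(b_module, 'a', new=mock_a)
def test_app():
    response = client.post('/route', json={})
    assert response.status_code == 200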

How to mock in pytest global variable in imported module

I have a module with methods that I want to test with pytest. In the module I set up global variables such as CONFIG and SESSION to use in various places.
my_common_module.py, located in /my_project/my_common_module.py
import yaml
import os
import requests
from requests.auth import HTTPBasicAuth

def get_data(url):
    response = SESSION.get(url)
    if response.status_code == 200:
        assets = response.json()
    else:
        raise RuntimeError(f'ERROR during request: {url}')
    return assets

def read_config(path):
    with open(path, 'r') as yaml_file:
        return yaml.safe_load(yaml_file)

CONFIG = read_config(os.sys.argv[1])

SESSION = requests.Session()
SESSION.auth = HTTPBasicAuth(os.environ['USER'], os.environ['PASS'])
Run command:
python my_common_module.py config.yaml
test_my_common_module.py located in /my_project/tests/test_my_common_module.py
from my_common_module import read_config, SESSION
import pytest

def test_get_data_success(monkeypatch):
    class MockResponse(object):
        def __init__(self):
            self.status_code = 200

        @staticmethod
        def json(self):
            return {'name': 'value'}

    def mock_get():
        return MockResponse()

    url = 'https://testurl.com'
    monkeypatch.setattr(SESSION, 'get', mock_get)
    assert get_all_assets(url) == {'name': 'value'}
When I run:
pytest tests
I got this error:
tests/test_my_common_module.py:1: in <module>
from my_common_module import get_data, SESSION
my_common_module.py:20: in <module>
CONFIG = read_config(os.sys.argv[1])
my_common_module:16: in read_config
with open(path, 'r') as yaml_file:
E IsADirectoryError: [Errno 21] Is a directory: 'tests'
==========================short test summary info ==================================
ERROR tests/test_my_common_module.py - IsADirectoryError: [Errno 21] Is a directory: 'tests'
How can I mock or patch CONFIG, os.sys.argv[1], or the whole read_config method before my_common_module is imported in test_my_common_module.py, and avoid this error?
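For illustration, this is roughly what I was hoping to be able to do at the top of the test file, so that the module-level code gets harmless values when it runs at import time (untested sketch; 'config.yaml' is the same file I pass on the command line):

import sys
from unittest import mock

# give argv a config path and make sure the auth env vars exist
# *before* my_common_module's top-level code runs on import
with mock.patch.object(sys, 'argv', ['my_common_module.py', 'config.yaml']), \
        mock.patch.dict('os.environ', {'USER': 'test', 'PASS': 'test'}):
    from my_common_module import get_data, SESSION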

How to mock mongodb when it is called from another function?

I need help with mocking MongoDB. I am using mongomock to mock it.
My project structure is:
-- my_mongo.py
-- code.py
-- my_test.py
my_mongo.py has:
from pymongo import MongoClient
import os

def get_db():
    client = MongoClient(os.environ['MONGODB_URI'])
    db = client['my_db']
    return db

def insert(id, data):
    return get_db().results.insert_one(
        {
            "id": id,
            "data": df.to_json(),
        }).id
and code.py has
import my_mongo

def action():
    # do some things
    my_mongo.insert(id, data)
and my_test.py has
import mongomock
import my_mongo
from unittest import mock

with patch.object(my_mongo.get_db().client, "client", mongomock.MongoClient()):
    import code

def test_action_1():
    my_mongo.insert = mock.Mock(return_value=1)
    code.action()

def test_action_2():
    with patch.object(my_mongo.get_db(), "get_db", mongomock.MongoClient().db):
        code.action()
It throws pymongo.errors.ServerSelectionTimeoutError for both tests, so it still goes into the insert_one() method in my_mongo.py.
I expect in test_action_1 my_mongo.insert returns 1, but it doesn't.
What am I missing?
I'm not entirely sure what mongomock is for, but it looks like it's for mocking an entire mongo database and not actually using python mocking. I'm going to answer without including mongomock since I don't think you really need it, so you can take that for what it's worth.
There were a few issues:
Calling patch.object will patch the given method on whatever object you give it. If you call get_db() in the test and code.action then calls get_db() again, those are two different objects. Maybe this works? But I'm skeptical, so I just changed it.
Don't use code as your module name. That's already a module included with python.
code.action was missing args and a return statement.
You'll also notice that I changed how and what was being mocked to illustrate different ways to accomplish the mocking. Test 1 mocks the insert call with a function decorator. Test 2 mocks the get_db call with a contextmanager. Either is correct, just showing that you have options.
Here's the finished product:
my_mongo.py:
from pymongo import MongoClient
import os

def get_db():
    client = MongoClient(os.environ['MONGODB_URI'])
    db = client['my_db']
    return db

def insert(id, data):
    return get_db().results.insert_one({"id": id, "data": data.to_json()}).id  # df was undefined, updated to data
my_code.py:
import my_mongo

# I added id and data args. They were undefined
def action(id, data):
    return my_mongo.insert(id, data)  # I added a return here
my_test.py
from unittest import mock

import my_code

# I removed the contextmanager import. Nothing is being evaluated here that would
# need to be patched, so I'm pretty certain it has no effect

@mock.patch('my_mongo.insert')
def test_action_1(mock_insert):
    expected_id = 1
    mock_insert.return_value = expected_id
    ret = my_code.action(expected_id, mock.Mock())
    assert ret == expected_id

def test_action_2():
    with mock.patch('my_mongo.get_db') as mock_get_db:
        expected_id = 'some id'
        mock_db = mock.Mock()
        mock_db.results.insert_one.return_value.id = expected_id
        mock_get_db.return_value = mock_db
        ret = my_code.action(expected_id, mock.Mock())
        assert ret == expected_id
That line of code for patching MongoDB is wrong. Instead of using patch.object(my_mongo.get_db(), "get_db", mongomock.MongoClient().db), you should use patch("my_mongo.get_db", return_value=mongomock.MongoClient()['my_db']).
Following is the complete runnable code for your example:
my_test.py
import mongomock
from unittest.mock import patch

import my_code
import my_mongo

def test_action_2():
    mocked_mongo = mongomock.MongoClient()
    # use the same mongomock client in the patch so the assertion below can inspect it
    with patch("my_mongo.get_db", return_value=mocked_mongo['my_db']):
        my_code.action()
        assert mocked_mongo.my_db.results.count_documents({'id': 'some_id'}) == 1
my_mongo.py
from pymongo import MongoClient
import os

def get_db():
    client = MongoClient(os.environ['MONGODB_URI'])
    db = client['my_db']
    return db

def insert(id, data):
    return get_db().results.insert_one(
        {
            "id": id,
            "data": data,
        })
my_code.py
import my_mongo

def action():
    # do some things
    return my_mongo.insert('some_id', '{"a": 3}')

How to correctly import object in fixture after changing env variables

I'm writing tests for the config file of a Flask application. To make sure the env variables set in the system do not influence the results of the test I'm using pytest's monkeypatch to create predictable test outcomes.
I'm testing the config file once in a 'clean' state with a fixture with no env variables set and once in a 'fake' config, with a fixture where I let monkeypatch set the variables before running the test.
Both fixtures set env variables and then import the config object before passing it on to the test function.
When the config object is loaded at the head of the document instead of inside the fixtures, both fixtures use a version based on the actual system env variables.
It seems like the second fixture does not import the config object, but reuses the one created by the cleanConfig fixture. How can I force the fixture to reimport the config object?
test_config.py:
import pytest
from config import config

class TestConfigSettings(object):
    @pytest.fixture(scope='function')
    def cleanConfig(config_name, monkeypatch):
        def makeCleanConfig(config_name):
            monkeypatch.delenv('SECRET_KEY', raising=False)
            monkeypatch.delenv('DEV_DATABASE_URL', raising=False)
            from config import config
            configObject = config[config_name]
            return configObject
        return makeCleanConfig

    @pytest.fixture(scope='function')
    def fakeEnvConfig(config_name, monkeypatch):
        def makeFakeEnvConfig(config_name):
            monkeypatch.setenv('SECRET_KEY', 'fake difficult string')
            monkeypatch.setenv('DEV_DATABASE_URL', 'postgresql://fake:5432/fakeDevUrl')
            from config import config
            configObject = config[config_name]
            return configObject
        return makeFakeEnvConfig

    def test_configObject_withDevelopmentConfig_containsCorrectSettings(self, cleanConfig):
        configObject = cleanConfig('development')
        assert configObject.SECRET_KEY == 'hard to guess string'
        assert configObject.DEBUG == True
        assert configObject.SQLALCHEMY_DATABASE_URI == None

    def test_configObject_withDevelopmentConfigAndEnvSet_copiesEnvSettings(self, fakeEnvConfig):
        configObject = fakeEnvConfig('development')
        assert configObject.SECRET_KEY == 'fake difficult string'
        assert configObject.SQLALCHEMY_DATABASE_URI == 'postgresql://fake:5432/fakeDevUrl'
Config.py:
import os

class Config:
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'

class DevelopmentConfig(Config):
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL')

config = {
    'default': DevelopmentConfig,
    'development': DevelopmentConfig,
    ...
}
I finally found a solution for my problem. By using the reload() function, you can import a module again after changing the content (the loaded env variables in this case). To be able to use it I had to change the import to the config module instead of the config dictionary I was importing before, since reload() only works on modules.
The new code:
import pytest
from importlib import reload
import config

class TestConfigSettings(object):
    @pytest.fixture(scope='function')
    def cleanConfig(config_name, monkeypatch):
        def makeCleanConfig(config_name):
            monkeypatch.delenv('SECRET_KEY', raising=False)
            monkeypatch.delenv('DEV_DATABASE_URL', raising=False)
            reload(config)
            configObject = config.config[config_name]
            return configObject
        return makeCleanConfig

    @pytest.fixture(scope='function')
    def fakeEnvConfig(config_name, monkeypatch):
        def makeFakeEnvConfig(config_name):
            monkeypatch.setenv('SECRET_KEY', 'fake difficult string')
            monkeypatch.setenv('DEV_DATABASE_URL', 'postgresql://fake:5432/fakeDevUrl')
            reload(config)
            configObject = config.config[config_name]
            return configObject
        return makeFakeEnvConfig
