I am very new to Python and this is probably something trivial.
I have the following test:
import pytest
from pytest_mock import MockerFixture, call
# Create environment before importing anything from app/.
import makeenv
from data_f import balance_ledger_functions
import orm
from mock_orm import mock_nodes_db
def test_balance_ledger_process_settled(mock_nodes_db: None, mocker: MockerFixture) -> None:
settled_tranaction = created_transaction
settled_tranaction["recent_status"]["status_id"] = "4"
spy = mocker.spy(orm.Nodes, "balance_update")
assert balance_ledger_functions.balance_ledger(created_transaction) == settled_tranaction
to_node_id = settled_tranaction["to"]["id"]
amount = settled_tranaction["amount"]["amount"]
update_transaction_payload = {"balance":"{0}".format(-int(float(amount))), "is_cma" : False, "currency" : "cUSD"}
spy.assert_called_with(to_node_id, update_transaction_payload)
# fees
spy.assert_called_with(
settled_tranaction["fees"][0]["to"]["id"],
{"balance":"{0}".format(-int(float(settled_tranaction["fees"][0]["fee"])))}
)
spy.assert_called_with(
settled_tranaction["fees"][1]["to"]["id"],
{"balance":"{0}".format(-int(float(settled_tranaction["fees"][1]["fee"])))}
)
In the function that we are trying to test, the order of the calls is exactly as defined in the test (with different arguments). However, the test is failing with the following error:
> spy.assert_called_with(to_node_id, update_transaction_payload)
E AssertionError: Expected call: balance_update('6156661f7c1c6b71adefbb40', {'balance': '-10000', 'is_cma': False, 'currency': 'cUSD'})
E Actual call: balance_update('559339aa86c273605ccd35df', {'balance': '5'})
Basically, it is only asserting against the last set of arguments.
What is the correct way to test something like that?
Tried this - didn't work either...
I created a pytest plugin to help me in those situations: pip install pytest-mock-generator
Once you install it, you'll have the mg fixture. You can put this line of code in your test and it will print and return the asserts for you: mg.generate_asserts(spy).
Here is a complete code example:
Say that you have a Python file named example.py:
def hello(name: str) -> str:
return f"Hello {name}!"
Then you have this test:
import example
def test_spy_list_of_calls(mocker, mg):
example.hello("before spy")
my_spy = mocker.spy(example, "hello")
example.hello("after spy")
example.hello("another after spy")
example.hello("one more time")
mg.generate_asserts(my_spy)
The final line would print this:
from mock import call
assert 3 == my_spy.call_count
my_spy.assert_has_calls(
calls=[call('after spy'), call('another after spy'), call('one more time'), ])
Add those lines to your test and it should work:
import example
from mock import call
def test_spy_list_of_calls(mocker):
example.hello("before spy")
my_spy = mocker.spy(example, "hello")
example.hello("after spy")
example.hello("another after spy")
example.hello("one more time")
assert 3 == my_spy.call_count
my_spy.assert_has_calls(
calls=[call('after spy'), call('another after spy'), call('one more time'), ])
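Applying the same idea to the spy from the original question: assert_called_with only checks the most recent call, while assert_has_calls (or inspecting spy.call_args_list) covers every recorded call, in order. A minimal sketch, assuming the same fixtures and settled_tranaction dict from the original test, and assuming the spy records the same two-argument calls shown in the error output; only the assertion section changes:
from unittest.mock import call
# ... same setup as the original test, through the balance_ledger() call ...
expected_calls = [
    call(settled_tranaction["to"]["id"],
         {"balance": "{0}".format(-int(float(settled_tranaction["amount"]["amount"]))),
          "is_cma": False, "currency": "cUSD"}),
    call(settled_tranaction["fees"][0]["to"]["id"],
         {"balance": "{0}".format(-int(float(settled_tranaction["fees"][0]["fee"])))}),
    call(settled_tranaction["fees"][1]["to"]["id"],
         {"balance": "{0}".format(-int(float(settled_tranaction["fees"][1]["fee"])))}),
]
# Check that these calls were made in this order.
spy.assert_has_calls(expected_calls)
# Or compare the complete, ordered list of recorded calls exactly:
assert spy.call_args_list == expected_calls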
I have created this monitoring class that updates some Counter metrics according to some logic. I have attached the code. Can someone please explain why my registry would be empty even after I add the test metric?
import logging
from prometheus_client import (
CollectorRegistry,
Counter,
start_http_server
)
class Reporter:
def __init__(self):
self._set_counters()
start_http_server(8080, registry=self.registry)
def _set_counters(self):
self.registry = CollectorRegistry()
self.bycounter = Counter(
'bycounter',
'blah blah',
['by', 'level0top'],
registry=self.registry
)
self.bycounter.labels(by='test', level0top='test').inc()
I am trying to test the metrics like this:
import unittest
from sc_eol.monitoring import TodayDataReporter
from sc_eol.sc_eol_utils import generate_query_url
reporter = TodayDataReporter()
class TestTodayDataReporter(unittest.TestCase):
    @staticmethod
def test_publish():
by = 'level1'
parse_query = {'level0top' : 'WSJ2', 'date' : '2021-11-01'}
start = '2021-11-01'
print(dir(reporter.registry))
reporter.registry.collect()
before = reporter.registry.get_sample_value('bycounter', ['level1', 'WSJ2'])
print("BEFOREEE", before)
reporter.registry.collect()
generate_query_url(by, start, parse_query, reporter)
before = reporter.registry.get_sample_value('bycounter', {'by':'level1', 'level0top': 'WSJ2'})
reporter.registry.collect()
print("After", before)
if __name__ == "__main__":
unittest.main()
Why is the bycounter None?
How do I test whether a server is running at port 8080 or not?
before = reporter.registry.get_sample_value('bycounter', ['level1', 'WSJ2'])
The counter is also being renamed to bycounter_total.
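Two details may explain the None result; here is a hedged sketch using the reporter object created in the test above, with the 'test'/'test' label values incremented in _set_counters as the assumed sample: prometheus_client exports a Counter's samples under the name with a _total suffix, and get_sample_value expects the labels as a dict, not a list.
import socket

# Counters are exposed as <name>_total, and labels are passed as a dict.
value = reporter.registry.get_sample_value(
    'bycounter_total', {'by': 'test', 'level0top': 'test'})
print(value)  # 1.0 after the inc() in _set_counters; None if no such sample exists

# Rough check that something is listening on port 8080 (the start_http_server port).
try:
    socket.create_connection(('localhost', 8080), timeout=1).close()
    print('server is up on 8080')
except OSError:
    print('nothing listening on 8080')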
I have a function in Python in an athena.py file:
@retry(stop_max_attempt_number=10,
       wait_exponential_multiplier=300,
       wait_exponential_max=1 * 60 * 1000)
def poll_status(_id):
result = client.get_query_execution(QueryExecutionId=_id)
state = result['QueryExecution']['Status']['State']
if state == 'SUCCEEDED':
return result
elif state == 'FAILED':
return result
else:
raise Exception
where client is a reference to a boto3 client, created like this:
client = boto3.client('athena', 'us-west-2')
I have written the unit test for this function using unittest in test_athena.py:
@mock.patch('boto3.client')
def test_poll_status(self, mock_client):
event1 = {'QueryExecution': {'Status': {'State': 'SUCCEEDED'}}}
instance = mock_client.return_value()
instance.get_query_execution.return_value = event1
result = athena.poll_status('id')
expected_result = event1
self.assertEqual(expected_result, result)
But this fails. I don't know the reason, as I wrote test cases for other functions in a similar fashion, but this one does not work. This error is thrown:
botocore.exceptions.NoCredentialsError: Unable to locate credentials
Imports in athena.py:
import boto3
from retrying import retry
Imports in test_athena.py:
import unittest
from unittest import mock
Here's a way to make your test pass - or fail fast.
The fail-fast part is because I'm mocking the retry piece.
Then, I'm mocking a specific function of the client object.
I'm attaching a complete file - including the original code, the test, and a 'main'. It passes on my machine.
import boto3
from retrying import retry
import retrying
import unittest
from unittest import mock
client = boto3.client('athena', 'us-west-2')
@retry(stop_max_attempt_number=10,
       wait_exponential_multiplier=300,
       wait_exponential_max=1 * 60 * 1000)
def poll_status(_id):
result = client.get_query_execution(QueryExecutionId=_id)
state = result['QueryExecution']['Status']['State']
if state == 'SUCCEEDED':
return result
elif state == 'FAILED':
return result
else:
raise Exception
dummy_retry = retrying.Retrying(stop_max_attempt_number = 10, wait_fixed=1)
class MyTests(unittest.TestCase):
    @mock.patch('retrying.Retrying', new=lambda **kwargs: dummy_retry)
    @mock.patch.object(client, 'get_query_execution')
def test_poll_status(self, mock_client):
event1 = {'QueryExecution': {'Status': {'State': 'SUCCEEDED'}}}
mock_client.return_value = event1
result = poll_status('id')
expected_result = event1
self.assertEqual(expected_result, result)
if __name__ == '__main__':
unittest.main()
Is your system configured with the AWS CLI? It seems like you might not have configured the AWS CLI with an appropriate access key ID and secret key.
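A related detail, sketched here with the file layout from the question (athena.py / test_athena.py) as an assumption: athena.py creates its client at import time, so @mock.patch('boto3.client') in the test patches the factory only after that module-level client already exists; the real client then tries to sign a request and fails with NoCredentialsError. Patching the method on the existing client object, as the answer above does, avoids both the credential lookup and any real AWS call:
import unittest
from unittest import mock
import athena  # the module that defines client and poll_status

class TestAthena(unittest.TestCase):
    # Patch the method on the client instance that athena.py already created.
    @mock.patch.object(athena.client, 'get_query_execution')
    def test_poll_status(self, mock_get_query_execution):
        event1 = {'QueryExecution': {'Status': {'State': 'SUCCEEDED'}}}
        mock_get_query_execution.return_value = event1
        self.assertEqual(event1, athena.poll_status('id'))

if __name__ == '__main__':
    unittest.main()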
I have the following test
from unittest.mock import ANY
try:
from unittest import mock # python 3.3+
except ImportError:
import mock # python 2.6-3.2
import pytest
from data_cleaning import __main__ as data_cleaning
@mock.patch('Repositories.repository.Repository.delete')
@pytest.mark.parametrize("argv", [
(['-t', 'signals']),
(['-t', 'signals', 'visualizations']),
(['-t', 'signals', 'visualizations']),
(['-d', '40', '--processes', 'historicals'])])
def test_get_from_user(mock_delete, argv):
with mock.patch('data_cleaning.__main__.sys.argv', [''] + argv):
data_cleaning.main()
mock_delete.assert_has_calls([ANY])
pytest.main('-x ../data_cleaning'.split())
which tries to cover the following code
import argparse
import logging
import sys
from Repositories import repository
from common import config
_JSONCONFIG = config.config_json()
config.initial_configuration_for_logging()
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--days', help='Dias de datos que se quiere mantener.',
required=False, type=int, default=30)
parser.add_argument('-p', '--processes', help='Tablas a procesar.',
required=True, nargs='+', choices=['signals', 'not_historicals', 'historicals'])
return parser.parse_args(args)
def main():
args = parse_args(sys.argv[1:])
try:
repo = repository.Repository()
for process in args.processes:
if process == 'signals':
tables_with_column = get_tables_with_column(_JSONCONFIG['SIGNAL_TABLES'])
for table, column in tables_with_column:
repo.delete(column, table, args.days)
elif process == 'not_historicals':
tables_with_column = get_tables_with_column(_JSONCONFIG['NOT_HISTORICALS_TABLES'])
for table, column in tables_with_column:
repo.delete(column, table, args.days)
elif process == 'historicals':
tables_with_column = get_tables_with_column(_JSONCONFIG['HISTORICAL_TABLES'])
for table, column in tables_with_column:
repo.delete(column, table, args.days)
repo.execute_copy_table_data_from(table, 'historica')
except AttributeError as error:
logging.exception(f'AttributeError: {repr(error)}')
except KeyError as error:
logging.exception(f'KeyError: {repr(error)}')
except TypeError as error:
logging.exception(f'TypeError: {repr(error)}')
except Exception as error:
logging.exception(f'Exception: {repr(error)}')
def get_tables_with_column(json_object):
tables_with_column = convert_values_to_pairs_from(json_object, 'table_name', 'column_name')
return tables_with_column
def convert_values_to_pairs_from(obj: [dict], ket_to_key: str, key_to_value: str) -> [tuple]:
return [(item[ket_to_key], item[key_to_value]) for item in obj]
How can I cover 100% of my code with tests? Is the test case I specified well implemented? What do I have to cover in my tests so that this module is fully covered?
How should I approach testing this code? I am just starting with unit tests, but I have been at it for more than two months and I still have a hard time understanding what I should test.
You can use the assert_called family of functions to assert whether a particular method was called or not.
In your case, this could look something like this:
@mock.patch('Repositories.repository.Repository')
@pytest.mark.parametrize("argv", ...)
def test_get_from_user(mock_repository, argv):
with mock.patch('data_cleaning.__main__.sys.argv', [''] + argv):
data_cleaning.main()
        mock_repository.return_value.delete.assert_called_once()
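For the coverage question itself, the pure helper functions can also be tested directly, with no mocking, which typically covers the remaining lines. A small sketch based on the parse_args and convert_values_to_pairs_from definitions shown above, reusing the same data_cleaning import as the original test:
def test_parse_args_defaults():
    args = data_cleaning.parse_args(['-p', 'signals'])
    assert args.days == 30          # default from the -d/--days argument
    assert args.processes == ['signals']

def test_convert_values_to_pairs_from():
    rows = [{'table_name': 't1', 'column_name': 'c1'},
            {'table_name': 't2', 'column_name': 'c2'}]
    pairs = data_cleaning.convert_values_to_pairs_from(rows, 'table_name', 'column_name')
    assert pairs == [('t1', 'c1'), ('t2', 'c2')]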
I am using pytest to validate DB data. I am generating an HTML report which shows the test case results. For a failed case it only shows the assertion error, but I need the exact point where the test case fails. Can anyone help with this?
Main_methods.py
import pymongo
import re
import unittest
import pytest
class Main_methods():
def minlength(self,data,category_name,min_length):
''' validate the minimum length condition by comparing with db data for a given category
        Parameters: DB data, category name, minimum length '''
for name in data:
len(name[category_name])>=min_length
test_case.py
import pymongo
import re
import unittest
import pytest
from main_method import Main_methods
class_object=Main_methods()
@pytest.fixture
def data():
'''Initialise the variable for all the methods using this method and returns the value after the yield keyword.'''
    myclient = pymongo.MongoClient("mongodb://root:mongodbadmin@18.223.241.113:27017")
mydb = myclient["Ecomm_Product_db"]
mycol = mydb["products"]
yield mycol.find({})
class Test_Category_Name():
def test_minlength(self,data):
assert class_object.minlength(data,'category',5)
Actual result
def test_minlength(self,data):
> assert class_object.minlength(data,'category',5)
E AssertionError: assert None
E + where None = <bound method Main_methods.minlength of <main_method.Main_methods object at 0x0326B670>>(<pymongo.cursor.Cursor object at 0x0346A230>, 'category', 5)
E + where <bound method Main_methods.minlength of <main_method.Main_methods object at 0x0326B670>> = <main_method.Main_methods object at 0x0326B670>.minlength
testcase.py:20: AssertionError
Result I need (expected):
def test_category_minlength(data):
'''Asserts given min length condition for category name '''
for name in data:
> assert len(name['category'])>=5
E AssertionError: assert 3 >= 5
E + where 3 = len('SSD')
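That expected output relies on pytest's assertion rewriting, which only applies to assert statements inside the (rewritten) test module; as written, minlength also returns None because the comparison result is discarded, which is why the failure only says "assert None". A minimal sketch of one way to get the detailed message, assuming the same data fixture, is to assert inside the test itself (or to have the helper return the offending values and assert on those):
def test_category_minlength(data):
    '''Asserts the given minimum-length condition for each category name.'''
    for name in data:
        # The assert lives in the test module, so pytest can report
        # e.g. "assert 3 >= 5  where 3 = len('SSD')" on failure.
        assert len(name['category']) >= 5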
I'd like to add metadata to individual tests in a TestCase that I've written to use Python's unittest framework. The metadata (a string, really) needs to be carried through the testing process and output to an XML file.
Other than remaining with the test, the data isn't going to be used by unittest or by my test code. (I've got a program that will run afterwards, open the XML file, and go looking for the metadata/string.)
I've previously used NUnit, which allows one to use a C# attribute to do this. Specifically, you can put this above a class:
[Property("SmartArrayAOD", -3)]
and then later find that in the XML output.
Is it possible to attach metadata to a test in Python's unittest?
Simple way for just dumping XML
If all you want to do is write stuff to an XML file after every unit test, just add a tearDown method to your test class (e.g. if you have class MyTest(unittest.TestCase), give it a tearDown method):
class MyTest(unittest.TestCase):
def tearDown(self):
dump_xml_however_you_do()
def test_whatever(self):
pass
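For example, a concrete (hypothetical) version of that stub could write one small XML file per test using only the standard library; the property name and value below are just placeholders echoing the NUnit example:
import unittest
import xml.etree.ElementTree as ET

class MyTest(unittest.TestCase):
    def tearDown(self):
        # self.id() is e.g. "mymodule.MyTest.test_whatever"
        prop = ET.Element('property', name='SmartArrayAOD', value='-3')
        ET.ElementTree(prop).write(self.id() + '.xml')

    def test_whatever(self):
        pass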
General method
If you want a general way to collect and track metadata from all your tests and return it at the end, try creating an astropy table in your test class's __init__() and adding rows to it during tearDown(), then extracting a reference to your initialized instances of your test class from unittest, like this:
Step 1: set up a re-usable subclass of unittest.TestCase so we don't have to duplicate the table handling
(put all the example code in the same file or copy the imports)
"""
Demonstration of adding and retrieving meta data from python unittest tests
"""
import sys
import warnings
import unittest
import copy
import time
import astropy
import astropy.table
if sys.version_info < (3, 0):
from StringIO import StringIO
else:
from io import StringIO
class DemoTest(unittest.TestCase):
"""
Demonstrates setup of an astropy table in __init__, adding data to the table in tearDown
"""
def __init__(self, *args, **kwargs):
super(DemoTest, self).__init__(*args, **kwargs)
# Storing results in a list made it convenient to aggregate them later
self.results_tables = [astropy.table.Table(
names=('Name', 'Result', 'Time', 'Notes'),
dtype=('S50', 'S30', 'f8', 'S50'),
)]
self.results_tables[0]['Time'].unit = 'ms'
self.results_tables[0]['Time'].format = '0.3e'
self.test_timing_t0 = 0
self.test_timing_t1 = 0
def setUp(self):
self.test_timing_t0 = time.time()
def tearDown(self):
test_name = '.'.join(self.id().split('.')[-2:])
self.test_timing_t1 = time.time()
dt = self.test_timing_t1 - self.test_timing_t0
# Check for errors/failures in order to get state & description. https://stackoverflow.com/a/39606065/6605826
if hasattr(self, '_outcome'): # Python 3.4+
result = self.defaultTestResult() # these 2 methods have no side effects
self._feedErrorsToResult(result, self._outcome.errors)
problem = result.errors or result.failures
state = not problem
if result.errors:
exc_note = result.errors[0][1].split('\n')[-2]
elif result.failures:
exc_note = result.failures[0][1].split('\n')[-2]
else:
exc_note = ''
else: # Python 3.2 - 3.3 or 3.0 - 3.1 and 2.7
# result = getattr(self, '_outcomeForDoCleanups', self._resultForDoCleanups) # DOESN'T WORK RELIABLY
# This is probably only good for python 2.x, meaning python 3.0, 3.1, 3.2, 3.3 are not supported.
exc_type, exc_value, exc_traceback = sys.exc_info()
state = exc_type is None
exc_note = '' if exc_value is None else '{}: {}'.format(exc_type.__name__, exc_value)
# Add a row to the results table
self.results_tables[0].add_row()
self.results_tables[0][-1]['Time'] = dt*1000 # Convert to ms
self.results_tables[0][-1]['Result'] = 'pass' if state else 'FAIL'
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=astropy.table.StringTruncateWarning)
self.results_tables[0][-1]['Name'] = test_name
self.results_tables[0][-1]['Notes'] = exc_note
Step 2: set up a test manager that extracts metadata
def manage_tests(tests):
"""
Function for running tests and extracting meta data
:param tests: list of classes sub-classed from DemoTest
:return: (TextTestResult, Table, string)
result returned by unittest
astropy table
string: formatted version of the table
"""
table_sorting_columns = ['Result', 'Time']
# Build test suite
suite_list = []
for test in tests:
suite_list.append(unittest.TestLoader().loadTestsFromTestCase(test))
combo_suite = unittest.TestSuite(suite_list)
# Run tests
results = [unittest.TextTestRunner(verbosity=1, stream=StringIO(), failfast=False).run(combo_suite)]
# Catch test classes
suite_tests = []
for suite in suite_list:
suite_tests += suite._tests
# Collect results tables
results_tables = []
for suite_test in suite_tests:
if getattr(suite_test, 'results_tables', [None])[0] is not None:
results_tables += copy.copy(suite_test.results_tables)
# Process tables, if any
if len(results_tables):
a = []
while (len(a) == 0) and len(results_tables):
a = results_tables.pop(0) # Skip empty tables, if any
results_table = a
for rt in results_tables:
if len(rt):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
results_table = astropy.table.join(results_table, rt, join_type='outer')
try:
results_table = results_table.group_by(table_sorting_columns)
except Exception:
print('Error sorting test results table. Columns may not be in the preferred order.')
column_names = list(results_table.columns.keys())
alignments = ['<' if cn == 'Notes' else '>' for cn in column_names]
if len(results_table):
rtf = '\n'.join(results_table.pformat(align=alignments, max_width=-1))
exp_res = sum([result.testsRun - len(result.skipped) for result in results])
if len(results_table) != exp_res:
print('ERROR forming results table. Expected {} results, but table length is {}.'.format(
exp_res, len(results_table),
))
else:
rtf = None
else:
results_table = rtf = None
return results, results_table, rtf
Step 3: Example usage
class FunTest1(DemoTest):
    @staticmethod
def test_pass_1():
pass
    @staticmethod
def test_fail_1():
assert False, 'Meant to fail for demo 1'
class FunTest2(DemoTest):
    @staticmethod
def test_pass_2():
pass
    @staticmethod
def test_fail_2():
assert False, 'Meant to fail for demo 2'
res, tab, form = manage_tests([FunTest1, FunTest2])
print(form)
print('')
for r in res:
print(r)
for error in r.errors:
print(error[0])
print(error[1])
Sample results:
$ python unittest_metadata.py
Name Result Time Notes
ms
-------------------- ------ --------- ----------------------------------------
FunTest2.test_fail_2 FAIL 5.412e-02 AssertionError: Meant to fail for demo 2
FunTest1.test_fail_1 FAIL 1.118e-01 AssertionError: Meant to fail for demo 1
FunTest2.test_pass_2 pass 6.199e-03
FunTest1.test_pass_1 pass 6.914e-03
<unittest.runner.TextTestResult run=4 errors=0 failures=2>
This should work with Python 2.7 or 3.7. You can add whatever columns you want to the table, and you can add parameters and other data to the table in setUp, tearDown, or even during the tests.
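For instance, a subclass could declare an extra (hypothetical) column up front and fill it in an overridden tearDown; this just mirrors the table construction already used in DemoTest.__init__:
class TimedDemoTest(DemoTest):
    def __init__(self, *args, **kwargs):
        super(TimedDemoTest, self).__init__(*args, **kwargs)
        # Rebuild the table with one extra, hypothetical column
        self.results_tables = [astropy.table.Table(
            names=('Name', 'Result', 'Time', 'Notes', 'Iterations'),
            dtype=('S50', 'S30', 'f8', 'S50', 'i8'),
        )]
        self.iterations = 0  # tests can update this before tearDown runs

    def tearDown(self):
        super(TimedDemoTest, self).tearDown()  # adds this test's row
        self.results_tables[0][-1]['Iterations'] = self.iterations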
Warnings:
This solution accesses a protected attribute, _tests, of unittest.suite.TestSuite, which can have unexpected results. This specific implementation works as expected for me in Python 2.7 and Python 3.7, but slight variations on how the suite is built and interrogated can easily lead to strange things happening. I couldn't figure out a different way to extract references to the instances of my classes that unittest uses, though.