Configuring nose tests for different test targets - python

I have a set of nose tests which I use to test a piece of hardware. For example, the test below is concerned with testing the alarm for each mode on the system:
import target

modes = ("start", "stop", "restart", "stage1", "stage2")
max_alarm_time = 10

# generate tests for testing each mode
def test_generator():
    for m in modes:
        yield check_alarm, m, max_alarm_time

# test alarm for a mode
def check_alarm(m, max_alarm_time):
    target.set_mode(m)
    assert target.alarm() < max_alarm_time
Most of my tests follow this pattern, where I am testing a particular function across all modes on the system.
I now wish to use the same set of tests to test a new piece of hardware which has two extra modes:
modes = ("start","stop","restart","stage1","stage2","stage3","stage4")
Of course, I want my tests to still work for the old hardware as well. When running automated tests I will need to hardcode, for each test environment, which hardware I am connected to.
I believe the best way to do this is to create a parameters.py module as follows:
def init(hardware):
    global max_alarm_time
    global modes
    max_alarm_time = 10
    if hardware == "old":
        modes = ("start", "stop", "restart", "stage1", "stage2")
    elif hardware == "new":
        modes = ("start", "stop", "restart", "stage1", "stage2", "stage3", "stage4")
with test_alarms.py now looking like this instead:
import target
import parameters

# generate tests for testing each mode
def test_generator():
    for m in parameters.modes:
        yield check_alarm, m, parameters.max_alarm_time

# test alarm for a mode
def check_alarm(m, max_alarm_time):
    target.set_mode(m)
    assert target.alarm() < max_alarm_time
Then in my main I have the following:
import nose
import parameters

parameters.init("new")
nose.main()
Is this a valid approach in your opinion?

An alternative way to solve a similar problem is to abuse the @attr decorator from the attrib plugin in the following way:
from nose.plugins.attrib import attr

max_alarm_time = 10

# generate tests for testing each mode
@attr(hardware='old')
@attr(modes=("start", "stop", "restart", "stage1", "stage2"))
def test_generator_old():
    for m in test_generator_old.__dict__['modes']:
        yield check_alarm, m, max_alarm_time

@attr(hardware='new')
@attr(modes=("start", "stop", "restart", "stage1", "stage2", "stage3", "stage4"))
def test_generator_new():
    for m in test_generator_new.__dict__['modes']:
        yield check_alarm, m, max_alarm_time

# test alarm for a mode
def check_alarm(m, max_alarm_time):
    print "mode=", m
You can then immediately switch between 'old' and 'new', like this:
$ nosetests modes_test.py -a hardware=new -v
modes_test.test_generator_new('start', 10) ... ok
modes_test.test_generator_new('stop', 10) ... ok
modes_test.test_generator_new('restart', 10) ... ok
modes_test.test_generator_new('stage1', 10) ... ok
modes_test.test_generator_new('stage2', 10) ... ok
modes_test.test_generator_new('stage3', 10) ... ok
modes_test.test_generator_new('stage4', 10) ... ok
----------------------------------------------------------------------
Ran 7 tests in 0.020s
OK
And the old one:
$ nosetests modes_test.py -a hardware=old -v
modes_test.test_generator_old('start', 10) ... ok
modes_test.test_generator_old('stop', 10) ... ok
modes_test.test_generator_old('restart', 10) ... ok
modes_test.test_generator_old('stage1', 10) ... ok
modes_test.test_generator_old('stage2', 10) ... ok
----------------------------------------------------------------------
Ran 5 tests in 0.015s
OK
Also, although I have not played with it that much, nose-testconfig could help you to do the same trick.
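For example, a minimal sketch of the nose-testconfig approach might look like this (the "hardware" key and the mode tuples are assumptions for illustration, not part of the plugin):

from testconfig import config
import target

MODES = {
    "old": ("start", "stop", "restart", "stage1", "stage2"),
    "new": ("start", "stop", "restart", "stage1", "stage2", "stage3", "stage4"),
}
max_alarm_time = 10

# generate tests for whichever hardware was selected on the command line
def test_generator():
    for m in MODES[config.get("hardware", "old")]:
        yield check_alarm, m, max_alarm_time

def check_alarm(m, max_alarm_time):
    target.set_mode(m)
    assert target.alarm() < max_alarm_time

and the hardware would then be selected with something like nosetests test_alarms.py --tc=hardware:new.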

Related

How to mock data returned from MongoClient() in Python

I am trying to write a test case where I want to mock the object returned from MongoClient(); below is the code.
numbers.py
from pymongo import MongoClient

def get_count():
    client_int = MongoClient('abc.xyz.com', port=27010)
    return client_int
test_numbers.py
import numbers
from unittest.mock import patch

@patch('pymongo.MongoClient')
def test_get_count(mocked_object):
    mocked_object.return_value = [{'1': 'data'}]
    assert numbers.get_count() == [{'1': 'data'}]  # Here I am getting AssertionError: MongoClient != [{'1': 'data'}]
How do I make this work? What went wrong?
First of all, you should rename your module. You can't use numbers because it conflicts with Python's built-in numbers module.
You also didn't patch the target correctly. You should patch MongoClient in the my_numbers.py module. For more info, see where-to-patch.
E.g.
my_numbers.py:
from pymongo import MongoClient

def get_count():
    client_int = MongoClient('abc.xyz.com', port=27010)
    return client_int
test_my_numbers.py:
import unittest
from unittest.mock import patch

import my_numbers

class TestNumbers(unittest.TestCase):
    @patch('my_numbers.MongoClient')
    def test_get_count(self, mocked_object):
        mocked_object.return_value = [{'1': 'data'}]
        assert my_numbers.get_count() == [{'1': 'data'}]
        mocked_object.assert_called_once_with('abc.xyz.com', port=27010)

if __name__ == '__main__':
    unittest.main()
unit test result:
⚡ coverage run /Users/dulin/workspace/github.com/mrdulin/python-codelab/src/stackoverflow/66852436/test_my_numbers.py && coverage report -m --include='./src/**'
.
----------------------------------------------------------------------
Ran 1 test in 0.001s
OK
Name                                            Stmts   Miss  Cover   Missing
------------------------------------------------------------------------------
src/stackoverflow/66852436/my_numbers.py            4      0   100%
src/stackoverflow/66852436/test_my_numbers.py      11      0   100%
------------------------------------------------------------------------------
TOTAL                                              15      0   100%

Pytest - run same tests for different sets of input data

I have a number of functions that I want to test using pytest.
Throughout my testing, I use several input files that I specify at the top of the script:
import pytest
from mymodule.mymodule import *

test_bam = 'bam/test/test_reads_pb.bwa.bam'
sample_mapping_file = 'tests/test_sample_mapping.txt'
pb_errors_file = 'tests/data/pb_test_out.json'
pb_stats = 'tests/data/pb_stats.json'
I am then running several tests using this input:
@pytest.fixture
def options():
    o, a = get_args()
    return o

@pytest.fixture
def samples_dict():
    d = get_sample_mapping(sample_mapping_file)
    return d

@pytest.fixture
def read_stats(options, samples_dict):
    stats, bam = clean_reads(options, test_bam, samples_dict)
    return stats

@pytest.fixture
def clean_bam(options, samples_dict):
    stats, bam = clean_reads(options, test_bam, samples_dict)
    return bam

def test_errors(options, samples_dict, clean_bam):
    """Test successful return from find_errors"""
    sample, genome, chrom = set_genome(options, test_bam, samples_dict)
    base_data = find_errors(options, chrom, sample, clean_bam)
    assert base_data
I would like to be able to run the same tests on multiple different sets of input, where test_bam, sample_mapping_file, pb_errors_file and pb_stats will all be different.
What's the best way of running the same tests on different sets of input data?
I've played around with using marks to run input-specific functions:
@pytest.mark.pd
def get_pb_data():
    """Read in all pb-related files"""

@pytest.mark.ab
def get_ab_data():
    """Read in all ab-related files"""
But this doesn't work with the fixtures that I have set up (unless I'm missing something).
Any advice would be great!
Use the pytest parametrize decorator:
test_bam = 'bam/test/test_reads_pb.bwa.bam'
sample_mapping_file = 'tests/test_sample_mapping.txt'
pb_errors_file = 'tests/data/pb_test_out.json'
pb_stats = 'tests/data/pb_stats.json'
@pytest.mark.parametrize("config", [test_bam, sample_mapping_file, pb_errors_file, pb_stats])
def test_do_something(config):
    ...  # test body using the parametrized input
It will create one test for each input in the list and assign it to the config variable.
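If each test needs the whole group of related files at once (as in the question), a variant of the same idea is to parametrize over tuples of paths. This is only a sketch, and the ab_* paths below are invented for illustration:

import pytest

pb_files = ('bam/test/test_reads_pb.bwa.bam', 'tests/test_sample_mapping.txt',
            'tests/data/pb_test_out.json', 'tests/data/pb_stats.json')
# hypothetical second data set, just to show the shape of the parametrization
ab_files = ('bam/test/test_reads_ab.bwa.bam', 'tests/test_sample_mapping_ab.txt',
            'tests/data/ab_test_out.json', 'tests/data/ab_stats.json')

@pytest.mark.parametrize("bam, mapping, errors_file, stats",
                         [pb_files, ab_files], ids=['pb', 'ab'])
def test_dataset_files(bam, mapping, errors_file, stats):
    # each tuple becomes one test, reported as ...[pb] and ...[ab]
    assert bam.endswith('.bam')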
@pytest.mark.pd doesn't specify an input type; it adds a pd marker to the test, which can be used when running the tests, for example to run only the tests marked with pd:
pytest TestsFolder -m pd
If you want to run the tests on different sets of files, you can, for example, store the file names in a CSV and read the sets from there in the parametrize marker:
def data_source():
    for files in read_files_groups_from_csv():
        yield files

@pytest.mark.parametrize('files', data_source())
def test_errors(options, samples_dict, clean_bam, files):
    """for example, the files parameter will be ['bam/test/test_reads_pb.bwa.bam', 'tests/test_sample_mapping.txt', 'tests/data/pb_test_out.json', 'tests/data/pb_stats.json']"""
mark.parametrize runs before the fixtures, so you can pass files as a parameter to them as well:
@pytest.fixture
def samples_dict(files):
    # files[1] is the sample mapping file in the parametrized set
    d = get_sample_mapping(files[1])
    return d
If you don't want to rely on indexes, create a class with the file names as attributes and return instances of it from data_source().
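For example, a small sketch using a namedtuple (the field names are made up, and read_files_groups_from_csv() is the same assumed helper as above):

from collections import namedtuple
import pytest

FileSet = namedtuple('FileSet', ['bam', 'mapping', 'errors_file', 'stats'])

def data_source():
    # assumes read_files_groups_from_csv() yields sequences of four paths
    for files in read_files_groups_from_csv():
        yield FileSet(*files)

@pytest.mark.parametrize('files', data_source())
def test_errors_by_name(files):
    # attributes instead of indexes: files.bam, files.mapping, ...
    assert files.bam.endswith('.bam')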

Add metadata to TestCase in Python's unittest

I'd like to add metadata to individual tests in a TestCase that I've written to use Python's unittest framework. The metadata (a string, really) needs to be carried through the testing process and output to an XML file.
Other than remaining attached to the test, the data isn't going to be used by unittest or by my test code. (I've got a program that will run afterwards, open the XML file, and go looking for the metadata/string.)
I've previously used NUnit, which allows one to use a C# attribute to do this. Specifically, you can put this above a class:
[Property("SmartArrayAOD", -3)]
and then later find that in the XML output.
Is it possible to attach metadata to a test in Python's unittest?
Simple way for just dumping XML
If all you want to do is write stuff to an XML file after every unit test, just add a tearDown method to your test class (e.g. if you have class MyTest, give it a tearDown as shown below).
class MyTest(unittest.TestCase):

    def tearDown(self):
        dump_xml_however_you_do()

    def test_whatever(self):
        pass
General method
If you want a general way to collect and track metadata from all your tests and return it at the end, try creating an astropy table in your test class's __init__() and adding rows to it during tearDown(), then extracting references to the initialized instances of your test class from unittest, like this:
Step 1: set up a re-usable subclass of unittest.TestCase so we don't have to duplicate the table handling
(put all the example code in the same file or copy the imports)
"""
Demonstration of adding and retrieving meta data from python unittest tests
"""
import sys
import warnings
import unittest
import copy
import time
import astropy
import astropy.table
if sys.version_info < (3, 0):
from StringIO import StringIO
else:
from io import StringIO
class DemoTest(unittest.TestCase):
"""
Demonstrates setup of an astropy table in __init__, adding data to the table in tearDown
"""
def __init__(self, *args, **kwargs):
super(DemoTest, self).__init__(*args, **kwargs)
# Storing results in a list made it convenient to aggregate them later
self.results_tables = [astropy.table.Table(
names=('Name', 'Result', 'Time', 'Notes'),
dtype=('S50', 'S30', 'f8', 'S50'),
)]
self.results_tables[0]['Time'].unit = 'ms'
self.results_tables[0]['Time'].format = '0.3e'
self.test_timing_t0 = 0
self.test_timing_t1 = 0
def setUp(self):
self.test_timing_t0 = time.time()
def tearDown(self):
test_name = '.'.join(self.id().split('.')[-2:])
self.test_timing_t1 = time.time()
dt = self.test_timing_t1 - self.test_timing_t0
# Check for errors/failures in order to get state & description. https://stackoverflow.com/a/39606065/6605826
if hasattr(self, '_outcome'): # Python 3.4+
result = self.defaultTestResult() # these 2 methods have no side effects
self._feedErrorsToResult(result, self._outcome.errors)
problem = result.errors or result.failures
state = not problem
if result.errors:
exc_note = result.errors[0][1].split('\n')[-2]
elif result.failures:
exc_note = result.failures[0][1].split('\n')[-2]
else:
exc_note = ''
else: # Python 3.2 - 3.3 or 3.0 - 3.1 and 2.7
# result = getattr(self, '_outcomeForDoCleanups', self._resultForDoCleanups) # DOESN'T WORK RELIABLY
# This is probably only good for python 2.x, meaning python 3.0, 3.1, 3.2, 3.3 are not supported.
exc_type, exc_value, exc_traceback = sys.exc_info()
state = exc_type is None
exc_note = '' if exc_value is None else '{}: {}'.format(exc_type.__name__, exc_value)
# Add a row to the results table
self.results_tables[0].add_row()
self.results_tables[0][-1]['Time'] = dt*1000 # Convert to ms
self.results_tables[0][-1]['Result'] = 'pass' if state else 'FAIL'
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=astropy.table.StringTruncateWarning)
self.results_tables[0][-1]['Name'] = test_name
self.results_tables[0][-1]['Notes'] = exc_note
Step 2: set up a test manager that extracts metadata
def manage_tests(tests):
    """
    Function for running tests and extracting meta data
    :param tests: list of classes sub-classed from DemoTest
    :return: (TextTestResult, Table, string)
        result returned by unittest
        astropy table
        string: formatted version of the table
    """
    table_sorting_columns = ['Result', 'Time']
    # Build test suite
    suite_list = []
    for test in tests:
        suite_list.append(unittest.TestLoader().loadTestsFromTestCase(test))
    combo_suite = unittest.TestSuite(suite_list)
    # Run tests
    results = [unittest.TextTestRunner(verbosity=1, stream=StringIO(), failfast=False).run(combo_suite)]
    # Catch test classes
    suite_tests = []
    for suite in suite_list:
        suite_tests += suite._tests
    # Collect results tables
    results_tables = []
    for suite_test in suite_tests:
        if getattr(suite_test, 'results_tables', [None])[0] is not None:
            results_tables += copy.copy(suite_test.results_tables)
    # Process tables, if any
    if len(results_tables):
        a = []
        while (len(a) == 0) and len(results_tables):
            a = results_tables.pop(0)  # Skip empty tables, if any
        results_table = a
        for rt in results_tables:
            if len(rt):
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', category=DeprecationWarning)
                    results_table = astropy.table.join(results_table, rt, join_type='outer')
        try:
            results_table = results_table.group_by(table_sorting_columns)
        except Exception:
            print('Error sorting test results table. Columns may not be in the preferred order.')
        column_names = list(results_table.columns.keys())
        alignments = ['<' if cn == 'Notes' else '>' for cn in column_names]
        if len(results_table):
            rtf = '\n'.join(results_table.pformat(align=alignments, max_width=-1))
            exp_res = sum([result.testsRun - len(result.skipped) for result in results])
            if len(results_table) != exp_res:
                print('ERROR forming results table. Expected {} results, but table length is {}.'.format(
                    exp_res, len(results_table),
                ))
        else:
            rtf = None
    else:
        results_table = rtf = None
    return results, results_table, rtf
Step 3: Example usage
class FunTest1(DemoTest):
    @staticmethod
    def test_pass_1():
        pass

    @staticmethod
    def test_fail_1():
        assert False, 'Meant to fail for demo 1'


class FunTest2(DemoTest):
    @staticmethod
    def test_pass_2():
        pass

    @staticmethod
    def test_fail_2():
        assert False, 'Meant to fail for demo 2'


res, tab, form = manage_tests([FunTest1, FunTest2])
print(form)
print('')
for r in res:
    print(r)
    for error in r.errors:
        print(error[0])
        print(error[1])
Sample results:
$ python unittest_metadata.py
Name Result Time Notes
ms
-------------------- ------ --------- ----------------------------------------
FunTest2.test_fail_2 FAIL 5.412e-02 AssertionError: Meant to fail for demo 2
FunTest1.test_fail_1 FAIL 1.118e-01 AssertionError: Meant to fail for demo 1
FunTest2.test_pass_2 pass 6.199e-03
FunTest1.test_pass_1 pass 6.914e-03
<unittest.runner.TextTestResult run=4 errors=0 failures=2>
This should work with Python 2.7 or 3.7. You can add whatever columns you want to the table, and you can add parameters and other metadata to the table in setUp, tearDown, or even during the tests.
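For instance, one hedged way to attach ad-hoc metadata from inside a test body is to stash it on the instance and copy it into the Notes column after DemoTest.tearDown() has added the row (extra_note is an invented attribute, not part of the recipe above):

class AnnotatedDemoTest(DemoTest):
    """Sketch: carry a per-test note from the test body into the results table."""

    def setUp(self):
        super(AnnotatedDemoTest, self).setUp()
        self.extra_note = ''  # invented attribute for ad-hoc metadata

    def tearDown(self):
        super(AnnotatedDemoTest, self).tearDown()  # adds this test's row
        if self.extra_note:
            self.results_tables[0][-1]['Notes'] = self.extra_note

    def test_with_note(self):
        self.extra_note = 'SmartArrayAOD=-3'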
Warnings:
This solution accesses a protected attribute _tests of unittest.suite.TestSuite, which can have unexpected results. This specific implementation works as expected for me in python2.7 and python3.7, but slight variations on how the suite is built and interrogated can easily lead to strange things happening. I couldn't figure out a different way to extract references to the instances of my classes that unittest uses, though.

Pytest report summary to display error information

I am relatively new to pytest hooks and plugins and I am unable to figure out how to get my pytest run to give me a test execution summary with the reason for each failure.
Consider the code:
class Foo:
    def __init__(self, val):
        self.val = val

def test_compare12():
    f1 = Foo(1)
    f2 = Foo(2)
    assert f1 == f2, "F2 does not match F1"

def test_compare34():
    f3 = Foo(3)
    f4 = Foo(4)
    assert f3 == f4, "F4 does not match F3"
When I run the pytest script with -v option, it gives me the following result on the console:
========================= test session starts ==========================
platform darwin -- Python 2.7.5 -- py-1.4.26 -- pytest-2.7.0 -- /Users/nehau/src/QA/bin/python
rootdir: /Users/nehau/src/QA/test, inifile:
plugins: capturelog
collected 2 items

test_foocompare.py::test_compare12 FAILED
test_foocompare.py::test_compare34 FAILED

=============================== FAILURES ===============================
____________________________ test_compare12 ____________________________

    def test_compare12():
        f1 = Foo(1)
        f2 = Foo(2)
>       assert f1 == f2, "F2 does not match F1"
E       AssertionError: F2 does not match F1
E       assert <test.test_foocompare.Foo instance at 0x107640368> == <test.test_foocompare.Foo instance at 0x107640488>

test_foocompare.py:11: AssertionError
____________________________ test_compare34 ____________________________

    def test_compare34():
        f3 = Foo(3)
        f4 = Foo(4)
>       assert f3 == f4, "F4 does not match F3"
E       AssertionError: F4 does not match F3
E       assert <test.test_foocompare.Foo instance at 0x107640248> == <test.test_foocompare.Foo instance at 0x10761fe60>

test_foocompare.py:16: AssertionError
======================= 2 failed in 0.01 seconds =======================
I am running close to 2000 test cases, so it would be really helpful if I could have pytest display output in the following format:
test_foocompare.py::test_compare12 FAILED AssertionError: F2 does not match F1
test_foocompare.py::test_compare34 FAILED AssertionError: F4 does not match F3
I have looked at the pytest_runtest_makereport hook but can't seem to get it working. Does anyone have any other ideas?
Thanks
Try the --tb flag:
pytest --tb=line
This gives one line of output per failing test.
See the docs.
Also try pytest -v --tb=no to show all pass/fail results.
Try the -rA option of pytest. It will provide a summary at the end of the log that shows all results (failures, skips, errors, and passes).
See https://docs.pytest.org/en/latest/usage.html#detailed-summary-report
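Since the question mentions pytest_runtest_makereport: a related option is a small conftest.py hook that prints one summary line per failure. This is only a sketch; the stats keys and reprcrash attribute are worth double-checking against your pytest version:

# conftest.py
def pytest_terminal_summary(terminalreporter):
    # reports for failed tests are collected under the 'failed' key
    for report in terminalreporter.stats.get('failed', []):
        crash = getattr(report.longrepr, 'reprcrash', None)
        reason = crash.message if crash is not None else ''
        terminalreporter.write_line('%s FAILED %s' % (report.nodeid, reason))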

pytest automation apparently running tests during test collection phase

Running webui automated tests with pytest and selenium; having an issue where it appears that my tests are actually running during the collection phase. During this phase, I would expect pytest to be collecting tests, not running them. The end result is that I end up with 6 test results where I would expect 2. Now the interesting piece: the 6 results only appear in the HTML report; on the command line I only get the expected 2 lines of output (but it takes 300 seconds to run those two tests, because the tests are literally running multiple times).
tests/test_datadriven.py
#!/usr/bin/env python
from unittestzero import Assert
from pages.home import Home
from pages.administration import RolesTab
from api.api import ApiTasks
import time
import pytest
from data.datadrv import *
class TestRolesDataDriven(object):
    scenarios = [scenario1, scenario2]

    @pytest.mark.challenge
    def test_datadriven_rbac(self, mozwebqa, org, perm_name, resource, verbs, allowed, disallowed):
        """
        Perform a data driven test related to role based access controls.
        All parameters are fulfilled by the data.
        :param org: Organization Name
        :param perm_name: Permission name
        :param resource: Resource
        :param verbs: A tuple of verbs
        :returns: Pass or Fail for the test
        """
        sysapi = ApiTasks(mozwebqa)
        home_page = Home(mozwebqa)
        rolestab = RolesTab(mozwebqa)
        role_name = "role_%s" % (home_page.random_string())
        perm_name = "perm_%s" % (home_page.random_string())
        username = "user%s" % home_page.random_string()
        email = username + "@example.com"
        password = "redhat%s" % (home_page.random_string())
        sysapi.create_org(org)
        sysapi.create_user(username, password, email)
        home_page.login()
        home_page.tabs.click_tab("administration_tab")
        home_page.tabs.click_tab("roles_administration")
        home_page.click_new()
        rolestab.create_new_role(role_name)
        rolestab.click_role_permissions()
        rolestab.role_org(org).click()
        rolestab.click_add_permission()
        rolestab.select_resource_type(resource)
        home_page.click_next()
        for v in verbs:
            home_page.select('verbs', v)
        home_page.click_next()
        rolestab.enter_permission_name(perm_name)
        rolestab.enter_permission_desc('Added by QE test.')
        rolestab.click_permission_done()
        rolestab.click_root_roles()
        rolestab.click_role_users()
        rolestab.role_user(username).add_user()
        home_page.header.click_logout()
        home_page.login(username, password)
        for t in allowed:
            Assert.true(t(home_page))
        for t in disallowed:
            Assert.false(t(home_page))
data/data.py
###
# DO NOT EDIT HERE
###

def pytest_generate_tests(metafunc):
    """
    Parse the data provided in scenarios.
    """
    idlist = []
    argvalues = []
    for scenario in metafunc.cls.scenarios:
        idlist.append(scenario[0])
        items = scenario[1].items()
        argnames = [x[0] for x in items]
        argvalues.append([x[1] for x in items])
    metafunc.parametrize(argnames, argvalues, ids=idlist)

###
# EDIT BELOW
# ADD NEW SCENARIOS
###

scenario1 = ('ACME_Manage_Keys', {'org': 'ACME_Corporation',
                                  'perm_name': 'ManageAcmeCorp',
                                  'resource': 'activation_keys',
                                  'verbs': ('manage_all',),
                                  'allowed': (Base.is_system_tab_visible,
                                              Base.is_new_key_visible,
                                              Base.is_activation_key_name_editable),
                                  'disallowed': (Base.is_dashboard_subs_visible,)})

scenario2 = ('Global_Read_Only', {'org': 'Global Permissions',
                                  'perm_name': 'ReadOnlyGlobal',
                                  'resource': 'organizations',
                                  'verbs': ('read', 'create'),
                                  'allowed': (Base.is_organizations_tab_visible,
                                              Base.is_new_organization_visible,
                                              Base.is_new_organization_name_field_editable),
                                  'disallowed': (Base.is_system_tab_visible,
                                                 Base.is_new_key_visible)})
Full source is available on GitHub: https://github.com/eanxgeek/katello_challenge
Anyone have any idea what might be going on here? I am using the pytest-mozwebqa plugin, pytest, and selenium.
Thanks!
Check the version of pytest-mozwebqa you have installed. If your installed version is < 0.10 then you must update.
pip-python install --upgrade pytest-mozwebqa
Due to the number of changes in pytest-mozwebqa I strongly encourage you to test first in a python virtualenv.
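For example, a throwaway environment for trying the upgrade could look something like this (the environment name is illustrative):
$ virtualenv mozwebqa-env
$ source mozwebqa-env/bin/activate
(mozwebqa-env)$ pip install --upgrade pytest-mozwebqa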
