I'm writing a web app by imitating The Flask Mega-Tutorial.
As I was trying to add some unit test cases to my code, I found that the test cases in the Tutorial had a lot of duplicated code.
Here are the code segments:
def test_avatar(self):
    u = User(nickname='john', email='john@example.com')
    avatar = u.avatar(128)
    expected = 'http://www.gravatar.com/avatar/d4c74594d841139328695756648b6bd6'
    ...

def test_make_unique_nickname(self):
    u = User(nickname='john', email='john@example.com')
    db.session.add(u)
    db.session.commit()
    ...
The problem is that every time I want to test a new case I have to repeat this process:
u = User(nickname='john', email='john@example.com')
db.session.add(u)
db.session.commit()
So, I moved this process out and made it like this:
import unittest
from config import basedir
from app import app, db
from app.models import User

u = User(nickname='john', email='john@example.com')  # I put this out because some cases may want to use this stuff.

def _init_database():
    db.session.add(u)
    db.session.commit()

class TestCase(unittest.TestCase):
    def setUp(self):
        app.config['TESTING'] = True
        app.config['WTF_CSRF_ENABLED'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
        self.app = app.test_client()
        db.create_all()
        _init_database()  # init database every time

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def test_case_1(self):
        self.assertTrue(User.query.count() == 1)  # case 1

    def test_case_2(self):
        self.assertTrue(User.query.count() == 1)  # case 2

if __name__ == '__main__':
    unittest.main()
As you can see, the two test cases are the same. However, only one of them passes; the other one fails.
But if I move u = User(nickname='john', email='john@example.com') into _init_database():
def _init_database():
    u = User(nickname='john', email='john@example.com')
    db.session.add(u)
    db.session.commit()
everything works fine.
I really don't know why! Could you help me?
The User instance is an object that is managed by the ORM, so it is not a good idea to make it a global variable. That object not only contains your data but also carries database session state. By making it global you are reusing the same instance across tests: it is added to the first test's database and then, still carrying that stale state, to the second test's database, which is why the second test misbehaves.
A better approach is to create a new User instance for each test: build it inside _init_database() and return it, so that setUp() can pick it up:
def _init_database():
    u = User(nickname='john', email='john@example.com')
    db.session.add(u)
    db.session.commit()
    return u
Then you can attach this user to your test case and access it from your tests.
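For example, a minimal sketch of what setUp() could look like with this change (the self.user attribute name is just an illustration, not something from the tutorial):
class TestCase(unittest.TestCase):
    def setUp(self):
        app.config['TESTING'] = True
        app.config['WTF_CSRF_ENABLED'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
        self.app = app.test_client()
        db.create_all()
        self.user = _init_database()  # a fresh User for every test

    def test_case_1(self):
        self.assertTrue(User.query.count() == 1)
        self.assertEqual(self.user.nickname, 'john')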
I am trying to mock the below function but I'm not sure how to mock the Connection response:
def get_user_res(user, password):
    res = None
    server = Server('my_server')
    connection = Connection(server, user, password, strategy=SAFE_SYNC, auto_bind=True)
    if connection.bind():
        connection.search(search_base, search_filter, SUBTREE)
        res = connection.response
    connection.unbind()
    return res

@mock.patch("ldap3.Server")
@mock.patch("ldap3.Connection.response")
def test_get_user_res(mock_connection, mock_server):
    mock_connection.return_value = ""
    retrieved_res = get_user_res("fake_user", "fake_password")
    expected_res = ""
    assert retrieved_res == expected_res
The root problem is that you're mocking the wrong things. Say you have a file named ldapclient.py that contains your get_user_res function, like this (note that I've rewritten things a bit to make our lives easier when writing tests):
import ldap3

server = ldap3.Server('my_server')
search_base = 'dc=example, dc=com'

def get_user_res(user, password, search_filter=None):
    res = None
    connection = ldap3.Connection(
        server, user, password,
        client_strategy=ldap3.SAFE_SYNC, auto_bind=True)
    if connection.bind():
        res = connection.search(search_base, search_filter, ldap3.SUBTREE)
    connection.unbind()
    return res
Then what you need to mock is the ldap3.Connection class. But since your test is in a different module, you'll need to call @mock.patch('ldapclient.ldap3.Connection'), assuming that your test is defined like this:
import ldap3
from unittest import mock

import ldapclient

@mock.patch("ldapclient.ldap3.Connection")
def test_get_user_res(mock_connection_class):
    mock_connection = mock.Mock()
    mock_connection.search.return_value = 'fake_return'
    mock_connection_class.return_value = mock_connection
    retrieved_res = ldapclient.get_user_res("fake_user", "fake_password")
    expected_res = "fake_return"
    assert retrieved_res == expected_res
There are a few things to note here:
As mentioned earlier, because we have import ldapclient, we need to mock ldapclient.ldap3.Connection.
We make the ldap3.Connection class return a new mock.Mock object, since we want to be able to mock methods on the object returned by connection = ldap3.Connection(...).
We make the search method return a fake value so that we can ensure it gets called as expected.
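If you also want to check that the connection was used the way you expect (the point of the last note above), unittest.mock's call assertions come in handy. Here is a possible follow-up test, assuming the ldapclient module sketched above:
@mock.patch("ldapclient.ldap3.Connection")
def test_get_user_res_calls_search(mock_connection_class):
    mock_connection = mock.Mock()
    mock_connection.search.return_value = 'fake_return'
    mock_connection_class.return_value = mock_connection

    ldapclient.get_user_res("fake_user", "fake_password")

    # bind() was consulted, search() ran exactly once with the module-level
    # search_base, the default search_filter (None) and the SUBTREE scope,
    # and the connection was unbound afterwards.
    mock_connection.bind.assert_called_once()
    mock_connection.search.assert_called_once_with(
        ldapclient.search_base, None, ldap3.SUBTREE)
    mock_connection.unbind.assert_called_once()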
I created a class to make my life easier while doing some integration tests involving workers and their contracts. The code looks like this:
class ContractID(str):
    contract_counter = 0
    contract_list = list()

    def __new__(cls):
        cls.contract_counter += 1
        new_entry = super().__new__(cls, f'Some_internal_name-{cls.contract_counter:010d}')
        cls.contract_list.append(new_entry)
        return new_entry

    @classmethod
    def get_contract_no(cls, worker_number):
        return cls.contract_list[worker_number-1]  # -1 so WORKER1 has contract #1 and not #0 etc.
When I'm unit-testing the class, I'm using the following code:
import pytest

from test_helpers import ContractID

@pytest.fixture
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    return test_string_1, test_string_2, test_string_3

def test_contract_id(get_contract_numbers):
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'
    assert get_contract_numbers[1] == 'Some_internal_name-0000000002'
    assert get_contract_numbers[2] == 'Some_internal_name-0000000003'

def test_contract_id_get_contract_no(get_contract_numbers):
    assert ContractID.get_contract_no(1) == 'Some_internal_name-0000000001'
    assert ContractID.get_contract_no(2) == 'Some_internal_name-0000000002'
    assert ContractID.get_contract_no(3) == 'Some_internal_name-0000000003'
    with pytest.raises(IndexError) as py_e:
        ContractID.get_contract_no(4)
    assert py_e.type == IndexError
However, when I run these tests, the second one (test_contract_id_get_contract_no) fails because the IndexError is not raised: by then there are more than three values in the list. Furthermore, when I run all the tests in my test/ folder, even the first test (test_contract_id) fails, probably because I use this class in other tests that run before it.
After reading this book, my understanding of fixtures was that they provide objects as if they had never been created before, which is obviously not the case here. Is there a way to tell the tests to use the class as if it hadn't been used anywhere else before?
If I understand correctly, you want to run the fixture as setup code so that your class has exactly 3 instances. If the fixture is function-scoped (the default), it is indeed run before each test, which creates 3 new instances of your class each time. If you want to reset your class after the test, you have to do this yourself; there is no way pytest can guess what you want to do here.
So, a working solution would be something like this:
@pytest.fixture(autouse=True)
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id():
    ...
Note that I did not yield the test strings, as you don't need them in the shown tests - if you need them, you can yield them, of course. I also added autouse=True, which makes sense if you need this for all tests, so you don't have to reference the fixture in each test.
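If you do need the created instances in a test, the same fixture can yield them instead of a plain yield. A small sketch of that variant, keeping the cleanup from above:
@pytest.fixture(autouse=True)
def get_contract_numbers():
    contracts = (ContractID(), ContractID(), ContractID())
    yield contracts
    ContractID.contract_counter = 0
    ContractID.contract_list.clear()

def test_contract_id(get_contract_numbers):
    # the yielded tuple is what the test receives
    assert get_contract_numbers[0] == 'Some_internal_name-0000000001'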
Another possibility would be to use a session-scoped fixture. In this case the setup would be done only once. If that is what you need, you can use this instead:
@pytest.fixture(autouse=True, scope="session")
def get_contract_numbers():
    test_string_1 = ContractID()
    test_string_2 = ContractID()
    test_string_3 = ContractID()
    yield
I'd like to have different database files for each Peewee ORM instance. Peewee assigns the database engine to an instance using a nested "Meta" class.
My issue seems to come down to accessing a class instance attribute from an inner class. Using the Peewee quickstart example, this is what I'm trying to achieve in (broken) Python:
from peewee import *

class Person(Model):
    def __init__(self, database):
        self.database = database

    name = CharField()
    birthday = DateField()
    is_relative = BooleanField()

    class Meta:
        # The following is incorrect; I'm trying to access the instance
        # variable for the database filename string
        database = SqliteDatabase(Person.database)

# Create two instances with different databases:
john = Person('john-database.db')
jane = Person('jane-database.db')
I've found a few general answers regarding nested classes, but struggle to translate their lessons to this specific application. I appreciate your help!
I think the short answer is "peewee isn't really designed for your use case". But I played around with it a bit, and while there has to be a better solution out there, here's something that worked. But it's not a good idea, and you shouldn't do it.
First, we use the standard peewee example model, except we use the Proxy class for the database connection:
from peewee import *
from playhouse import *

db = Proxy()

class Person(Model):
    name = CharField()
    birthday = DateField()
    is_relative = BooleanField()

    class Meta:
        database = db
Assume we have this in model.py.
Now, to make this work, we're going to need two instances of the model module, which we can get by (ab)using the importlib module:
import importlib.util
import peewee
import sys

def load_module_as(modname, alias):
    mod_spec = importlib.util.find_spec(modname)
    mod = importlib.util.module_from_spec(mod_spec)
    mod_spec.loader.exec_module(mod)
    sys.modules[alias] = mod
    return mod
This allows us to load in two separate instances of the model:
model1 = load_module_as('model', 'model1')
model2 = load_module_as('model', 'model2')
And we can then initialize two different databases:
model1.db.initialize(peewee.SqliteDatabase('db1.db'))
model2.db.initialize(peewee.SqliteDatabase('db2.db'))
While this sort of gets you what you want, you will always need to qualify your classes (model1.Person, model2.Person).
Here's a complete example, with unit tests:
import datetime
import importlib.util
import os
import peewee
import shutil
import sys
import tempfile
import unittest

def load_module_as(modname, alias):
    mod_spec = importlib.util.find_spec(modname)
    mod = importlib.util.module_from_spec(mod_spec)
    mod_spec.loader.exec_module(mod)
    sys.modules[alias] = mod
    return mod

model1 = load_module_as('model', 'model1')
model2 = load_module_as('model', 'model2')

class TestDatabase(unittest.TestCase):
    def setUp(self):
        self.workdir = tempfile.mkdtemp('testXXXXXX')

        self.db1_path = os.path.join(self.workdir, 'db1.db')
        self.db1 = peewee.SqliteDatabase(self.db1_path)
        self.db1.connect()

        self.db2_path = os.path.join(self.workdir, 'db2.db')
        self.db2 = peewee.SqliteDatabase(self.db2_path)
        self.db2.connect()

        model1.db.initialize(self.db1)
        model2.db.initialize(self.db2)

        self.db1.create_tables([model1.Person])
        self.db2.create_tables([model2.Person])

    def test_different_instances(self):
        assert model1.db != model2.db

    def test_create_model1_person(self):
        p = model1.Person(name='testperson',
                          birthday=datetime.datetime.now().date(),
                          is_relative=True)
        p.save()

    def test_create_model2_person(self):
        p = model2.Person(name='testperson',
                          birthday=datetime.datetime.now().date(),
                          is_relative=True)
        p.save()

    def test_create_both(self):
        p1 = model1.Person(name='testperson',
                           birthday=datetime.datetime.now().date(),
                           is_relative=True)
        p2 = model2.Person(name='testperson',
                           birthday=datetime.datetime.now().date(),
                           is_relative=False)
        p1.save()
        p2.save()

        p1 = model1.Person.select().where(model1.Person.name == 'testperson').get()
        p2 = model2.Person.select().where(model2.Person.name == 'testperson').get()

        assert p1.is_relative
        assert not p2.is_relative

    def tearDown(self):
        self.db1.close()
        self.db2.close()
        shutil.rmtree(self.workdir)

if __name__ == '__main__':
    unittest.main(verbosity=2)
I also located this thread with some possible answers.
I'm having some trouble with an integration test. I'm using Python 3.5, SQLAlchemy 1.2.0b3, and the latest Docker image of PostgreSQL. I wrote this test:
# tests/integration/usecases/test_users_usecase.py
class TestGetUsersUsecase(unittest.TestCase):
    def setUp(self):
        Base.metadata.reflect(_pg)
        Base.metadata.drop_all(_pg)
        Base.metadata.create_all(_pg)
        self._session = sessionmaker(bind=_pg, autoflush=True, autocommit=False, expire_on_commit=True)
        self.session = self._session()
        self.session.add(User(id=1, username='user1'))
        self.session.commit()
        self.pg = PostgresService(session=self.session)

    def test_get_user(self):
        expected = User(id=1, username='user1')
        boilerplates.get_user_usecase(storage_service=self.pg, id=1, expected=expected)
# tests/boilerplates/usecases/user_usecases.py
def get_user_usecase(storage_service, id, expected):
    u = GetUser(storage_service=storage_service)
    actual = u.apply(id=id)
    assert expected == actual
In the usecase I did the following:
# usecases/users.py
class GetUser(object):
    """
    Usecase for getting user from storage service by Id
    """
    def __init__(self, storage_service):
        self.storage_service = storage_service

    def apply(self, id):
        user = self.storage_service.get_user_by_id(id=id)
        if user is None:
            raise UserDoesNotExists('User with id=\'%s\' does not exists' % id)
        return user
storage_service.get_user_by_id looks like:
# infrastructure/postgres.py (method of Postgres class)
def get_user_by_id(self, id):
    from anna.domain.user import User
    return self.session.query(User).filter(User.id == id).one_or_none()
And it does not work in my integration test. But if I add print(actual) in the test before the assert, everything is OK. I thought my test was bad; I tried many variants and none of them worked. I also tried returning a generator from storage_service.get_user_by_id() and that did not work either. What did I do wrong? It only works if print() is called in the test.
The question is simple, but I don't know the answer...
I'm a newbie with testing and I have problems testing a class that drives an sqlite3 database. What is the best way to test a class like this? Testing the class itself or the init function is not a problem, but what about the other methods? Should the tests insert a test row?
import sqlite3

class DataBase:
    def __init__(self):
        self._database_path = 'data.sql'
        self._conn = sqlite3.connect(self._database_path)
        self._cursor = self._conn.cursor()

    def get(self, sql):
        # select
        self._cursor.execute(sql)
        dataset = []
        for row in self._cursor:
            dataset.append(row)
        return dataset

    def post(self, sql):
        # insert
        self._cursor.execute(sql)
        self._conn.commit()
Thank you all in advance for your answers!!
You can use the rollback function of the database.
Just replace self._conn.commit() with self._conn.rollback() and you can test the validity of your SQL with no effect on the data.
If you need to test a series of actions (e.g. get data -> modify data -> insert new data -> remove some data -> get data again), you can remove all the _conn.commit() calls in your code, run the tests, and finally call _conn.rollback().
Example:
import sqlite3

class DataBase:
    def __init__(self):
        self._database_path = 'data.sql'
        self._conn = sqlite3.connect(self._database_path)
        self._cursor = self._conn.cursor()

    def get(self, sql):
        # select
        self._cursor.execute(sql)
        dataset = []
        for row in self._cursor:
            dataset.append(row)
        return dataset

    def post(self, sql):
        # insert
        self._cursor.execute(sql)

    def delete(self, sql):
        # delete
        self._cursor.execute(sql)

    def rollback(self):
        self._conn.rollback()

# You do your tests ('table' stands in for your actual table name):
db = DataBase()
data = db.get('select name from table')
new_data = ['new' + row[0] for row in data]  # get() returns rows as tuples
db.post('insert into table values {}'.format(','.join("('{}')".format(d) for d in new_data)))
db.delete('delete from table where name = \'newMario\'')
check = bool(db.get('select name from table where name = \'newMario\''))
if not check:
    print('delete ok')

# You make everything as before the test:
db.rollback()
I think CursorTests in the official sqlite3 test suite is a good example:
https://github.com/python/cpython/blob/master/Lib/sqlite3/test/dbapi.py#L187
You can write setUp and tearDown methods to set up and rollback the database.
from unittest import TestCase

class TestDataBase(TestCase):
    def setUp(self):
        self.db = DataBase()

    def test_get(self):
        pass  # your code here

    def test_post(self):
        pass  # your code here
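For completeness, here is a minimal sketch of what filled-in tests plus a rollback-based tearDown could look like, assuming the commit-free post() variant from the answer above and a hypothetical people table used only by the tests:
from unittest import TestCase

class TestDataBase(TestCase):
    def setUp(self):
        self.db = DataBase()
        # hypothetical table used only by these tests
        self.db.post('create table if not exists people (name text)')

    def test_post_and_get(self):
        self.db.post("insert into people values ('Mario')")
        rows = self.db.get("select name from people where name = 'Mario'")
        self.assertEqual(rows, [('Mario',)])

    def tearDown(self):
        # discard the rows the test inserted, as suggested above
        self.db._conn.rollback()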