The question is simple; the answer I don't know...
I'm new to testing and I'm having trouble testing a class that drives an SQLite3 database. What is the best way to test a class like this? Testing the class itself or the __init__ method is not a problem, but what about the other methods? Should the test insert a test row?
import sqlite3


class DataBase:

    def __init__(self):
        self._database_path = 'data.sql'
        self._conn = sqlite3.connect(self._database_path)
        self._cursor = self._conn.cursor()

    def get(self, sql):
        # select
        self._cursor.execute(sql)
        dataset = []
        for row in self._cursor:
            dataset.append(row)
        return dataset

    def post(self, sql):
        # insert
        self._cursor.execute(sql)
        self._conn.commit()
Thank you to all of you for your answers!
You can use the database's rollback function.
Just replace self._conn.commit() with self._conn.rollback() and you can test the validity of your SQL with no effect on the data.
If you need to test a series of actions (e.g. get data -> modify data -> insert new data -> remove some data -> get data again), you can remove all the _conn.commit() calls in your code, run the tests, and finally call _conn.rollback().
Example:
import sqlite3


class DataBase:

    def __init__(self):
        self._database_path = 'data.sql'
        self._conn = sqlite3.connect(self._database_path)
        self._cursor = self._conn.cursor()

    def get(self, sql):
        # select
        self._cursor.execute(sql)
        dataset = []
        for row in self._cursor:
            dataset.append(row)
        return dataset

    def post(self, sql):
        # insert
        self._cursor.execute(sql)

    def delete(self, sql):
        # delete
        self._cursor.execute(sql)

    def rollback(self):
        self._conn.rollback()
# You do your tests:
db = DataBase()
data = db.get('select name from table')
new_data = ['new' + row[0] for row in data]  # rows come back as tuples
db.post('insert into table values {}'.format(
    ','.join("('{}')".format(d) for d in new_data)))
db.delete('delete from table where name = \'newMario\'')
check = bool(db.get('select name from table where name = \'newMario\''))
if not check:
    print('delete ok')

# You make everything as before the test:
db.rollback()
I think the CursorTests class in the official sqlite3 tests is a good example.
https://github.com/python/cpython/blob/master/Lib/sqlite3/test/dbapi.py#L187
You can write setUp and tearDown methods to set up and roll back the database.
from unittest import TestCase


class TestDataBase(TestCase):

    def setUp(self):
        self.db = DataBase()

    def test_get(self):
        pass  # your code here

    def test_post(self):
        pass  # your code here
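For example, a filled-in version might look like the sketch below. This is only one possible approach: it assumes the DataBase class from the question is saved as database.py, and it uses a throwaway data.sql file that is recreated for every test.

import os
import unittest

from database import DataBase  # assumption: the class from the question lives in database.py


class TestDataBase(unittest.TestCase):

    def setUp(self):
        # DataBase() hardcodes 'data.sql', so start every test from a fresh file
        if os.path.exists('data.sql'):
            os.remove('data.sql')
        self.db = DataBase()
        self.db.post('create table users (name text)')

    def tearDown(self):
        # undo anything uncommitted, close the connection and drop the scratch file
        self.db._conn.rollback()
        self.db._conn.close()
        os.remove('data.sql')

    def test_post_and_get(self):
        self.db.post("insert into users values ('Mario')")
        self.assertEqual(self.db.get('select name from users'), [('Mario',)])


if __name__ == '__main__':
    unittest.main()

An in-memory database (sqlite3.connect(':memory:')) would be even faster, but that would require making the database path a parameter of DataBase.__init__.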
I'm trying to figure out how I can check that execute_db returns the result of a call to cursor.fetchone.
I'm not interested in whether the db actually works; that will be covered by an integration test later on.
I've already written a small unit test, but there I'm only mocking the return value. I also want a way to test that the method with the given name is actually being called.
class DataChecker:

    def __init__(self):
        # Initialize class
        self.conn = sqlite3.connect("pos.db")
        self.cursor = self.conn.cursor()

    def execute_db(self, query, params=None):
        # Execute SQL query with parameters and return data
        self.cursor.execute(query, [params])
        self.conn.commit()
        return self.cursor.fetchone()
Test:
def test_execute_db():
    mock_datachecker = Mock()
    mock_datachecker.cursor.fetchone.return_value = "one"

    assert DataChecker.execute_db(mock_datachecker, "SELECT * FROM Customers;", 1) == "one"
You would mock the fetchone method of sqlite3 as it is imported in the DataChecker module.
db.py
import sqlite3


class DataChecker:

    def __init__(self):
        # Initialize class
        self.conn = sqlite3.connect("pos.db")
        self.cursor = self.conn.cursor()

    def execute_db(self, query, params=None):
        # Execute SQL query with parameters and return data
        if params:
            self.cursor.execute(query, params)
        else:
            self.cursor.execute(query)
        self.conn.commit()
        return self.cursor.fetchone()
Then you can patch db.sqlite3 to mock the connect().cursor().fetchone method.
from unittest.mock import patch

from db import DataChecker


def test_execute_db():
    with patch('db.sqlite3') as mock_db:
        mock_db.connect().cursor().fetchone.return_value = "one"

        assert DataChecker().execute_db("SELECT * FROM Customers") == "one"
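If you also want to verify that execute and fetchone are actually being called, which is what the original test could not check, you can interrogate the same patched mock. A sketch under the same assumption that the class lives in db.py; the test name is just illustrative:

from unittest.mock import patch

from db import DataChecker  # assumption: the module shown above


def test_execute_db_calls_fetchone():
    with patch('db.sqlite3') as mock_db:
        # connect() and cursor() return the same child mocks regardless of arguments
        mock_cursor = mock_db.connect.return_value.cursor.return_value
        mock_cursor.fetchone.return_value = "one"

        result = DataChecker().execute_db("SELECT * FROM Customers")

        assert result == "one"
        mock_cursor.execute.assert_called_once_with("SELECT * FROM Customers")
        mock_cursor.fetchone.assert_called_once_with()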
I am working on a program to store my picture metadata and thumbnails in a PostgreSQL database using Python and psycopg2. In the example I have defined a class MyDbase with methods to create a table, store a value and load a value. Each of these methods needs a connection to the database and a cursor object to execute SQL commands. To avoid repeating the code that makes the connection and gets the cursor, I have made a nested class DbDecorators with a connect decorator.
My question: is this a proper way to handle this, and specifically, is it OK to use the with statement and pass the cursor to the MyDbase method (func) inside the wrapper?
from functools import wraps

import psycopg2


class MyDbase:
    ''' example using a decorator to connect to a dbase
    '''
    table_name = 'my_table'

    class DbDecorators:
        host = 'localhost'
        db_user = 'db_tester'
        db_user_pw = 'db_tester_pw'
        database = 'my_database'

        @classmethod
        def connect(cls, func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                connect_string = f'host=\'{cls.host}\' dbname=\'{cls.database}\' '\
                                 f'user=\'{cls.db_user}\' password=\'{cls.db_user_pw}\''
                result = None
                try:
                    with psycopg2.connect(connect_string) as connection:
                        cursor = connection.cursor()
                        result = func(*args, cursor, **kwargs)
                except psycopg2.Error as error:
                    print(f'error while connect to PostgreSQL {cls.database}: '
                          f'{error}')
                finally:
                    if connection:
                        cursor.close()
                        connection.close()
                        print(f'PostgreSQL connection to {cls.database} is closed')
                return result
            return wrapper

        @staticmethod
        def get_cursor(cursor):
            if cursor:
                return cursor
            else:
                print(f'no connection to database')
                raise()

    @classmethod
    @DbDecorators.connect
    def create_table(cls, *args):
        cursor = cls.DbDecorators().get_cursor(*args)
        sql_string = f'CREATE TABLE {cls.table_name} '\
                     f'(id SERIAL PRIMARY KEY, name VARCHAR(30));'
        print(sql_string)
        cursor.execute(sql_string)

    @classmethod
    @DbDecorators.connect
    def store_value(cls, name, *args):
        cursor = cls.DbDecorators().get_cursor(*args)
        sql_string = f'INSERT INTO {cls.table_name} (name) VALUES (%s);'
        print(sql_string)
        cursor.execute(sql_string, (name,))

    @classmethod
    @DbDecorators.connect
    def load_value(cls, _id, *args):
        cursor = cls.DbDecorators().get_cursor(*args)
        sql_string = f'SELECT * FROM {cls.table_name} where id = \'{_id}\';'
        print(sql_string)
        cursor.execute(sql_string)
        db_row = cursor.fetchone()
        return db_row


def test():
    my_db = MyDbase()
    my_db.create_table()
    my_db.store_value('John Dean')
    db_row = my_db.load_value(1)
    print(f'id: {db_row[0]}, name: {db_row[1]}')


if __name__ == '__main__':
    test()
Probably I did not understand your question correctly, but why do you need a decorator rather than a context manager? You could define the db client in a file from which you import it later, and then use it in a context manager:
from psycopg2 import SomeDataBase

db = SomeDataBase(credentials)


def create_table(table_name):
    with db:
        sql_string = f'CREATE TABLE {table_name} '\
                     f'(id SERIAL PRIMARY KEY, name VARCHAR(30));'
        db.cursor.execute(sql_string)
In psycopg2, using the connection as a context manager will not close the connection; it only wraps the transaction (the cursor's context manager is what closes the cursor). So using the decorator pattern actually makes more sense here. More info on the context manager: https://www.psycopg.org/docs/usage.html (scroll down to the "with statement" section).
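To illustrate the difference, here is a rough sketch of how the three levels nest in psycopg2, reusing the credentials from the question: contextlib.closing() is what actually closes the connection, the connection's own context manager only commits or rolls back the transaction, and the cursor's context manager closes the cursor.

import contextlib

import psycopg2

connect_string = "host='localhost' dbname='my_database' user='db_tester' password='db_tester_pw'"

# closing() really closes the connection when the block ends
with contextlib.closing(psycopg2.connect(connect_string)) as connection:
    # the connection as a context manager only scopes the transaction (commit/rollback)
    with connection:
        # the cursor as a context manager is closed on exit
        with connection.cursor() as cursor:
            cursor.execute('SELECT 1')
            print(cursor.fetchone())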
I'm having some trouble with an integration test. I'm using Python 3.5, SQLAlchemy 1.2.0b3, and the latest Docker image of PostgreSQL. I wrote this test:
# tests/integration/usecases/test_users_usecase.py
class TestGetUsersUsecase(unittest.TestCase):

    def setUp(self):
        Base.metadata.reflect(_pg)
        Base.metadata.drop_all(_pg)
        Base.metadata.create_all(_pg)
        self._session = sessionmaker(bind=_pg, autoflush=True, autocommit=False,
                                     expire_on_commit=True)
        self.session = self._session()
        self.session.add(User(id=1, username='user1'))
        self.session.commit()
        self.pg = PostgresService(session=self.session)

    def test_get_user(self):
        expected = User(id=1, username='user1')
        boilerplates.get_user_usecase(storage_service=self.pg, id=1, expected=expected)
# tests/boilerplates/usecases/user_usecases.py
def get_user_usecase(storage_service, id, expected):
    u = GetUser(storage_service=storage_service)
    actual = u.apply(id=id)

    assert expected == actual
In the usecase I do the following:
# usecases/users.py
class GetUser(object):
    """
    Usecase for getting user from storage service by Id
    """

    def __init__(self, storage_service):
        self.storage_service = storage_service

    def apply(self, id):
        user = self.storage_service.get_user_by_id(id=id)
        if user is None:
            raise UserDoesNotExists('User with id=\'%s\' does not exists' % id)
        return user
storage_service.get_user_by_id looks like:
# infrastructure/postgres.py (method of the Postgres class)
def get_user_by_id(self, id):
    from anna.domain.user import User
    return self.session.query(User).filter(User.id == id).one_or_none()
This does not work in my integration test, but if I add print(actual) in the test before the assert, everything is OK. I thought my test was bad, so I tried many variants, but none of them work. I also tried returning a generator from storage_service.get_user_by_id(), and that does not work either. What did I do wrong? It only works when print() is called in the test.
This is my first time using sqlite3 through class inheritance, and I've run into a problem where I get no traceback errors, but the queries I execute won't commit. I have simplified my code:
import sqlite3 as lite


class BaseModel(lite.Connection):

    def __init__(self, **args):
        lite.Connection.__init__(self, **args)
        self.cur = self.cursor()

    def execute(self, query):
        self.cur.execute(query)


class Model(BaseModel):

    def __init__(self, **args):
        BaseModel.__init__(self, **args)

    def _new_(self):
        queries = []
        queries.append(' '.join(['CREATE TABLE IF NOT EXISTS tb1',
                                 '(id INTEGER PRIMARY KEY,',
                                 'column1 TEXT,',
                                 'column2 INT)']))
        for q in queries:
            self.execute(q)  # execute the queries
self.commit() # write changes to db
    def tables(self):
        query = 'SELECT name FROM sqlite_master WHERE type="table" ORDER BY name'
        results = self.execute(query)
        return results  # .fetchall()
if __name__ == '__main__':
    model = Model(database='test.db')
    model._new_()

    # Test fails because the queries aren't being saved in the db;
    # see Model._new_ for details
    tables = model.tables()  # get all tables

    print 'Tables Created:'
    if tables:
        for t in model.tables():
            print '\t%s' % str(t[0])
    else:
        print tables
You need to call self.commit():
self.commit() # write changes to db
Without the () you are merely referencing the method, not invoking it.
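A tiny illustration of the difference between referencing a method and calling it (Demo is just an example class):

class Demo:
    def commit(self):
        print('committed')

d = Demo()
d.commit     # evaluates to a bound-method object; nothing runs
d.commit()   # actually invokes the method and prints 'committed'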
Next, your execute() function doesn't return anything:
def execute(self, query):
    return self.cur.execute(query)
I am new to Python and can't seem to figure out why the .getRow method doesn't run. I created a DbMain class in dbMain.py, and I am using pyTest.py to create the DbMain object and run getRow. When I run the debugger in Eclipse, DbMain's constructor runs, but when the getRow method is called nothing happens.
pyTest.py
import dbMain


def main():
    db = dbMain.DbMain()
    db.getRow()


if __name__ == '__main__':
    main()
dbMain.py
##PydevCodeAnalysisIgnore
import pyodbc


class DbMain(object):
    cncx = ''

    def __init__(self):
        cnxn = pyodbc.connect(driver='{SQL Server}',
                              server='server',
                              database='database',
                              uid='name',
                              pwd='pwd')

    def getRow():
        cursor = cnxn.cursor()
        cursor.execute("select user_id, user_name from users")
        row = cursor.fetchone()
        return row
You do not return anything from getRow. Maybe you want to include something like
...
return row
Your getRow() method is not bound to the class. The signature for an instance method should look something like getRow(self) - the first parameter is the instance, which is received explicitly (but passed implicitly, when you call someinstance.method()).
To get something functional, you should probably alter your dbMain to something like this:
##PydevCodeAnalysisIgnore
import pyodbc


class DbMain(object):

    def __init__(self):
        # make cnxn an attribute of the instance
        self.cnxn = pyodbc.connect(driver='{SQL Server}', server='server',
                                   database='database', uid='name', pwd='pwd')

    # receive `self` explicitly
    def getRow(self):
        cursor = self.cnxn.cursor()
        cursor.execute("select user_id, user_name from users")
        row = cursor.fetchone()
        # actually return something
        return row
Further reading:
Python: Difference between class and instance attributes
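As a quick illustration of why that distinction matters here: cncx = '' in the original DbMain is a class attribute, the cnxn created in __init__ was only a local variable, and self.cnxn in the corrected version is an instance attribute that getRow(self) can reach. A minimal example (Example is just an illustrative class):

class Example:
    shared = ''              # class attribute: defined once on the class, shared by all instances

    def __init__(self):
        self.own = 'mine'    # instance attribute: stored on each instance via self
        local = 'gone'       # plain local variable: disappears when __init__ returns


e = Example()
print(Example.shared)   # ''     - lives on the class
print(e.own)            # 'mine' - lives on this instance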