I have a simple class driven sqlite app. Basically, I want to run unit tests for it, but I haven't been able to so far.
class DB:
    """Thin wrapper that owns a sqlite3 connection to *dbname*."""

    def __init__(self, dbname='mydb.db'):
        # Catch only sqlite3 errors: a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit and hide unrelated bugs, and the
        # empty `finally: pass` did nothing at all.
        try:
            self.connection = sqlite3.connect(dbname)
        except sqlite3.Error as exc:
            print('Error:', exc)
Any class would use it then:
class Hello:
    """Demo consumer of DB: opens the default database inside hi()."""

    def hi(self):
        # Create db or connect to existing one, then grab a cursor.
        database = DB()
        cursor = database.connection.cursor()
Now, when testing, I pass on a test database:
db = DB('test.db')
# create database here and all works fine
h = Hello()
Now, h makes use of mydb.db instead of test.db. How does one go about testing the above structure?
If you want to pass the instance of your DB class (db) you would need to feed the instance to your Hello class. Try:
class DB:
    """Owns a sqlite3 connection to *dbname* (defaults to 'mydb.db')."""

    def __init__(self, dbname='mydb.db'):
        # Narrowed from a bare `except:` so only database errors are
        # reported; the no-op `finally: pass` was removed.
        try:
            self.connection = sqlite3.connect(dbname)
        except sqlite3.Error as exc:
            print('Error:', exc)
class Hello:
    """Consumer of DB that accepts the connection wrapper as an argument."""

    def hi(self, db=None):
        # A default of `db=DB()` is evaluated ONCE, at function-definition
        # time, so every call would silently share that single connection.
        # Create the fallback lazily instead (same observable default:
        # a DB() pointing at 'mydb.db').
        if db is None:
            db = DB()
        cursor = db.connection.cursor()
db = DB('test.db')  # this makes an instance of your DB class and calls it "db"
h = Hello()
# Hello defines no __init__, so `Hello(db)` raises TypeError; the DB
# instance is a parameter of hi(), not of the constructor.
h.hi(db)
Although this probably isn't the best way to go about it. You would likely benefit more from having a single class with methods because your second class is basically useless and is very closely related to your first class anyways:
class DB:
    """sqlite3 wrapper; hello() shows a method working on the connection."""

    def __init__(self, dbname='mydb.db'):
        # Narrow the bare `except:` to sqlite3.Error so unrelated errors
        # are not silently swallowed; drop the no-op `finally: pass`.
        try:
            self.connection = sqlite3.connect(dbname)
        except sqlite3.Error as exc:
            print('Error:', exc)

    def hello(self):  # making a method instead
        cursor = self.connection.cursor()
db = DB('test.db')  # this makes an instance of your DB class and calls it "db"
db.hello()  # call our method
EDIT
I missed something originally that I found from testing my code. Your code should work fine, but you need to call the method you've made! Try this:
import sqlite3
class DB:
    """Owns a sqlite3 connection to *dbname* (defaults to 'mydb.db')."""

    def __init__(self, dbname='mydb.db'):
        # Catch only sqlite3.Error instead of everything, and drop the
        # empty `finally: pass` which had no effect.
        try:
            self.connection = sqlite3.connect(dbname)
        except sqlite3.Error as exc:
            print('Error:', exc)
class Hello:
    """Demo class whose hi() always opens the test database."""

    def hi(self):
        database = DB('test.db')
        cursor = database.connection.cursor()
db = DB('test.db')
h = Hello()  # make our instance
h.hi()  # use the method "hi" associated with the class (our function name within the class)
Related
I'm doing my first telegram-bot-project using python+peewee+postgresql (without django).
I just want to know how to connect to my database not once (at the start of my project's code), but every time it's needed. For example: the user taps a button -> a connection opens -> it adds a line to the table(s).
Right now file structure looks like this:
tgbot.py
# some lines with imports
updater = Updater(TELEGRAM_TOKEN)  # python-telegram-bot long-polling entry point
dp = updater.dispatcher
dp = setup_dispatcher(dp)  # register handlers; defined in dispatcher.py
# some lines with logging config
updater.start_polling()  # start fetching updates from Telegram
updater.idle()  # block the main thread until interrupted
dispatcher.py
# some lines with imports
# some def for event handlers
# !!!!! here will be database connections (in functions for events)
def setup_dispatcher(dp):
    """Register all bot command/message handlers and return the dispatcher."""
    db_conn = DBConnection()  # Enter database connection
    from dbmodels import db
    # NOTE(review): the `db` imported above is immediately rebound on the
    # next line, so the import has no effect here — confirm which object
    # the models are supposed to use.
    db = db_conn.get_connection()
    ToDo.create(...)  # creating line in postgres-table
    dp.add_handler(CommandHandler('start', start))
    dp.add_handler(CommandHandler('location', ask_for_location))
    dp.add_handler(MessageHandler(Filters.location, change_location))
    dp.add_handler(CommandHandler('today', ask_for_today))
    return dp
dbhelper.py
# some lines with imports
class DBConnection(Singleton):
    """Process-wide PostgreSQL connection holder (one instance via Singleton)."""

    def __init__(self): ...  # initializing some variables like self.database, etc

    def get_connection(self):
        """ Creating PostgreSQL's database connection """
        # NOTE(review): assumes __init__ initialized self.connection to None;
        # otherwise the first access raises AttributeError — confirm.
        if self.connection is None:
            try:
                self.connection = PostgresqlDatabase(self.database, user=self.user, password=self.password, host=self.host, port=self.port)
                self.curs = self.connection.cursor()
            except (Exception, Error) as error:
                # Any failure is fatal: report and stop the process.
                print("PostgreSQL's connection error: \n", error)
                sys.exit(1)
        return self.connection

    def __del__(self):
        """ Closing database connection """
        # NOTE(review): relying on __del__ for cleanup is fragile — it may
        # run late (or not at all) at interpreter shutdown.
        if self.connection is not None:
            self.curs.close()
            self.connection.close()
            print("PostgreSQL's connection closed.")
dbmodels.py
# some lines with imports
db = PostgresqlDatabase(None)  # deferred-init handle; presumably configured later via init() — confirm
class Singleton:
    """Base class guaranteeing one shared instance (used by DBConnection)."""
    _instance = None

    def __new__(cls):
        # Guard clause: hand back the cached instance when it exists,
        # otherwise build and memoize it.
        if cls._instance is not None:
            return cls._instance
        cls._instance = super().__new__(cls)
        return cls._instance
class BaseModel(Model):
    """ Basic class, used for tables-classes in models.py """
    class Meta:
        database = db  # all subclasses share the module-level db handle above
models.py
# some lines with imports
class User(BaseModel):
    """ A model for client's table """
    # initializing some fields
    class Meta:
        db_table = "clients"  # explicit table name instead of the derived default
    ...
class ToDo(BaseModel):
    """ A model of to-do's table """
    # initializing some fields
    class Meta:
        db_table = "to_do_s"  # explicit table name instead of the derived default
    ...
This is all completely unnecessary. The peewee database object is already a singleton. You just need to call the connect() and close() methods on it when you want to connect/close.
Alternatively you can use the database instance as a context-manager, e.g.:
db = PostgresqlDatabase(...)  # real connection parameters go here
with db:  # database object as context-manager: connect on enter, close on exit
    ToDo.create(...)
I want to be able to create multiple files of same model database.
To use each file with it own connection.
Without the need to initialize the database each time I want to use a different one.
Something like:
sqlite_1.add_user(name="Jerry")
sqlite_2.add_user(name="Jerry")
Solution with proxy with initialize
main.py
# main.py
import peewee as pw
import database as db
sqlite_1 = pw.SqliteDatabase('sqlite_1.db')
sqlite_2 = pw.SqliteDatabase('sqlite_2.db')
# Point the shared proxy at the first file, ensure its tables exist,
# then write through the model layer.
db.proxy.initialize(sqlite_1)
sqlite_1.create_tables([db.User], safe=True)
db.add_user(name="Tom")
# Re-point the same proxy at the second file and repeat.
db.proxy.initialize(sqlite_2)
sqlite_2.create_tables([db.User], safe=True)
db.add_user(name="Jerry")
database.py:
# database.py
import peewee as pw
# Stand-in database: models bind to this proxy now; the real database is
# attached later via proxy.initialize(...) in main.py.
proxy = pw.Proxy()
class BaseModel(pw.Model):
    """Common base: routes every subclass through the module-level proxy."""
    class Meta:
        database = proxy
class User(BaseModel):
    """Minimal model: a single text column, bound to the proxy via BaseModel."""
    name = pw.CharField()
def add_user(name):
    """Insert a new User row with *name* inside a transaction."""
    with proxy.atomic():
        # Model.create() already persists the new row; the original's
        # trailing `.save()` issued a redundant second write (an UPDATE).
        User.create(name=name)
def get_user(name):
    """Return the User whose name matches *name* (raises if absent)."""
    # The transaction handle was never used, so it is not bound to a name.
    with proxy.atomic():
        return User.get(User.name == name)
Can I do multiple databased with same model without proxy ?
You probably want to use the bind() and bind_ctx() methods to swap a model between databases at runtime:
http://docs.peewee-orm.com/en/latest/peewee/api.html#Database.bind_ctx
MODELS = (User, Account, Note)  # every model to rebind onto the test database together
# Bind the given models to the db for the duration of wrapped block.
def use_test_database(fn):
    """Decorator: run *fn* with MODELS bound to test_db, creating the
    tables first and dropping them afterwards."""
    @wraps(fn)  # was '#wraps(fn)' — the '@' was mangled by formatting; requires `from functools import wraps`
    def inner(self):
        with test_db.bind_ctx(MODELS):
            test_db.create_tables(MODELS)
            try:
                fn(self)
            finally:
                # Always clean up, even when the test fails.
                test_db.drop_tables(MODELS)
    return inner
class TestSomething(TestCase):
    @use_test_database  # was '#use_test_database' — markdown ate the '@'
    def test_something(self):
        # ... models are bound to test database ...
        pass
I have a Python class where the constructor creates a MySQL database connection as follows:
class MySQL:
    """Opens a pymysql connection eagerly in the constructor."""

    def __init__(self):
        self.client = self.get_client()

    def get_client(self):
        # `self` was missing: self.get_client() passes the instance
        # implicitly, so a zero-argument def raised TypeError.
        client = pymysql.connect(**mysql_credentials)
        return client
The problem with this implementation is that the connection never ends. So I want to modify the class to create the database connection in the __enter__ method and close the connection on __exit__ method as follows:
class MySQL:
    """Context-managed MySQL wrapper: connect on enter, close on exit."""

    def __enter__(self):
        self.client = self.get_client()
        # Without returning self, `with MySQL() as db:` binds db to None.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.client.close()

    def get_client(self):
        # `self` added: the method is invoked as self.get_client().
        client = pymysql.connect(**mysql_credentials)
        return client

    def execute_query(self, sql_query: str):
        with self.client.cursor() as cursor:
            cursor.execute(sql_query)
Now the question. How can instantiate MySQL class with __enter__ and __exit__ inside the constructor of another class?
It can't be done like this, because it only calls the MySQL __init__ method and will not open the connection:
class AnotherClass:
    def __init__(self):
        # NOTE(review): MySQL() runs only __init__ here, not __enter__,
        # so no connection is opened before execute_query is used.
        self.mysql_cli = MySQL()
    def run_etl(self):
        self.mysql_cli.execute_query('''SELECT VERSION();''')
Any suggestions?
Many thanks!
There are three different ways for a class to properly use a context manager inside its own code.
One option is for the new class to be a context manager itself, so its users can do the managing of the connection lifetimes through it. This is pretty straight forward, just call the __enter__ and __exit__ methods of your contained context manager from your own versions of those methods. That might look like this:
class AnotherClass:
    """Delegating context manager: forwards enter/exit to the inner MySQL."""

    def __init__(self):
        self.db = MySQL()

    def __enter__(self):
        self.db.__enter__()
        return self

    def __exit__(self, *exc_args):
        return self.db.__exit__(*exc_args)

    def do_stuff(self):
        # do stuff with the database
        pass  # placeholder body: a comment alone is a SyntaxError
The burden of managing the connection is delegated to the user of the class, who can use with statements on their AnotherClass instance:
with AnotherClass() as another:  # enter opens the inner connection
    another.do_stuff()
This approach can get tedious though if you have lots of layers of aggregation, and need all the intermediate containers to become context managers just because they contain one at some much lower level.
Another approach is to make separate database connections for each operation that needs to use the database for something. This lets you use with statements for the connection management, but may require that you do a lot more connecting and reconnecting than you'd like:
class AnotherClass:
    """Opens a short-lived connection around each database operation."""

    def __init__(self):
        self.db = MySQL()

    def do_stuff(self):
        with self.db:
            # do the actual stuff here, using the database
            pass  # placeholder body: a comment alone is a SyntaxError
With this design, the user of AnotherClass doesn't need to use the context manager protocol because the database connections are only alive during the runtime of the methods you call.
another = AnotherClass()
another.do_stuff()  # creates and destroys the db connection internally
The third option is to say that AnotherClass should not be involved with the connection management of the MySQL class it uses. That's the responsibility of some other part of the code, and we don't even need to know about it here. For this approach, you probably want the MySQL instance to be created elsewhere (wherever it is being managed) and have it be passed in as an argument to the AnotherClass constructor:
class AnotherClass:
    """Takes an already-managed db object; does no connection management."""

    def __init__(self, db):
        self.db = db

    def do_stuff(self):
        # do stuff with the database
        pass  # placeholder body: a comment alone is a SyntaxError
The caller would do something like this:
db = MySQL()
with db:  # connection lifetime is managed here, outside AnotherClass
    another = AnotherClass(db)
    another.do_stuff()
This has the advantage that the db object passed in to AnotherClass can be whatever type you want, as long as it has the APIs that AnotherClass expects. If you need to change your database from MySQL to Postgres, you don't need to change AnotherClass, only the higher level code.
I am writing some python gui app (PySide to be exact) and I am using my own class to handling DB. What's the correct way to use models? Currently I have something like this:
class DB(object):
    """Owns the database connection and all entry queries (PySide app)."""

    def __init__(self, dbfile):
        # some db connect work
        pass

    # `self` added to the three methods below: they are instance methods,
    # so without it the instance was bound to the first parameter.
    def updateEntry(self, entryid):
        # some update query etc
        pass

    def getEntry(self, entryid):
        # fetching entry from db
        pass

    def createEntry(self, entryvalue):
        # insert entry
        pass
class EntryModel(object):
    """Model object backed by a DB instance supplied at construction."""

    def __init__(self, db, entryid=None, entryvalue=None):
        # `self` was missing from the original signature, so `db` captured
        # the instance and the body's `self` was an undefined name.
        self.db = db
        self.entryid = entryid
        self.entryvalue = entryvalue
        if entryid is None:
            self.db.createEntry(self.entryvalue)  # new entry -> insert
        elif self.entryvalue is None:
            self.db.getEntry(self.entryid)  # known id -> fetch

    def some_func(self):
        # some other work
        pass
And it's working just fine... But I have a feeling that something is wrong here... I mean, I have to pass DB to each model, I don't think that's correct way. How to do it in proper way without using frameworks like SQLAlchemy and so on?
You can at least create a base class, let's called it Model (like in Django, or Base as it is called in SQLAlchemy)
We'll keep a reference to the db object as a class attribute so it is the same for all instances, and inherited so you don't have to pass it around
class Model(object):
    """Base model: the shared db handle lives on the class, inherited by all."""
    db = None  # This var is a class attribute

    @classmethod  # was '#classmethod' — the '@' was mangled by formatting
    def init_db(cls):
        cls.db = your_code_to_create_db()
class Entry(Model):
    """Concrete model; reaches the database via the inherited `db` attribute."""
    def __init__(self, entry_id, entry_value):
        self.entry_id = entry_id
        self.entry_value = entry_value
        super(Entry, self).__init__()
    def save(self):
        # Use db here — class attribute set once by Model.init_db().
        self.db
# To use
Model.init_db()  # Inits the one db var for the class (shared by every model)
entry = Entry(...)
entry.save()
I hope you see the idea and adapt it to your needs!
I saw a code as follows (from https://github.com/daydayfree/diggit/blob/master/model/model.py) :
from database import Database
...
class Model(object):
    """Model whose `db` property hands out a fresh Database per access."""

    @property  # was '#property' — the '@' was mangled by formatting
    def db(self):
        return Database()

    def insert(self, documents):
        # `self.table` is presumably defined on subclasses — not shown here.
        return self.db.insert(self.table, documents)
...
The main aim for @property is to provide access to the methods of the Database() instance, am I correct?
So can I rewrite it as:
from database import Database
...
class Model(object):
    def __init__(self):
        # One Database instance created at construction and reused for the
        # object's lifetime (unlike the @property version, which makes a
        # new one on every access).
        self.db = Database()
    def insert(self, documents):
        # `self.table` is presumably defined on subclasses — not shown here.
        return self.db.insert(self.table, documents)
and
from database import Database
...
class Model(object):
    def db(self):
        # Plain method: each call constructs a brand-new Database instance.
        return Database()
    def insert(self, documents):
        # Note the parentheses — db() must be *called* here, unlike the
        # @property version.
        return self.db().insert(self.table, documents)
...
? If not, what are the differences between them?
There are differences...
Method 1: property decorator
class Model(object):
    """Method 1: `db` exposed as a read-only property."""

    @property  # was '#property' — the '@' was mangled by formatting
    def db(self):
        return Database()
o = Model()
db1 = o.db  # a database instance. No brackets
db2 = o.db  # another database instance
o.db = foo  # error due to read only property (no setter defined)
Every time db is called it creates a new database instance.
Method 2: db set on initialization
class Model(object):
    def __init__(self):
        self.db = Database()  # one instance created here, then reused
o = Model()
db1 = o.db  # a database instance
db2 = o.db  # the same database instance
o.db = foo  # works fine so long as foo is defined (plain attribute)
Every time db is accessed it returns the same database instance.
Method 3: db as a function
class Model(object):
    def db(self):
        return Database()  # new instance on every call
o = Model()
db1 = o.db()  # a database instance. note the brackets
db2 = o.db()  # another database instance
o.db = foo  # works fine so long as foo is defined (shadows the method)
Every time db is called it creates a new database instance.
The @property decorator is used to make calling a method look like accessing an attribute.
So, if you had a Model instance, you could get a new database object by accessing what looks like a db attribute but is really the db method:
>>> a = Model()
>>> a.db
Database()
In your first "rewrite" example, you create a db attribute in the __init__ method of your class. Now, every time you call the db attribute, you will get the same Database object each time (the one created during the __init__ call), not a new one as before.
To imagine this, you could replace return Database() with return random.random() from the python standard library. In the original implementation, a new number will be returned each time you call db. In your suggested implementation, the same number will be returned each time because random.random() was only called once (in the __init__ method), and its output was saved in db.
Your second "rewrite" is essentially the same as the original implementation, except that you would call db as a method (i.e. with the open and close parentheses).
>>> a = Model()
>>> a.db()
Database()
It creates a new Database instance when it is called similar to your second alternative. This means every call to insert creates a new Database instance, inserts and then deletes the Database instance because there is no reference left pointing to it.
In your first alternative you will always acess the same instance. This means after a call to insert, the Database object is still there.