How to connect to an existing SQLite db for reading only with peewee - python

I have a silly question.
This is my code:
import argparse
import os
import sys

from peewee import *

db = SqliteDatabase(None)


class Base(Model):
    class Meta:
        database = db


class Table(Base):
    a_date = DateField()
    url = CharField()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--db-dir', action='store')
    args = parser.parse_args()
    db_path = os.path.join(args.db_dir, 'data.db')
    try:
        db.init(db_path)
        db.connect()
        query = Table.select().order_by(Table.a_date.desc()).get()
    except Exception:
        sys.exit(1)
    else:
        print(query.url)
        sys.exit(0)


if __name__ == '__main__':
    main()
This code works fine, but if the db file does not exist, db.connect() always creates it. How can I prevent this?
Another question: how can I query the table for this field without declaring the peewee Model?
Thanks

If I understand the peewee docs correctly (http://docs.peewee-orm.com/en/latest/peewee/database.html), peewee uses the sqlite3 module that ships with Python to connect to SQLite.
That means you have to deal with that API (https://docs.python.org/2/library/sqlite3.html#sqlite3.connect), and its connect method always creates the database file if it does not already exist.
I believe, however, that you can pass a custom Connection class to this method (the factory parameter) and define your desired behaviour in that class:
import os
from sqlite3 import Connection

from peewee import *


class CustomConnection(Connection):
    def __init__(self, dbname, *args, **kwargs):
        # Refuse to connect if the db file does not already exist
        if not os.path.exists(dbname):
            raise ValueError('DB {} does not exist'.format(dbname))
        super(CustomConnection, self).__init__(dbname, *args, **kwargs)

db = SqliteDatabase('mydatabase', factory=CustomConnection)
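With that factory in place, connecting to a missing file fails fast instead of silently creating it. A quick usage sketch (hedged: peewee passes unrecognised keyword arguments such as factory through to sqlite3.connect, and 'missing.db' is just a hypothetical file name):

db = SqliteDatabase('missing.db', factory=CustomConnection)
try:
    db.connect()
except ValueError as exc:
    print(exc)  # DB missing.db does not exist

As for the second question, newer peewee releases ship playhouse.reflection, which can introspect an existing database and generate the models for you (a sketch, assuming peewee 3.x and the Table schema from the question; generate_models keys the result by table name):

from peewee import SqliteDatabase
from playhouse.reflection import generate_models

db = SqliteDatabase('data.db')
models = generate_models(db)   # dict: table name -> generated Model class
table = models['table']        # the Table model above maps to the 'table' table
row = table.select().order_by(table.a_date.desc()).get()
print(row.url)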

Related

Peewee retrieves data in python console but not in app

I have entities designed with peewee in Python. Before I started implementing the real database, I made several tests with in-memory databases. When I started to implement the database functionality, I faced a strange problem: my queries return empty results, and moreover the outcome depends on whether I run a script or use the python console.
First of all, let me show that the logic is correct. When I use the python console, everything is ok:
>>> from Entities import *
>>> print (RouterSettings.select().where(RouterSettings.name=='RUT00').get().name)
RUT00
As you can see, everything is correct. The query executes and returns a result. Now the same in a script:
from Entities import *
print (RouterSettings.select().where(RouterSettings.name=='RUT00').get().name)
This one raises an exception: instance matching query does not exist

    print(RouterSettings.select().where(RouterSettings.name=='RUT00').get().name)
  File "C:\Users\Kamil\AppData\Local\Programs\Python\Python37-32\lib\site-packages\peewee.py", line 5975, in get
    (clone.model, sql, params))
Entities.RouterSettingsDoesNotExist: instance matching query does not exist:
SQL: SELECT "t1"."id", "t1"."name", "t1"."ip", "t1"."username", "t1"."password", "t1"."model", "t1"."phone_num", "t1"."provider", "t1"."location" FROM "routersettings" AS "t1" WHERE ("t1"."name" = ?) LIMIT ? OFFSET ?
Params: ['RUT00', 1, 0]
When I was trying to debug, I found that the database was seemingly not created: within the debugged variables, the database object is null (None).
Do you have any ideas what's going on?
My Entities are defined as follows:
from peewee import *


class EnumField(IntegerField):
    def __init__(self, *argv):
        super().__init__()
        self.enum = []
        for label in argv:
            self.enum.append(label)

    def db_value(self, value):
        try:
            return self.enum.index(value)
        except ValueError:
            raise EnumField.EnumValueDoesnExistError(
                "Value doesn't exist in enum set.\nMaybe you forgot to add "
                "that one: " + value + "?")

    def python_value(self, value):
        try:
            return self.enum[value]
        except IndexError:
            raise EnumField.EnumValueDoesnExistError(
                'No value for given id')

    class EnumValueDoesnExistError(Exception):
        pass


class ModelField(EnumField):
    def __init__(self):
        super().__init__('RUT955_Q', 'RUT955_H', 'GLiNet300M')


class ProviderField(EnumField):
    def __init__(self):
        super().__init__('Orange', 'Play', 'Virgin')


class BaseModel(Model):
    class Meta:
        database = SqliteDatabase('SIMail.db', pragmas={'foreign_keys': 1})


class RouterSettings(BaseModel):
    name = CharField(unique=True)
    ip = CharField(unique=True)
    username = CharField()
    password = CharField()
    model = ModelField()
    phone_num = IntegerField(unique=True)
    provider = ProviderField()
    location = CharField()
You are probably running it with a relative path to the database file, and depending on the current working directory when you run your app vs. the console, it uses a different database file.
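A minimal sketch of one fix, anchoring the db file to the module's own location so script and console runs resolve the same path (the __file__-relative layout is an assumption; adjust it to your project):

import os

from peewee import SqliteDatabase

# Resolve the db file relative to this module instead of the current
# working directory, so every entry point hits the same file.
DB_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'SIMail.db')
database = SqliteDatabase(DB_PATH, pragmas={'foreign_keys': 1})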

Cannot patch sqlalchemy database engine

I am using SQLAlchemy (note: not Flask_SQLAlchemy) for a Python 3 project, and I'm trying to write tests for the database by patching the engine with a test engine that points to a test database (as opposed to the production database). In the past I successfully patched Session and had working tests, but I recently switched to using the "insert" method, which is executed with engine.execute(), as opposed to a context-managed session scope invoked with with session_scope() as session:.
So here's the setup: I'm using a db_session module to establish a common session to be used by all DB functions:
import sys
import os
import logging

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from database.util import managed_session
import config

logger = logging.getLogger('default')

dirname = os.path.dirname
sys.path.append(dirname(dirname(__file__)))

engine = create_engine(config.POSTGRES_URI)
Session = sessionmaker(bind=engine)


def session_scope():
    return managed_session(Session)
and then in the crud_function file we have a setup as follows:
import logging
import re
from collections import defaultdict

import pandas as pd
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
from sqlalchemy.dialects.postgresql import insert

from database.db_session import session_scope, engine, Session
from database.models import *
from database.util import windowed_query

logger = logging.getLogger('default')


def store_twitter_user(unprotected_row):
    '''
    Creates a TwitterUser object from the given attributes, adds it to the
    session, and then commits it to the database.
    :param attributes:
    :return:
    '''
    row = defaultdict(lambda: None, unprotected_row)
    pg_upsert(TwitterUser, row)


def pg_upsert(model, row):
    '''Performs an UPDATE OR INSERT ON CONFLICT (upsert), which is a special
    SQL command for Postgres dbs.
    More info here: http://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#insert-on-conflict-upsert
    '''
    try:
        insert_stmt = insert(model.__table__).values(row)
        do_update_stmt = insert_stmt.on_conflict_do_update(
            constraint=model.__table__.primary_key, set_=row)
        engine.execute(do_update_stmt)
        logger.debug('New {} stored successfully!'.format(type(object)))
        return True
    except IntegrityError as e:
        if re.search('violates foreign key constraint .* Key \(author_id\)=\(\d+\) is not present in table', str(e.args)):
            # Sends exception to celery task which will retry the task for a certain number of times
            raise
    except Exception as err:
        logger.error('pg_upsert: An error occurred while trying to store the new {}: {}'.format(model.__mapper__, err))
        return False
database.models just contains a bunch of classes used to create DB models for SQLAlchemy, such as:
class User(Base):
    __tablename__ = 'users'
    id = Column(BigInteger, primary_key=True)
    name = Column(String())
    screen_name = Column(String())
    location = Column(String(), index=True)
    friends_count = Column(Integer)
    created_at = Column(DateTime)
    time_zone = Column(String())
    lang = Column(String())
Now here's the test file:
import unittest
from unittest.mock import patch

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

import config
from database.crud.db_crud_functions import store_twitter_user
from database.models import Base, User
from database.util import managed_session

engine = create_engine(config.POSTGRES_TEST_URI)


class TestDBCRUD(unittest.TestCase):
    ClassIsSetup = False
    ReadyForTeardown = False

    def setUp(self):
        """
        Creates all the tables in the test DB
        :return:
        """
        if not self.ClassIsSetup:
            print("SETTING UP!!!")
            Base.metadata.create_all(engine)
            self.__class__.ClassIsSetup = True

    def tearDown(self):
        """
        Deletes all test DB data and destroys the tables after a test is finished
        :return:
        """
        if self.ReadyForTeardown:
            print("TEARING DOWN!!!")
            Base.metadata.drop_all(engine)
            self.__class__.ReadyForTeardown = False

    @patch('database.crud.db_crud_functions.Session')
    @patch('database.crud.db_crud_functions.engine', autospec=True)
    @patch('database.db_session.Session')
    @patch('database.db_session.engine', autospec=True)
    def test_00_store_user(self, mock_session_engine, mock_session_Session, mock_engine, mock_session):
        print("testing store user!")
        Session = sessionmaker()
        Session.configure(bind=engine)
        mock_session_Session.return_value = Session()
        mock_session_engine.return_value = engine
        mock_session.return_value = Session()
        mock_engine.return_value = engine

        user = User(id=6789, screen_name="yeti")
        user_dict = {'id': 6789, 'screen_name': "yeti"}
        store_twitter_user(user_dict)

        with managed_session(Session) as session:
            retrieved_user = session.query(User).first()
            print("users are: {}".format(retrieved_user))
            self.assertEqual(user.id, retrieved_user.id)
            self.assertEqual(user.screen_name, retrieved_user.screen_name)
You'll notice a stupid amount of patches on top of the test function; that is to show that I've tried to patch the engine and the session from multiple locations. I've read that patches should be made where objects are used, not where they are imported from, so I tried to cover all the bases. It doesn't matter: the test function always ends up inserting a user into the production database, not into the test database. Then, when the retrieval happens, it returns None.
File "tests/testdatabase/test_db_crud_functions.py", line 59, in test_00_store_user
self.assertEqual(user.id, retrieved_user.id)
AttributeError: 'NoneType' object has no attribute 'id'
Again, before pg_upsert was added, I used:
with session_scope() as session:
    session.add(something_here)
And session was successfully mocked to point to POSTGRES_TEST_URI, and not POSTGRES_URI. I'm at a loss here, please let me know if anything sticks out. Thanks!
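One detail worth flagging (an observation about unittest.mock, not a confirmed fix for this case): mock_engine.return_value only takes effect when the mock itself is called like a function, but pg_upsert calls engine.execute(...) on the object directly, so the configured return_value never comes into play. Replacing the engine object outright with new= sidesteps that; a sketch, reusing the module path from the patches above (test_engine is assumed to be an engine bound to POSTGRES_TEST_URI):

from unittest.mock import patch

from sqlalchemy import create_engine

import config
from database.crud.db_crud_functions import store_twitter_user

# Assumed: an engine bound to the test database.
test_engine = create_engine(config.POSTGRES_TEST_URI)

# new= swaps in the real test engine where the production engine is *used*,
# so engine.execute(...) inside pg_upsert hits the test database.
with patch('database.crud.db_crud_functions.engine', new=test_engine):
    store_twitter_user({'id': 6789, 'screen_name': 'yeti'})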

Mongoengine pre_delete FileField

I'm new to mongoengine. I am trying to get the pre_delete hook to delete a FileField stored in GridFS.
I am using Python 2.7.10, Mongo 3.4 and mongoengine 0.8.7.
Here is what I have.
import uuid

import mongoengine as me


class MyFiles(me.Document):
    meta = {"collection": "test"}
    guid = me.UUIDField(binary=False, required=True)
    my_file = me.FileField()

    @classmethod
    def pre_delete(cls, sender, document, **kwargs):
        document.my_file.delete()


if __name__ == '__main__':
    me.connect(db='main', alias='default', host='localhost')
    m = MyFiles(guid=uuid.uuid4())
    m.my_file.new_file(content_type='text/plain')
    m.my_file.write("This is")
    m.my_file.write("my file")
    m.my_file.write("Hooray!")
    m.my_file.close()
    m.save()
    print(m.my_file.read())
    m.delete()
Now I am debugging with a breakpoint on m.delete().
m.my_file.read() worked.
There is a document in collection "test" that refers to the file in GridFS.
There is a file in fs.files, and in fs.chunks.
Now I ran m.delete().
Collection "test" is empty, but fs.files is not empty, and neither is fs.chunks. The file remains.
According to the mongoengine docs for GridFS, I need to run m.my_file.delete() to delete the GridFS entry before deleting the MyFiles document. I have confirmed this works if I put m.my_file.delete() before m.delete(), like so:
m.save()
print(m.my_file.read())
m.my_file.delete()
m.delete()
However, I want it to run in pre_delete; that seems like the purpose of pre_delete. Any ideas what I am doing wrong?
Here is the problem: I did not register the signal. This works:
import uuid

import mongoengine as me


class MyFiles(me.Document):
    meta = {"collection": "test"}
    guid = me.UUIDField(binary=False, required=True)
    my_file = me.FileField()

    @classmethod
    def pre_delete(cls, sender, document, **kwargs):
        document.my_file.delete()

me.signals.pre_delete.connect(MyFiles.pre_delete, sender=MyFiles)


if __name__ == '__main__':
    me.connect(db='main', alias='default', host='localhost')
    m = MyFiles(guid=uuid.uuid4())
    m.my_file.new_file(content_type='text/plain')
    m.my_file.write("This is")
    m.my_file.write("my file")
    m.my_file.write("Hooray!")
    m.my_file.close()
    m.save()
    print(m.my_file.read())
    m.delete()
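To check that the hook actually fired, you can inspect the GridFS collections directly with pymongo (a sketch; count() matches the older pymongo that goes with this Python 2.7 stack, newer versions would use count_documents({})):

from pymongo import MongoClient

client = MongoClient('localhost')
db = client['main']  # the db name used in me.connect() above

# After m.delete(), the document and its GridFS entries should all be gone.
print(db['test'].count())      # 0
print(db['fs.files'].count())  # 0
print(db['fs.chunks'].count()) # 0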

Peewee transaction seems not to work

I ran into a transaction problem when using the python ORM peewee these days. I save two book instances using this ORM, and between the two saves I raise an exception, so I expect that neither of them is saved to the database, but that is not what happens. Could anyone explain why? I am new to Python, thanks.
The code is below:
from peewee import *


def get_db():
    return SqliteDatabase("test.db")


class Book(Model):
    id = PrimaryKeyField()
    name = CharField()

    class Meta:
        database = get_db()


def test_transaction():
    book1 = Book(name="book1")
    book2 = Book(name="book2")
    db = get_db()
    db.create_tables([Book], safe=True)
    try:
        with db.transaction() as tran:
            book1.save()
            raise ProgrammingError("test")
            book2.save()
    except:
        pass
    for book in Book.select():
        print(book.name)


if __name__ == '__main__':
    test_transaction()
The problem is that every time you call get_db() you are instantiating a new database object. Databases are stateful, in that they manage the active connection for a given thread. So what you've essentially got is two different databases: one that your models are associated with, and one that holds your connection and transaction. When you call db.transaction() a transaction does take place, but not on the connection you think it is.
Change the code to read as follows and it will work as you expect:
book1 = Book(name='book1')
book2 = Book(name='book2')
db = Book._meta.database
# ...
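Put together, a minimal corrected sketch (same schema as the question; the single module-level database object shared by the model and the transaction is the point):

from peewee import *

# One database object for both the model binding and the transaction.
db = SqliteDatabase("test.db")


class Book(Model):
    id = PrimaryKeyField()
    name = CharField()

    class Meta:
        database = db


def test_transaction():
    db.create_tables([Book], safe=True)
    try:
        with db.transaction():
            Book(name="book1").save()
            raise ProgrammingError("test")  # triggers a rollback of book1's save
    except ProgrammingError:
        pass
    print(Book.select().count())  # 0: nothing was committed


if __name__ == '__main__':
    test_transaction()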

Explicit Master-Master DB setup with Flask and SQLAlchemy, hopefully with Flask-SQLAlchemy

I want to use an explicit master-master DB setup together with Flask and SQLAlchemy, hopefully supported by Flask-SQLAlchemy.
I want to be able to do something like the following code snippet, but I'm not sure whether Flask-SQLAlchemy supports it:
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy

app = Flask(__name__)

SQLALCHEMY_DATABASE_URI = 'default_DB_uri'
SQLALCHEMY_BINDS = {
    'master1': 'first_master_DB_uri',
    'master2': 'second_master_DB_uri',
}
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_BINDS'] = SQLALCHEMY_BINDS

db = SQLAlchemy(app)


@app.route('/some_endpoint')
def some_endpoint():
    # read some data from the default DB
    readData = db.session.query('select ...')

    m = SomeModel()
    masterSession1 = db.session(bind='master1')
    # persist the data in m into the first master
    masterSession1.add(m)

    masterSession2 = db.session(bind='master2')
    # persist the data into the second master
    masterSession2.add(m)
    return "some return value"
Is there a way to achieve this using Flask-SQLAlchemy and binds?
I guess Flask-SQLAlchemy already handles more than one engine via binds, but I can't see how to use that for an explicit DB selection rather than the model-based selection described here: http://pythonhosted.org/Flask-SQLAlchemy/binds.html
Thanks for the help.
The code below is what I ended up with to get this functionality.
A few notes:
- I changed get_tables_for_bind to bind all tables without an explicit __bind_key__ to all of the binds. This is done in order to be able to call db.create_all() or db.drop_all() and create/drop the tables in all the DBs. For this to work without breaking the default DB selection when no specific bind is given, get_binds was changed to map the None bind again after the original implementation, overriding the Table->Bind mapping.
- If you don't specify a using_bind, everything should work with the default DB.
- SQLAlchemy mapped objects keep a reference to the session and its state, so you can't really add the same object to two DBs. I made a copy of the object before adding it in order to persist it in two DBs; I'm not sure whether there is a better way to do this (see the clone sketch after the usage example below).
- I haven't fully tested this, and it might break other functionality I'm not using or not aware of.
flask-sqlalchemy overrides:
from flask_sqlalchemy import SQLAlchemy, SignallingSession, get_state
from flask_sqlalchemy._compat import itervalues


class UsingBindSignallingSession(SignallingSession):
    _name = None

    def get_bind(self, mapper=None, clause=None):
        if self._name:
            _eng = get_state(self.app).db.get_engine(self.app, bind=self._name)
            return _eng
        else:
            return super(UsingBindSignallingSession, self).get_bind(mapper, clause)

    def using_bind(self, name):
        self._name = name
        return self


class UsingBindSQLAlchemy(SQLAlchemy):
    def create_session(self, options):
        return UsingBindSignallingSession(self, **options)

    def get_binds(self, app=None):
        retval = super(UsingBindSQLAlchemy, self).get_binds(app)
        # get the binds for None again in order to make sure that it is
        # the default bind for tables without an explicit bind
        bind = None
        engine = self.get_engine(app, bind)
        tables = self.get_tables_for_bind(bind)
        retval.update(dict((table, engine) for table in tables))
        return retval

    def get_tables_for_bind(self, bind=None):
        """Returns a list of all tables relevant for a bind.
        Tables without an explicit __bind_key__ will be bound to all binds.
        """
        result = []
        for table in itervalues(self.Model.metadata.tables):
            # if we don't have an explicit __bind_key__, bind this table to all databases
            if table.info.get('bind_key') == bind or table.info.get('bind_key') is None:
                result.append(table)
        return result

db = UsingBindSQLAlchemy()
Now you can do this:
# This is the default DB
SQLALCHEMY_DATABASE_URI = YOUR_MAIN_DB_URI_CONNECT_STRING

# Master1 and Master2
SQLALCHEMY_BINDS = {
    'master1': YOUR_MASTER1_DB_URI_CONNECT_STRING,
    'master2': YOUR_MASTER2_DB_URI_CONNECT_STRING,
}

# Tables without __bind_key__ will be dropped/created on all DBs
# (default, master1, master2)
db.drop_all()
db.create_all()

s = db.session().using_bind('master1')
s.add(SOME_OBJECT)
s.commit()

s = db.session().using_bind('master2')
s.add(SOME_OBJECT_CLONE)  # a clone of the original object, before the first add()
s.commit()

# and the default DB, as always
db.session.add(SOME_OTHER_OBJECT)
db.session.commit()
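As for producing SOME_OBJECT_CLONE, one way is to build a fresh, session-unattached instance from the mapped column values (a hedged sketch; clone_model is a hypothetical helper, not part of Flask-SQLAlchemy, and whether to copy the primary key depends on your replication scheme):

from sqlalchemy import inspect


def clone_model(obj):
    # Hypothetical helper: copies the plain column attributes into a new
    # instance that has no session state, so it can be added to a second DB.
    mapper = inspect(obj).mapper
    data = {attr.key: getattr(obj, attr.key) for attr in mapper.column_attrs}
    return type(obj)(**data)


SOME_OBJECT_CLONE = clone_model(SOME_OBJECT)  # clone before the first add()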
