Why Doesn't Django Reset Sequences in SQLite3?

Why does Django allow you to reset the sequences (auto ID fields) on PostgreSQL and other DBMSs but not SQLite3?
Looking at the source code for the sql_flush method in django/db/backends/sqlite3/base.py, I found a comment that says:
Note: No requirement for reset of auto-incremented indices (cf. other sql_flush() implementations). Just return SQL at this point
I have a few tests where I load in fixture files that depend on absolute primary key ids. Because Django doesn't reset the auto id field for SQLite, these fixtures do not load correctly.
It appears that it is fairly trivial to reset the auto ID columns in SQLite: How can I reset a autoincrement sequence number in sqlite
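For reference, the linked approach boils down to a single statement against SQLite's internal sqlite_sequence bookkeeping table. A minimal sketch using the stdlib, with a hypothetical table name:

import sqlite3

conn = sqlite3.connect('db.sqlite3')
# Deleting a table's row from sqlite_sequence restarts its AUTOINCREMENT counter at 1.
conn.execute("DELETE FROM sqlite_sequence WHERE name = ?", ('my_table',))  # 'my_table' is hypothetical
conn.commit()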

You can monkey-patch sql_flush as follows to reset SQLite sequences:
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.db import connection

def _monkey_patch_sqlite_sql_flush_with_sequence_reset():
    original_sql_flush = DatabaseOperations.sql_flush

    def sql_flush_with_sequence_reset(self, style, tables, sequences, allow_cascade=False):
        sql_statement_list = original_sql_flush(self, style, tables, sequences, allow_cascade)
        if tables:
            # DELETE FROM sqlite_sequence WHERE name IN ($tables)
            sql = '%s %s %s %s %s %s (%s);' % (
                style.SQL_KEYWORD('DELETE'),
                style.SQL_KEYWORD('FROM'),
                style.SQL_TABLE(self.quote_name('sqlite_sequence')),
                style.SQL_KEYWORD('WHERE'),
                style.SQL_FIELD(self.quote_name('name')),
                style.SQL_KEYWORD('IN'),
                ', '.join(style.SQL_FIELD(f"'{table}'") for table in tables),
            )
            sql_statement_list.append(sql)
        return sql_statement_list

    DatabaseOperations.sql_flush = sql_flush_with_sequence_reset
You would use it, for example, in a TransactionTestCase as follows:
from django.test import TransactionTestCase

class TransactionTestCaseWithSQLiteSequenceReset(TransactionTestCase):
    reset_sequences = True

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        if connection.vendor == 'sqlite':
            _monkey_patch_sqlite_sql_flush_with_sequence_reset()
This ensures that tests that depend on fixed primary keys work with both SQLite and other database backends such as PostgreSQL. However, see the Django documentation for caveats regarding reset_sequences; for one thing, it makes tests slow.

Maybe this snippet will help:
import os
from io import StringIO  # django.utils.six was removed in Django 3.0; use the stdlib instead

from django.core.management import call_command
from django.db import connection

def reset_sequences(app_name):
    os.environ['DJANGO_COLORS'] = 'nocolor'
    buf = StringIO()
    call_command('sqlsequencereset', app_name, stdout=buf)
    buf.seek(0)
    sql = "".join(buf.readlines())
    with connection.cursor() as cursor:
        cursor.execute(sql)
    print("Sequences for app '{}' reset".format(app_name))

Related

How to pass 'using' DB to the django connection object

To query a specific database in django I can do:
Item.objects.using('specific_db').all()
Is there a way to do the same using a django connection? For example:
>>> from django.db import connection
>>> cursor=connection.using('specific_db').cursor()
If not, how could I get a cursor/connection for a specific DB without manually providing all the credentials?
According to the Django documentation on using raw SQL on multiple databases, you would use connections rather than connection:
from django.db import connections
cursor = connections['specific_db'].cursor()
cursor.execute("select * from item")
Use this code snippet for performing raw SQL queries:
from django.db import connection

def connect_to_db():
    with connection.cursor() as cursor:
        cursor.execute("SELECT * FROM Table")
        # Pair column names with each row so results come back as dicts.
        columns = [col[0] for col in cursor.description]
        return [dict(zip(columns, row)) for row in cursor.fetchall()]

migrations are getting created repeatedly

I have created some models, and when I run the python manage.py db migrate command it creates a migrations file, so that is fine.
The python manage.py db upgrade command also creates the tables in the database.
If I run python manage.py db migrate again, it creates another migrations file for the models that I have already upgraded.
Can you please help me resolve this?
I had the same problem and I've resolved it.
In my case, there was a problem getting the current table names (when calling the get_table_names function in _autogen_for_tables in alembic/autogenerate/compare.py).
I am using SQLAlchemy with mysql-connector, and mysql-connector returns table information as bytearrays.
So I temporarily changed the following in base.py (sqlalchemy/dialects/mysql):
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
    """Return a Unicode SHOW TABLES from a given schema."""
    if schema is not None:
        current_schema = schema
    else:
        current_schema = self.default_schema_name
    charset = self._connection_charset
    if self.server_version_info < (5, 0, 2):
        rp = connection.execute(
            "SHOW TABLES FROM %s"
            % self.identifier_preparer.quote_identifier(current_schema)
        )
        return [
            row[0] for row in self._compat_fetchall(rp, charset=charset)
        ]
    else:
        rp = connection.execute(
            "SHOW FULL TABLES FROM %s"
            % self.identifier_preparer.quote_identifier(current_schema)
        )
        return [
            row[0]
            for row in self._compat_fetchall(rp, charset=charset)
            if row[1] == "BASE TABLE"
        ]
to
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
    """Return a Unicode SHOW TABLES from a given schema."""
    if schema is not None:
        current_schema = schema
    else:
        current_schema = self.default_schema_name
    charset = self._connection_charset
    if self.server_version_info < (5, 0, 2):
        rp = connection.execute(
            "SHOW TABLES FROM %s"
            % self.identifier_preparer.quote_identifier(current_schema)
        )
        return [
            row[0] for row in self._compat_fetchall(rp, charset=charset)
        ]
    else:
        rp = connection.execute(
            "SHOW FULL TABLES FROM %s"
            % self.identifier_preparer.quote_identifier(current_schema)
        )
        return [
            row[0].decode("utf-8")
            for row in self._compat_fetchall(rp, charset=charset)
            if row[1].decode("utf-8") == "BASE TABLE"
        ]
I think the problem is in manage.py. If you did it as described on the Flask-Migrate site and stored all your models in that file, Flask-Migrate just takes those models and generates migrations, and it will do so every time. You wrapped the standard command in your own file, and that is the problem.
If you want to fix it, store the models in another directory (or another file), add them to an app, and use the command flask db migrate. In this case, Flask-Migrate will generate a migration for the models only the first time; after that, it will detect changes and generate migrations only for the changes.
But be careful: Flask-Migrate doesn't see all changes. From the site:
The migration script needs to be reviewed and edited, as Alembic currently does not detect every change you make to your models. In particular, Alembic is currently unable to detect table name changes, column name changes, or anonymously named constraints. A detailed summary of limitations can be found in the Alembic autogenerate documentation.
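For instance, a column rename that autogenerate misreads as a drop-and-add can be hand-edited into an explicit rename. A minimal sketch, with hypothetical table and column names:

from alembic import op

def upgrade():
    # Autogenerate would emit drop_column() + add_column() here, losing the data;
    # an explicit rename preserves it.
    op.alter_column('users', 'fullname', new_column_name='full_name')

def downgrade():
    op.alter_column('users', 'full_name', new_column_name='fullname')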

SQLAlchemy Create View in PostgreSQL

I am trying to create a view with SQLAlchemy with PostgreSQL as the underlying DB. The separate select query used to create the view works well and returns results, but when I use it to create the view I get the error sqlalchemy.exc.NoSuchTableError: popular, which means the view does not exist. I get the error when I try to select from the view. Creating the view does not throw any error, but it does not actually create the view. Here is my code:
from sqlalchemy import *
import sqlalchemy as db
from sqlalchemy import func
from sqlalchemy import desc
from sqlalchemy import Table
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Executable, ClauseElement

try:
    engine = db.create_engine('postgresql://user:pass@localhost:5432/db_name')
    connection = engine.connect()
except:
    print('Error establishing DB connection')

# Import metadata
metadata = db.MetaData()

# Import articles, authors and log tables
art = db.Table('articles', metadata, autoload=True, autoload_with=engine)
aut = db.Table('authors', metadata, autoload=True, autoload_with=engine)
log = db.Table('log', metadata, autoload=True, autoload_with=engine)

class CreateView(Executable, ClauseElement):
    def __init__(self, name, select):
        self.name = name
        self.select = select

@compiles(CreateView)
def visit_create_view(element, compiler, **kw):
    return "CREATE VIEW %s AS %s" % (
        element.name,
        compiler.process(element.select, literal_binds=True)
    )

# Method to create view with top three articles
def view_top_three():
    top_three_view = CreateView('popular', db.select([art.columns.title, func.count(log.columns.path)]) \
        .where(func.concat('/article/', art.columns.slug) == log.columns.path) \
        .where(log.columns.path != "/") \
        .group_by(log.columns.path, art.columns.title) \
        .order_by(desc(func.count(log.columns.path))) \
        .limit(3))
    engine.execute(top_three_view)
    v = Table('popular', metadata, autoload=True, autoload_with=engine)
    for r in engine.execute(v.select()):
        print(r)

# Call the method which creates view and selects from view
view_top_three()
Any help will be appreciated.
Since your CreateView inherits from Executable and ClauseElement, it is not considered a data-changing operation. In other words,
engine.execute(top_three_view)
executes the CREATE VIEW statement and then implicitly rolls back when the connection is returned to the pool.
Instead, it should be a subclass of DDLElement, as shown in the usage recipes wiki. Simply changing the base class allows SQLAlchemy's autocommit to work properly.
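A minimal sketch of that change, following the wiki recipe (only the import and the base class differ from the code above):

from sqlalchemy.schema import DDLElement
from sqlalchemy.ext.compiler import compiles

class CreateView(DDLElement):
    def __init__(self, name, select):
        self.name = name
        self.select = select

@compiles(CreateView)
def visit_create_view(element, compiler, **kw):
    return "CREATE VIEW %s AS %s" % (
        element.name,
        compiler.process(element.select, literal_binds=True)
    )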
I found the solution. The issue was to do with autocommit. Setting autocommit to true when creating the engine solved it, as follows:
engine = db.create_engine('postgresql://user:pass@localhost:5432/db_name').execution_options(autocommit=True)
Special mention to @ilja-everilä.

Django UnicodeEncodeError when importing from db

I have a custom management command to import data from one db and create model instances from it. Basically it is:
class Command(BaseCommand):
    def handle(self, **kwargs):
        cursor = connections['db2'].cursor()
        sql = 'SELECT * FROM table'
        cursor.execute(sql)
        for row in cursor.fetchall():
            my_model = MyModel(*row)
            my_model.save()
First I was importing from SQLite to SQLite and all went well. But when I switched to MySQL as my main db, I started getting UnicodeEncodeError when calling my_model.save(). The caveat is that I have non-ASCII symbols in the db (namely Russian), but as I understand it, Django converts all strings to Unicode. And yes, both dbs use utf-8.
It seems that the bug is due to the Mezzanine app I'm using:
https://github.com/stephenmcd/mezzanine/issues/1132

Custom sqlite database for unit tests for code using peewee ORM

I am trying to implement a many-to-many scenario using the peewee Python ORM, and I'd like some unit tests. The peewee tutorial is great, but it assumes that the database is defined at module level and that all models use it. My situation is different: I don't have a source code file (a module from Python's point of view) with tests which I run explicitly; I am using nose, which collects tests from that file and runs them.
How do I use a custom database only for models instantiated in tests (which are being run by nose)? My goal is to use an in-memory database for tests only, to speed up the testing process.
I just pushed a commit today that makes this easier.
The fix is in the form of a context manager which allows you to override the database of a model:
from unittest import TestCase
from playhouse.test_utils import test_database
from peewee import *

from my_app.models import User, Tweet

test_db = SqliteDatabase(':memory:')

class TestUsersTweets(TestCase):
    def create_test_data(self):
        # ... create a bunch of users and tweets
        for i in range(10):
            User.create(username='user-%d' % i)

    def test_timeline(self):
        with test_database(test_db, (User, Tweet)):
            # This data will be created in `test_db`
            self.create_test_data()

            # Perform assertions on test data inside ctx manager.
            self.assertEqual(Tweet.timeline('user-0') [...])

        # once we exit the context manager, we're back to using the normal database
See the documentation and have a look at the example testcases:
Context manager
Testcases showing how to use
To avoid including the context manager in every test case, override the run method.
# imports and db declaration

class TestUsersTweets(TestCase):
    def run(self, result=None):
        with test_database(test_db, (User, Tweet)):
            super(TestUsersTweets, self).run(result)

    def test_timeline(self):
        self.create_test_data()
        self.assertEqual(Tweet.timeline('user-0') [...])
I took the great answers from @coleifer and @avalanchy one step further.
In order to avoid overriding the run method on every TestCase subclass, you can use a base class... and I also like the idea of not having to write down every model class I work with, so I came up with this:
import unittest
import inspect
import sys
import peewee
from abc import ABCMeta
from playhouse.test_utils import test_database
from business_logic.models import *

test_db = peewee.SqliteDatabase(':memory:')

class TestCaseWithPeewee(unittest.TestCase):
    """
    This abstract class is used to "inject" the test database so that the tests don't use the real sqlite db
    """
    __metaclass__ = ABCMeta

    def run(self, result=None):
        model_classes = [m[1] for m in inspect.getmembers(sys.modules['business_logic.models'], inspect.isclass)
                         if issubclass(m[1], peewee.Model) and m[1] != peewee.Model]
        with test_database(test_db, model_classes):
            super(TestCaseWithPeewee, self).run(result)
So now I can just inherit from TestCaseWithPeewee and don't have to worry about anything other than the test.
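For example, a test case then reduces to this (a sketch; it assumes a User model exported by business_logic.models):

class TestUsers(TestCaseWithPeewee):
    def test_create_user(self):
        # Runs against the in-memory test_db injected by the base class.
        user = User.create(username='alice')
        self.assertEqual(User.get(User.username == 'alice').id, user.id)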
Apparently, there's a new approach for the scenario described, where you can bind the models in the setUp() method of your test case:
Example from the official docs:
# tests.py
import unittest

from my_app.models import EventLog, Relationship, Tweet, User

MODELS = [User, Tweet, EventLog, Relationship]

# use an in-memory SQLite for tests.
test_db = SqliteDatabase(':memory:')

class BaseTestCase(unittest.TestCase):
    def setUp(self):
        # Bind model classes to test db. Since we have a complete list of
        # all models, we do not need to recursively bind dependencies.
        test_db.bind(MODELS, bind_refs=False, bind_backrefs=False)

        test_db.connect()
        test_db.create_tables(MODELS)

    def tearDown(self):
        # Not strictly necessary since SQLite in-memory databases only live
        # for the duration of the connection, and in the next step we close
        # the connection...but a good practice all the same.
        test_db.drop_tables(MODELS)

        # Close connection to db.
        test_db.close()

        # If we wanted, we could re-bind the models to their original
        # database here. But for tests this is probably not necessary.
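A concrete test case then just inherits the bindings. A minimal sketch, assuming the User model has a username field:

class TestUserModel(BaseTestCase):
    def test_create_user(self):
        # setUp() has already bound the models to test_db and created the tables.
        User.create(username='alice')
        self.assertEqual(User.select().count(), 1)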
When using test_database I encountered problems with test_db not being initialized:
nose.proxy.Exception: Error, database not properly initialized before opening connection
-------------------- >> begin captured logging << --------------------
peewee: DEBUG: ('SELECT "t1"."id", "t1"."name", "t1"."count" FROM "counter" AS t1', [])
--------------------- >> end captured logging << ---------------------
I eventually fixed this by passing create_tables=True like so:
def test_timeline(self):
    with test_database(test_db, (User, Tweet), create_tables=True):
        # This data will be created in `test_db`
        self.create_test_data()
According to the docs, create_tables should default to True, but it seems that isn't the case in the latest release of peewee.
For anyone who's using pytest, here's how I did it:
conftest.py
import pytest
from peewee import SqliteDatabase

from my_app.models import User, Tweet  # wherever your models live

MODELS = [User, Tweet]  # Also add get_through_model() for ManyToMany fields

test_db = SqliteDatabase(':memory:')
test_db.bind(MODELS, bind_refs=False, bind_backrefs=False)
test_db.connect()
test_db.create_tables(MODELS)

@pytest.fixture(autouse=True)
def in_mem_db(mocker):
    # `mocker` comes from the pytest-mock plugin.
    mocked_db = mocker.patch("database.db", autospec=True)  # "database.db" is where your app's code imports db from
    mocked_db.return_value = test_db
    return mocked_db
And voila, all your tests run with an in-memory sqlite database.
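Since the fixture is autouse, individual tests need no extra setup. A minimal sketch, assuming the User model from the conftest above:

def test_create_user():
    # The mocked database handle points at test_db, so this hits the in-memory SQLite.
    user = User.create(username='alice')
    assert User.get(User.username == 'alice').id == user.id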
