I am writing a new app that connects to an old database. For now, I'm reflecting the database objects rather than defining them manually as SQLAlchemy ORM classes. I've set up my Flask app like so:
config = Config.get_config_for_env(env)
app = Flask(__name__)
app.config.from_object(config)
metadata = MetaData()
db = SQLAlchemy(metadata=metadata)
db.init_app(app)
app.db = db
app.app_context().push()
# Reflect DB
db.metadata.reflect(bind=db.engine, views=True)
The call above reflects the entire database. My apps normally touch only a few tables at a time, so it makes sense to reflect database tables lazily. This can be done like so:
db.metadata.reflect(bind=db.engine, schema='MySchema', only=['MyTable'])
To do this, I'll need to insert a layer that ensures that, before a query is executed, the schema and table have been reflected. This adds complexity, as all queries will have to go through another layer of code. Is there a way to reflect a schema and table implicitly, on demand, as the query is made?
Well, if you know the name of the table you need, then you can do:
table_to_work_with = Table("tablename", db.metadata, autoload=True, autoload_with=db.engine)
And you can use sqlalchemy.inspect to get the table names, as described here: List database tables with SQLAlchemy
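Building on that, a small helper can make the reflection implicit: reflect the table into the shared metadata the first time it is requested, and serve the cached Table object afterwards. This is a minimal sketch of my own (the get_table helper is not an established API), assuming the Flask-SQLAlchemy setup from the question:

def get_table(name, schema=None):
    """Reflect the table on first use; afterwards return the cached Table."""
    # metadata.tables keys schema-qualified tables as "schema.name"
    key = f"{schema}.{name}" if schema else name
    if key not in db.metadata.tables:
        db.metadata.reflect(bind=db.engine, schema=schema, only=[name])
    return db.metadata.tables[key]

# The first call hits the database to reflect; later calls are dictionary lookups.
orders = get_table("MyTable", schema="MySchema")
rows = db.session.execute(orders.select().limit(10)).fetchall()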
AFAIK, there is no built-in way to do this implicitly. It can, however, be done via a test class. Something like this, where self.clone() clones an object:
import logging
from copy import copy
from functools import lru_cache

from sqlalchemy import MetaData, Table, create_engine
from sqlalchemy.schema import CreateSchema, DropSchema
from sqlalchemy_utils import database_exists

class TempDbApp(BaseApp):

    def __init__(self, env_src, name='TempDbApp', *args, **kwargs):
        super().__init__('t', name, *args, **kwargs)
        self.env_src = env_src
        self.logger = logging.getLogger(__name__)
        self.schemas = ['dbo']
        self.metadata = MetaData()

    def setup(self):
        super().setup()
        self.test_db_name = self.create_db()

    def teardown(self):
        # Do not drop db at end because pool
        super().teardown()
        self.metadata.drop_all(self.db.engine)
        for schema in self.schemas:
            if schema != 'dbo':
                self.db.engine.execute(DropSchema(schema))
        self.drop_db()

    def create_db(self):
        url = copy(self.db.engine.url)
        engine = create_engine(url, connect_args={'autocommit': True}, isolation_level='AUTOCOMMIT')
        res = engine.execute("exec dbo.usp_createShortLivedDB")
        tempdbname = res.fetchone()[0]
        res.close()
        engine.dispose()
        self.db.engine.url.database = tempdbname
        return tempdbname

    def drop_db(self):
        url = copy(self.db.engine.url)
        db = url.database
        url.database = 'master'
        engine = create_engine(url, connect_args={'autocommit': True}, isolation_level='AUTOCOMMIT')
        if database_exists(url):
            assert db != 'master'
            res = engine.execute(f"EXEC dbo.usp_deleteshortliveddb @dbname = '{db}'")
            res.close()

    def fetch_schemas(self):
        results = self.db.engine.execute('SELECT name FROM sys.schemas')
        for schema in results:
            self.schemas.append(schema[0])
        results.close()

    def create_schema(self, schema):
        with self.session() as sess:
            sess.execute(CreateSchema(schema))
        self.schemas.append(schema)

    @lru_cache(maxsize=2048)
    def clone(self, table, schema):
        if schema not in self.schemas:
            self.create_schema(schema)
        self.metadata.reflect(self.engine_src, schema=schema, only=[table])
        self.metadata.drop_all(self.db.engine)  # precautionary, in case a previous test didn't clean up
        self.metadata.create_all(self.db.engine)

    @lru_cache(maxsize=2048)
    def get_table(self, table, schema, db=None):
        self.clone(table, schema)
        return super().get_table(table, schema, db)

    @lru_cache(maxsize=2048)
    def get_selectable(self, table, schema, db=None):
        self.clone(table, schema)
        return Table(table, self.db.metadata, schema=schema, autoload=True,
                     autoload_with=self.db.get_engine(bind=db))

    @lazy  # a cached-property style decorator from the author's codebase
    def engine_src(self):
        conn_string = f'mssql+pymssql://user:pass@{self.env_src}-slo-sql-ds/mydb?charset=utf8'
        return create_engine(conn_string, isolation_level='AUTOCOMMIT')

    def start(self):
        raise Exception("You must not call this method - this app is for testing")
Then a test class can be created using multiple inheritance:
from typing import final

@final
class MyRealWorldClassTest(TempDbApp, MyRealWorldClass):
    pass
Hello, I'm new to Flask and I have an application where I am creating different models and schemas for my entities. The models and schemas are very close to each other, with only a few differences, so I have base classes for my model and schema in order to inherit and re-use the same class. However, I'm having a problem when I need to deserialize them with marshmallow and return the union result.
I'm using marshmallow, SQLAlchemy and flask-apispec. I am not sure if there's a way to use the marshal_with decorator with multiple schemas, since I want to union my results and return the aggregated model.
Here are the endpoint, models and classes I have.
Models:
class BasePublisher(Model):
    __abstract__ = True

    id = Column(db.String(80), primary_key=True, nullable=False)
    date = Column(db.DateTime, default=dt.datetime.utcnow, primary_key=True, nullable=False)
    views = Column(db.Numeric)
    clicks = Column(db.Numeric)
    publisher = Column(db.String(80), primary_key=True, nullable=False)

class Facebook(BasePublisher):
    __tablename__ = 'facebook_table'

    def __init__(self, **kwargs):
        db.Model.__init__(self, **kwargs)

class Pinterest(BasePublisher):
    __tablename__ = 'pin_table'

    def __init__(self, user, **kwargs):
        db.Model.__init__(self, user=user, **kwargs)
Schemas:
class PublisherSchema(Schema):
    date = fields.DateTime(dump_only=True)
    type = fields.Str(dump_only=True)
    views = fields.Number(dump_only=True)
    clicks = fields.Number(dump_only=True)
    publisher = fields.Str(dump_only=True)

class FacebookSchema(PublisherSchema):
    @post_dump
    def dump_data(self, data, **kwargs):
        data["type"] = "Facebook"
        return data

class PinterestSchema(PublisherSchema):
    @post_dump
    def dump_data(self, data, **kwargs):
        data["type"] = "Pinterest"
        return data
View:
@blueprint.route('/api/sample/publishers/<id>', methods=('GET',))
@use_kwargs({'type': fields.Str(), 'start_date': fields.Str(), 'end_date': fields.Str()}, location="query")
@marshal_with(facebook_schema)
def get_data(id, type, start_date=None, end_date=None):
    facebook_data = (Facebook.query.filter_by(id=id)
                     .filter(Facebook.date.between(start_date, end_date))
                     .limit(10).all())
    return facebook_data
Ideally, I would like to do this in my view:
pinterest_data = (Pinterest.query.filter_by(id=id)
                  .filter(Pinterest.date.between(start_date, end_date))
                  .limit(10).all())
facebook_data.query.union(pinterest_data)
A union like this throws an error in the Flask application. I also have slightly different schemas for each publisher, and I don't know how I can return both of them when I serialize with marshmallow.
Something like this, maybe?
@marshal_with(facebook_schema, pinterest_schema)
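No answer is recorded here, but one possible workaround, sketched under the assumption of marshmallow 3 (where dump() returns plain dicts/lists) and reusing the models and schemas from the question: marshal_with expects a single schema, so skip it and dump each result set with its own schema, then return the concatenated payload.

from flask import jsonify

@blueprint.route('/api/sample/publishers/<id>', methods=('GET',))
@use_kwargs({'start_date': fields.Str(), 'end_date': fields.Str()}, location="query")
def get_data(id, start_date=None, end_date=None):
    facebook_data = (Facebook.query.filter_by(id=id)
                     .filter(Facebook.date.between(start_date, end_date))
                     .limit(10).all())
    pinterest_data = (Pinterest.query.filter_by(id=id)
                      .filter(Pinterest.date.between(start_date, end_date))
                      .limit(10).all())
    # Dump each list with its own schema; the post_dump hooks tag each row's "type".
    payload = (FacebookSchema(many=True).dump(facebook_data)
               + PinterestSchema(many=True).dump(pinterest_data))
    return jsonify(payload)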
If I have a SQLAlchemy query object
query = db.session.query(Category)
and I'm trying to refine the query further, as below,
query = query.order_by(Category.tree_id.asc())
then instead of Category, I want to dynamically retrieve the model from the query to use in the expression:
query = query.order_by(query.model.tree_id.asc())  # the goal I'm trying to achieve
My main goal is to extend wtforms_sqlalchemy.fields.QuerySelectField so that I can order the sqlalchemy_mptt model further.
class TreeQuerySelectField(QuerySelectField):
    ...

    def _get_object_list(self):
        if self._object_list is None:
            query = self.query if self.query is not None else self.query_factory()
            if hasattr(self.query.model, 'tree_id'):
                # self.query.model should be the current query model
                query = query.order_by(self.query.model.tree_id, self.query.model.left)
            get_pk = self.get_pk
            self._object_list = list((str(get_pk(obj)), obj) for obj in query)
        return self._object_list

    def iter_choices(self):
        if self.allow_blank:
            yield ("__None", self.blank_text, self.data is None)
        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), obj == self.data)
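No answer appears above, but SQLAlchemy's Query object does expose the entities it selects through the documented column_descriptions accessor; for a single-entity query, the mapped class sits under the 'entity' key. A minimal sketch of the idea (the helper name get_query_model is my own):

def get_query_model(query):
    # column_descriptions is a list with one dict per selected entity;
    # for a plain db.session.query(Model) the class is under 'entity'.
    return query.column_descriptions[0]['entity']

query = db.session.query(Category)
model = get_query_model(query)  # -> Category
query = query.order_by(model.tree_id.asc(), model.left)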
I am using the Flask-Session extension to store user sessions in the database, and I am now trying to query this table to get the stored sessions. The session table is created the following way:
SqlAlchemySessionInterface(app, db, table='Session', key_prefix='')
Now I'd like to get all the entries using a model query call like this:
Session.query.all()
But I am not able to do that, due to the fact that the Session class is an inner class of the SqlAlchemySessionInterface class. Any idea how I can go about querying the table?
The complete SqlAlchemySessionInterface class code is below:
class SqlAlchemySessionInterface(SessionInterface):
    serializer = pickle
    session_class = SqlAlchemySession

    def __init__(self, app, db, table, key_prefix, use_signer=False,
                 permanent=True):
        if db is None:
            from flask_sqlalchemy import SQLAlchemy
            db = SQLAlchemy(app)
        self.db = db
        self.key_prefix = key_prefix
        self.use_signer = use_signer
        self.permanent = permanent

        class Session(self.db.Model):
            __tablename__ = table

            id = self.db.Column(self.db.Integer, primary_key=True)
            session_id = self.db.Column(self.db.String(255), unique=True)
            data = self.db.Column(self.db.LargeBinary)
            expiry = self.db.Column(self.db.DateTime)

            def __init__(self, session_id, data, expiry):
                self.session_id = session_id
                self.data = data
                self.expiry = expiry

            def __repr__(self):
                return '<Session data %s>' % self.data

        # self.db.create_all()
        self.sql_session_model = Session
In your code, you should have from flask_session import Session and Session(app).
Bind the return value of Session(app) to a variable, such as sess.
Then, session_model = sess._get_interface(app).sql_session_model.
Then, you can call session_model.query.all().
Key point: look at the source code of Session.
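Putting those steps together, a minimal sketch (note that _get_interface is a private Flask-Session method, so this leans on an implementation detail that may change between versions):

from flask_session import Session

sess = Session(app)
SessionModel = sess._get_interface(app).sql_session_model

# The inner model class can now be queried like any other.
all_sessions = SessionModel.query.all()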
I am building a GraphQL API using the python packages Flask, SQLAlchemy, Graphene and Graphene-SQLAlchemy. I have followed the SQLAlchemy + Flask Tutorial. I am able to execute queries and mutations to create records. Now I would like to know what is the best way to update an existing record.
Here is my current script schema.py:
from graphene_sqlalchemy import SQLAlchemyObjectType, SQLAlchemyConnectionField
from database.batch import BatchOwner as BatchOwnerModel
import api_utils  # Custom methods to create records in the database
import graphene

class BatchOwner(SQLAlchemyObjectType):
    """Batch owners."""

    class Meta:
        model = BatchOwnerModel
        interfaces = (graphene.relay.Node,)

class CreateBatchOwner(graphene.Mutation):
    """Create batch owner."""

    class Arguments:
        name = graphene.String()

    # Class attributes
    ok = graphene.Boolean()
    batch_owner = graphene.Field(lambda: BatchOwner)

    def mutate(self, info, name):
        record = {'name': name}
        api_utils.create('BatchOwner', record)  # Custom method to create a record in the database
        batch_owner = BatchOwner(name=name)
        ok = True
        return CreateBatchOwner(batch_owner=batch_owner, ok=ok)

class Query(graphene.ObjectType):
    """Query endpoint for GraphQL API."""

    node = graphene.relay.Node.Field()
    batch_owner = graphene.relay.Node.Field(BatchOwner)
    batch_owners = SQLAlchemyConnectionField(BatchOwner)

class Mutation(graphene.ObjectType):
    """Mutation endpoint for GraphQL API."""

    create_batch_owner = CreateBatchOwner.Field()

schema = graphene.Schema(query=Query, mutation=Mutation)
Remarks:
My object BatchOwner has only 2 attributes (Id, name)
To be able to update the BatchOwner name, I assume I need to provide the database Id (not the relay global Id) as an input argument of some update method
But when I query for a BatchOwner from my client, Graphene only returns me the global Id, which is base64 encoded (example: QmF0Y2hPd25lcjox, which corresponds to BatchOwner:1)
Example of response:
{
  "data": {
    "batchOwners": {
      "edges": [
        {
          "node": {
            "id": "QmF0Y2hPd25lcjox",
            "name": "Alexis"
          }
        }
      ]
    }
  }
}
The solution I am thinking of at the moment would be:
Create an update mutation which takes the global Id as an argument
Decode the global Id (how?)
Use the database Id retrieved from the decoded global Id to query on the database and update the corresponding record
Is there a better way to do this?
I have found a solution using the method from_global_id (documented here)
from graphql_relay.node.node import from_global_id
I added the following class to schema.py:
class UpdateBatchOwner(graphene.Mutation):
    """Update batch owner."""

    class Arguments:
        id = graphene.String()
        name = graphene.String()

    # Class attributes
    ok = graphene.Boolean()
    batch_owner = graphene.Field(lambda: BatchOwner)

    def mutate(self, info, id, name):
        # from_global_id('QmF0Y2hPd25lcjox') returns the tuple ('BatchOwner', '1'),
        # so id[1] is the database id.
        id = from_global_id(id)
        record = {'id': id[1], 'name': name}
        api_utils.update('BatchOwner', record)
        batch_owner = BatchOwner(id=id, name=name)
        ok = True
        return UpdateBatchOwner(batch_owner=batch_owner, ok=ok)
And I updated the Mutation class:
class Mutation(graphene.ObjectType):
    """Mutation endpoint for GraphQL API."""

    create_batch_owner = CreateBatchOwner.Field()
    update_batch_owner = UpdateBatchOwner.Field()
I'm wondering if there is a more straightforward way to do this?
How can I update a row's information?
For example I'd like to alter the name column of the row that has the id 5.
Retrieve an object using the tutorial shown in the Flask-SQLAlchemy documentation. Once you have the entity that you want to change, change the entity itself. Then, db.session.commit().
For example:
admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
db.session.commit()

user = User.query.get(5)
user.name = 'New Name'
db.session.commit()
Flask-SQLAlchemy is based on SQLAlchemy, so be sure to check out the SQLAlchemy Docs as well.
There is an update method on the BaseQuery object in SQLAlchemy, which is returned by filter_by.
num_rows_updated = User.query.filter_by(username='admin').update(dict(email='my_new_email@example.com'))
db.session.commit()
The advantage of using update over changing the entity comes when there are many objects to be updated.
If you want to give the add_user permission to all the admins:
rows_changed = User.query.filter_by(role='admin').update(dict(permission='add_user'))
db.session.commit()
Notice that filter_by takes keyword arguments (use only one =) as opposed to filter which takes an expression.
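A quick side-by-side of the two call styles:

# filter_by: keyword arguments, compared with a single '='
User.query.filter_by(username='admin')

# filter: a full column expression on the model, using '=='
User.query.filter(User.username == 'admin')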
This does not work if you modify a pickled attribute of the model. Pickled attributes should be replaced in order to trigger updates:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pprint import pprint

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/users.db'
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.PickleType())

    def __init__(self, name, data):
        self.name = name
        self.data = data

    def __repr__(self):
        return '<User %r>' % self.name

db.create_all()

# Create a user.
bob = User('Bob', {})
db.session.add(bob)
db.session.commit()

# Retrieve the row by its name.
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Modifying data is ignored.
bob.data['foo'] = 123
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {}

# Replacing data is respected.
bob.data = {'bar': 321}
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}

# Modifying data is ignored.
bob.data['moo'] = 789
db.session.commit()
bob = User.query.filter_by(name='Bob').first()
pprint(bob.data)  # {'bar': 321}
Just assigning the value and committing will work for all data types except JSON and pickled attributes. Since the pickled type is explained above, I'll note down a slightly different but easy way to update JSON columns.
class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    data = db.Column(db.JSON)

    def __init__(self, name, data):
        self.name = name
        self.data = data
Let's say the model is like above.
user = User("Jon Dove", {"country":"Sri Lanka"})
db.session.add(user)
db.session.flush()
db.session.commit()
This will add the user to the MySQL database with data {"country": "Sri Lanka"}.
Modifying data will be ignored. My code that didn't work is as follows.
user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
db.session.merge(user)
db.session.flush()
db.session.commit()
Instead of going through the painful work of copying the JSON to a new dict (not just assigning it to a new variable as above), which should have worked, I found a simpler option: there is a way to flag the system that the JSON has changed.
Following is the working code.
from sqlalchemy.orm.attributes import flag_modified

user = User.query.filter(User.name == 'Jon Dove').first()
data = user.data
data["province"] = "south"
user.data = data
flag_modified(user, "data")
db.session.merge(user)
db.session.flush()
db.session.commit()
This worked like a charm.
There is another method proposed along with this method here (see the sketch below).
Hope I've helped someone.
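For reference, the alternative usually proposed alongside flag_modified is SQLAlchemy's mutation-tracking extension, which wraps the column value so in-place changes are detected automatically. A minimal sketch, assuming the same User model as above:

from sqlalchemy.ext.mutable import MutableDict

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    # MutableDict tracks in-place changes, so flag_modified is not needed.
    data = db.Column(MutableDict.as_mutable(db.JSON))

user = User.query.filter(User.name == 'Jon Dove').first()
user.data["province"] = "south"  # detected automatically
db.session.commit()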
In models.py, define the serializers:
import json
from datetime import date, datetime

def default(o):
    if isinstance(o, (date, datetime)):
        return o.isoformat()

def get_model_columns(instance, exclude=[]):
    columns = instance.__table__.columns.keys()
    columns = list(set(columns) - set(exclude))
    return columns

class User(db.Model):
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    ...

    def serializers(self):
        cols = get_model_columns(self)
        dict_val = {}
        for c in cols:
            dict_val[c] = getattr(self, c)
        return json.loads(json.dumps(dict_val, default=default))
In the REST API, we can update the record dynamically by passing the JSON data into the update query:
class UpdateUserDetails(Resource):

    @auth_token_required
    def post(self):
        json_data = request.get_json()
        user_id = current_user.id
        try:
            instance = User.query.filter(User.id == user_id)
            data = instance.update(dict(json_data))
            db.session.commit()
            updateddata = instance.first()
            msg = {"msg": "User details updated successfully", "data": updateddata.serializers()}
            code = 200
        except Exception as e:
            print(e)
            msg = {"msg": "Failed to update the user details! Please contact your administrator."}
            code = 500
        return msg, code
I was looking for something a little less intrusive than @Ramesh's answer (which was good) but still dynamic. Here is a solution attaching an update method to a db.Model object.
You pass in a dictionary and it will update only the columns that you pass in.
class SampleObject(db.Model):
    id = db.Column(db.BigInteger, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    notes = db.Column(db.Text, nullable=False)

    def update(self, update_dictionary: dict):
        for col_name in self.__table__.columns.keys():
            if col_name in update_dictionary:
                setattr(self, col_name, update_dictionary[col_name])
        db.session.add(self)
        db.session.commit()
Then in a route you can do
object = SampleObject.query.where(SampleObject.id == id).first()
object.update(update_dictionary=request.get_json())
Update the columns in Flask:
admin = User.query.filter_by(username='admin').first()
admin.email = 'my_new_email@example.com'
admin.save()  # assumes a custom save() helper on the model; stock Flask-SQLAlchemy would use db.session.commit()
To use the update method (which issues the UPDATE directly, without loading the entry into the session), you have to build the query in steps like this:
query = db.session.query(UserModel)
query = query.filter(UserModel.id == user_id)
query.update(user_dumped)
db.session.commit()