I am following the recommended file structure from the book Mastering Flask.
(The name of my project is Paw)
In Paw/paw/__init__.py:
def create_app(object_name):
    app = Flask(__name__)
    app.config.from_object(object_name)

    db.init_app(app)

    robot = LoggingWeRobot(token='banana', enable_session=True)
    robot.init_app(app, endpoint='werobot', rule='/wechat')

    attach_debugging_logger(app)
    app.register_blueprint(poll_blueprint)
    app.register_blueprint(wechat_blueprint)
    return app
Note that the robot variable is actually needed in my blueprint, wechat, found in: Paw/paw/controllers/wechat.py
@robot.handler
def request_logging_middleware(message, session):
    app.logger.debug("\n%s", request.data)
    return False  # This allows other handlers to continue execution
So my problem is that my blueprint has no access to the robot variable. However, the robot variable should be created in create_app in Paw/paw/__init__.py because I am trying to follow the application factory pattern.
Any recommendations on how to fix this? My project can be found here and I am trying to follow this application structure.
Simply use the same pattern you are already using for db: create robot elsewhere and import it into your Paw/paw/__init__.py file, just as you do with db:
from models import db
from wechat_setup import robot
# wechat_setup is where you would invoke
# robot = LoggingWeRobot(token='banana', enable_session=True)
def create_app(object_name):
    app = Flask(__name__)
    app.config.from_object(object_name)

    db.init_app(app)
    robot.init_app(app, endpoint='werobot', rule='/wechat')
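For illustration, here is a minimal sketch of what Paw/paw/wechat_setup.py and the blueprint module could then look like. The module name wechat_setup follows the comment above, and the LoggingWeRobot import path is an assumption; current_app is used in the handler because there is no module-level app when you use a factory:

# Paw/paw/wechat_setup.py (hypothetical module)
# from paw.logging_robot import LoggingWeRobot  # wherever your subclass lives
robot = LoggingWeRobot(token='banana', enable_session=True)

# Paw/paw/controllers/wechat.py
from flask import current_app, request
from paw.wechat_setup import robot


@robot.handler
def request_logging_middleware(message, session):
    # current_app resolves to the app created by create_app() during a request
    current_app.logger.debug("\n%s", request.data)
    return False  # allow other handlers to continue execution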
I usually put project-wide global variables in one file (say gvars.py).
The project structure then looks something like this:
.
├── etc
│ └── webapp.py
├── models
│ └── common.py
├── views
│ └── common.py
├── gvars.py
└── webapp.py
In other files we just do this:
from gvars import db, robot # or other variables
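As a sketch of what gvars.py might contain under this layout (assuming Flask-SQLAlchemy for db; the robot line is left as a comment because LoggingWeRobot comes from the asker's own project):

# gvars.py: shared, unbound extension instances for the whole project
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

# the robot object would be created here as well, e.g.
# robot = LoggingWeRobot(token='banana', enable_session=True)

create_app() then binds each of them to the application with init_app(app), exactly as in the snippets above.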
I am following the official Flask tutorial.
I have already set up blueprints and they are working just fine.
I can use app.logger.debug('route "/hello" called') from __init__.py to log something; my issue is that I am struggling to use app.logger from routes/routes.py.
I have set up a structure like this:
fzwk_access
├── __init__.py
├── db.py
├── routes
│ ├── routes.py
│ ├── ...
...
So here's the question:
how can I use app.logger from the file routes.py?
I already tried the following:
from flask import app, which did not work
from fzwk_access import __init__, which did not work either, since app is not a global variable there (I guess?)
My __init__.py looks like this:
import os

from flask import Flask


def create_app(test_config=None):
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )

    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)

    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # a simple page that says hello
    @app.route('/hello')
    def hello():
        app.logger.debug('route "/hello" called')
        return 'Hello, World!'

    from . import db
    db.init_app(app)

    from .routes import routes, routes_admin, routes_logs
    app.register_blueprint(routes.home_bp)
    app.register_blueprint(routes_admin.admin_bp)
    # app.register_blueprint(routes_logs.logs_bp)

    return app
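A note on the question itself: because the app object only exists inside create_app(), the usual way to reach its logger from a blueprint module is Flask's current_app proxy. A minimal sketch of what routes/routes.py could look like (the view is illustrative; home_bp is the blueprint name from the register_blueprint call above):

# routes/routes.py
from flask import Blueprint, current_app

home_bp = Blueprint('home', __name__)


@home_bp.route('/')
def index():
    # current_app points at the application created by create_app()
    # whenever a request is being handled
    current_app.logger.debug('route "/" called')
    return 'Home'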
I am developing a Flask-based web app. A user can enter car specifications and will get a prediction of the price of the car based on a machine learning model.
I was following many tutorials on how to create a web app, but I feel confused about where to put the configuration of the machine learning component and how to structure the code correctly.
I have the following folder structure of my project:
├── webapp
│   ├── app
│   │   ├── static
│   │   ├── templates
│   │   ├── routes.py
│   │   ├── utils.py      --> util functions that are used in 'routes.py'
│   ├── src
│   │   ├── ml_utils.py   --> functions for the machine learning component
│   ├── else stuff
in routes.py:
from flask import Flask, request, render_template
from sklearn.externals import joblib
import numpy as np
from app.utils import find_freshest_model, convert_to_float, process_features_info_for_option_html, create_features
from src.ml_utils import load_features_info

app = Flask(__name__)


@app.route('/')
def home():
    return render_template('index.html', car_type=option_values['car type'])


@app.route('/', methods=['POST'])
def predict():
    # order features correctly according to the order in features_info
    features = create_features(request, features_info)
    prediction = model.predict(features)
    return render_template('index.html',
                           prediction_text='Your predicted car price is {} Euro'.format(prediction),
                           quality=option_values['quality'])


if __name__ == '__main__':
    model_file = find_freshest_model()
    features_info = load_features_info()  # contains the correct order of the features and their categorization (numerical, categorical)
    option_values = process_features_info_for_option_html(features_info['features_dummy'])
    model = joblib.load(model_file)
    app.run(host='0.0.0.0', debug=True)
Task and Questions:
I want to prepare my app for production and to structure it better.
Should I put the following code in __init__.py?
Regarding the code under if __name__ == '__main__': should I create a class ModelConfigs and put it into models.py? In __init__.py I would then import ModelConfigs and use it in routes.py.
models.py
from src.ml_utils import load_features_info
from sklearn.externals import joblib
from app.utils import find_freshest_model, process_features_info_for_option_html


class ModelConfigs:
    __tablename__ = 'modelConfigs'

    model = joblib.load(find_freshest_model())
    features_info = load_features_info()
    option_values = process_features_info_for_option_html(features_info['features_dummy'])
__init__.py:
from flask import Flask

app = Flask(__name__)

from app.models import ModelConfigs
model_config = ModelConfigs

from app import routes
routes.py:
from flask import request, render_template
import numpy as np
from app.utils import create_features
from app import app, model_config


@app.route('/')
def home():
    return render_template('index.html', car_type=model_config.option_values['car type'])


@app.route('/', methods=['POST'])
def predict():
    features = create_features(request, model_config.features_info)
    prediction = np.expm1(model_config.model.predict(features))
    return render_template('index.html',
                           prediction_text='Your predicted car price is {} Euro'.format(prediction),
                           quality=model_config.option_values['quality'])
Flask has a feature called "Blueprints," which allows you to separate your application into several folders so that routes and views can be kept more neatly in their own folders, while still letting one Python file call on each of them individually as needed.
I mention this because it's one of the parts of Flask that Flask itself likes to highlight. Blueprints let you keep your project structure cleaner while remaining completely customizable. Hypothetically you could even build your own machine learning pipeline right within a blueprint; I can't think of a specific application for that, but who knows? The capability is there. A rough sketch of the idea follows below.
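For example, a predictions blueprint combined with an application factory could look roughly like this sketch. The blueprint name, the config keys, and create_app itself are illustrative assumptions; the helper functions are the ones you already have in app/utils.py and src/ml_utils.py:

# app/predictions.py -- hypothetical blueprint module
from flask import Blueprint, current_app, render_template, request

from app.utils import create_features

predictions_bp = Blueprint('predictions', __name__)


@predictions_bp.route('/', methods=['POST'])
def predict():
    # the model and feature metadata are loaded once in create_app()
    # and stored on app.config, so the view only reads them here
    model = current_app.config['PRICE_MODEL']
    features_info = current_app.config['FEATURES_INFO']
    features = create_features(request, features_info)
    prediction = model.predict(features)
    return render_template('index.html',
                           prediction_text='Your predicted car price is {} Euro'.format(prediction))


# app/__init__.py -- hypothetical application factory
import joblib
from flask import Flask

from app.utils import find_freshest_model
from src.ml_utils import load_features_info


def create_app():
    app = Flask(__name__)
    app.config['PRICE_MODEL'] = joblib.load(find_freshest_model())
    app.config['FEATURES_INFO'] = load_features_info()

    from app.predictions import predictions_bp
    app.register_blueprint(predictions_bp)
    return app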
Outside of blueprints, from what I have read in a lot of different blogs and from my own practice, for a small machine learning project you might start off by just throwing a few things into /src, since that's the "source" folder where you put the kinds of things that run on the server. However, you may quickly outgrow that and need to split /src into several folders that represent a legitimate data science project or pipeline structure of some kind.
One way you might consider structuring your folder would be the following:
└── src
│ ├── features
│ ├── preparation
│ ├── preprocessing
│ ├── evaluation
│ └── js
└── tests
│ └── unit_tests
└── models
│ └── retrained_models
└── data
│ └── raw_data
│ └── processed_data
│ └── user_input_data
└── pipeline
│ └── model_retraining_automation_scripts
└── docs
└── Documentation
└── Notebooks
All of the above assumes you are storing everything on the server itself, which is restrictive from a data engineering perspective. Servers are typically more expensive and run on SSDs or the like. So if you start to grow in size and have massive amounts of data, you may need to store it in a document store such as AWS S3. If that's the case, you can keep the folder structure above for the operations and .py files that perform similar functions at a small scale and translate it to the larger scale, but you might then need to store trained models as actual binaries in a database or, growing even larger, in S3 buckets, with some way of tracking what goes where, presumably a relational database.
I am confused about how to use Flask-Migrate when I have multiple models.
Basically my Flask app looks like this:
app
├── __init__.py
├── config.py
├── manage.py
├── migrations
├── models
│ ├── model1.py
│ ├── model2.py
├── resources
├── run.py
└── tests
I've read that for each model it's best to create the db = SQLAlchemy() object in the model's file and then import this db object into the app's __init__.py like so:
from models.model1 import db
db.init_app(app)
from models.model2 import db
db.init_app(app)
However, if I do this for multiple model files, how can I add Flask's migrate functionality, considering I can only use one SQLAlchemy object for the Migrate class instantiation:
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
Would it be best in this case to define a single sql alchemy object in the __init__.py file and import that into all my models?
You misread the referenced text. It talks about something completely different: keeping your db object separate from the app object (and tying the two together in the create_app factory function). Creating multiple db objects only complicates matters for you.
All that is needed is a single db = SQLAlchemy() object, and all the files that define models need to be imported. Usually that's done directly or indirectly via your create_app factory function; you need to call create_app() anyway to be able to run the flask db command-line tool.
Next, you do not need to create a manager either. The Manager object is a hold-over from the time before the Flask project added support for scripts itself. If you are using Flask 0.12 or newer, you don't want to be using Flask-Script and its Manager.
So, all you need in your __init__.py is:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate

db = SQLAlchemy()


def create_app(test_config=None):
    app = Flask(__name__)
    app.config.from_object(f"{__name__}.config")
    app.config.from_envvar("PROJECTNAME_SETTINGS", silent=True)
    if test_config:
        app.config.from_mapping(test_config)

    db.init_app(app)
    Migrate(app, db)

    # importing the models to make sure they are known to Flask-Migrate
    from .models import model1, model2

    # any other registrations; blueprints, template utilities, commands

    return app
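Each model module then simply imports that single db object. A minimal sketch of what models/model1.py could look like (the User model is purely illustrative, and this assumes the package containing __init__.py is importable as app):

# models/model1.py -- illustrative only
from app import db  # the single SQLAlchemy() instance from __init__.py


class User(db.Model):
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), nullable=False)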
I have a flask application which uses jinja2 template filters. An example of template filter is as follows:
@app.template_filter('int_date')
def format_datetime(date):
    if date:
        return utc_time.localize(date).astimezone(london_time).strftime('%Y-%m-%d %H:%M')
    else:
        return date
This works fine if we have an app instantiated before the decorator is defined; however, if we are using an app factory combined with the Flask-Script Manager, then we don't have an instantiated app. For example:
def create_my_app(config=None):
    app = Flask(__name__)
    if config:
        app.config.from_pyfile(config)
    return app

manager = Manager(create_my_app)
manager.add_option("-c", "--config", dest="config", required=False)


@manager.command
def mycommand(app):
    app.do_something()
Manager accepts either an instantiated app or an app factory, so at first glance it appears that we can do this:
app = create_my_app()


@app.template_filter('int_date')
....

manager = Manager(app)
The problem with this solution is that the manager then ignores the option, since the app has already been configured during instantiation. So how is someone supposed to use template filters together with the flask-script extension?
This is where blueprints come into play. I would define a core blueprint and put all my custom template filters in, say, core/filters.py.
To register filters on the application when using blueprints, you need to use app_template_filter instead of template_filter. This way you can still use the decorator pattern to register filters while keeping the application factory approach.
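For instance, a minimal sketch of the core blueprint and its filter module might look like this (the pytz timezone objects stand in for the utc_time/london_time used in the question):

# core/__init__.py -- core blueprint instance
from flask import Blueprint

core = Blueprint('core', __name__)

from . import filters  # imported at the bottom to avoid a circular import


# core/filters.py -- define filters here
import pytz

from . import core

utc_time = pytz.utc
london_time = pytz.timezone('Europe/London')


@core.app_template_filter('int_date')
def format_datetime(date):
    # available in every template once the core blueprint is registered on the app
    if date:
        return utc_time.localize(date).astimezone(london_time).strftime('%Y-%m-%d %H:%M')
    return date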
A typical directory layout for an application using blueprints might look something like:
├── app
│ ├── blog
│ │ ├── __init__.py # blog blueprint instance
│ │ └── routes.py # core filters can be used here
│ ├── core
│ │ ├── __init__.py # core blueprint instance
│ │ ├── filters.py # define filters here
│ │ └── routes.py # any core views are defined here
│ └── __init__.py # create_app is defined here & blueprint registered
└── manage.py # application is configured and created here
For a minimal working example of this approach see: https://github.com/iiSeymour/app_factory
The solution can be found here, which states there are two ways one can define a Jinja template filter. Thus, instead of defining a decorator outside the factory, one can modify the jinja_env instead. This can be done in the app factory, for example:
def format_datetime(date):
    if date:
        return utc_time.localize(date).astimezone(london_time).strftime('%Y-%m-%d %H:%M')
    else:
        return date


def create_app(production=False):
    app = Flask(__name__)
    ....
    # Register Jinja2 filters
    app.jinja_env.filters['datetime'] = format_datetime


manager = Manager(create_app)
...
I would like to access an sqlite3 database from a Flask application (without using Flask-SQLAlchemy, since I require fts4 functionality). I am using Flask blueprints, and I am not sure where to put the following functions (shamelessly copied from a response to this stackoverflow question):
def request_has_connection():
    return hasattr(flask.g, 'dbconn')


def get_request_connection():
    if not request_has_connection():
        flask.g.dbconn = sqlite3.connect(DATABASE)
        # Do something to make this connection transactional.
        # I'm not familiar enough with SQLite to know what that is.
    return flask.g.dbconn


@app.teardown_request
def close_db_connection(ex):
    if request_has_connection():
        conn = get_request_connection()
        # Rollback
        # Alternatively, you could automatically commit if ex is None
        # and rollback otherwise, but I question the wisdom
        # of automatically committing.
        conn.close()
My file structure is:
app
├── __init__.py
├── main
│ ├── forms.py
│ ├── __init__.py
│ ├── views.py
├── models.py
├── static
└── templates
├── base.html
├── index.html
└── login.html
I want the request_has_connection() and get_request_connection() functions accessible from all view functions, and maybe from models.py as well. Right now I'm thinking they all belong in my blueprint's __init__.py, which currently contains:
from flask import Blueprint

main = Blueprint('main', __name__)

from . import views
and that my request teardown function would be registered as
@main.teardown_request
def close_db_connection(ex):
    <blah-blah-blah>
Is this right?
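(For comparison, one arrangement you might consider, sketched here purely as an assumption rather than taken from your project, is to keep these helpers in their own module, say app/db.py, and register the teardown from create_app; views.py and models.py can then import them from one place:)

# app/db.py -- sketch of keeping the connection helpers in one module
import sqlite3

import flask

DATABASE = 'app.db'  # assumption: the path would normally come from app.config


def request_has_connection():
    return hasattr(flask.g, 'dbconn')


def get_request_connection():
    if not request_has_connection():
        flask.g.dbconn = sqlite3.connect(DATABASE)
    return flask.g.dbconn


def close_db_connection(ex):
    if request_has_connection():
        flask.g.dbconn.close()


def init_app(app):
    # called from create_app() so the teardown runs for every request,
    # regardless of which blueprint handled it
    app.teardown_request(close_db_connection)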