Is this the right way of logging in Flask? - python

I have written a Flask App as follows:
import logging
from flask import Flask, jsonify
from mongo import Mongo
app = Flask(__name__)
app.config.from_object("config.ProductionConfig")
# routes to a particular change and patch number
#app.route("/<project>/")
def home(project):
app.logger.info('testing info log')
Mongo_obj = Mongo(ip=app.config["DB_HOST"], port=app.config["DB_PORT"],
username=app.config["DB_USERNAME"],
.....................
if __name__ == '__main__':
app.run(host = '0.0.0.0', port = '5000')
Now, the problem I face is, when I look at the logs of the Flask application, all I see is the following:
* Serving Flask app "service" (lazy loading)
* Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
* Debug mode: off
* Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
172.16.40.189 - - [01/Oct/2019 14:44:29] "GET /abc HTTP/1.1" 200 -
172.16.11.231 - - [01/Oct/2019 14:44:29] "GET /abc HTTP/1.1" 200 -
............
Is there something specific that needs to be done in order to see the log message? Do I need to run the Flask App in debug mode?

If not in debug mode the default log level is WARNING. That's why you don't see your logs. If you'd like your logs to contain INFO level ones you must set it within the logger, eg:
app = Flask(__name__)
app.logger.setLevel(logging.INFO)

This is how I set log levels in our app. I use the create_app function to create a Flask app with error handling, logging, and other necessary configuration. Here is the snippet:
from flask import Flask
import logging
def create_app(test_config=None):
    """Application factory: create and configure the Flask app.

    The logger level is set to ERROR, so of the five test calls below
    only the ERROR and CRITICAL records are actually emitted.
    """
    # create and configure the app
    app = Flask(__name__)
    app.logger.setLevel(logging.ERROR)

    # Test log levels: with the level at ERROR, only the last two print.
    app.logger.debug("debug log info")
    app.logger.info("Info log information")
    app.logger.warning("Warning log info")
    app.logger.error("Error log info")
    app.logger.critical("Critical log info")
    return app


app = create_app()
Output: only ERROR-level and more severe logs are visible:
* Restarting with stat
[2022-12-12 17:38:39,375] ERROR in __init__: Error log info
[2022-12-12 17:38:39,375] CRITICAL in __init__: Critical log info

Related

Python FastAPI runs in PyCharm in Debug Mode and on Docker but not when Run

I have some weird behaviour with an API I am currently developing with PyCharm.
I already developed many REST-API with Python FastAPI successfully. But this time, I am facing some very weird behaviour of my application.
To test locally, I added in the main.py script the following line, which works fine for every other application I ever developed:
if __name__ == "__main__":
logger.info("starting server with dev configuration")
uvicorn.run(app, host="127.0.0.1", port=5000)
The code runs when I set a breakpoint on line uvicorn.run(...) and then resume from there.
The code does also run, when I build the docker image and then run it on docker when I use the following dockerfile:
FROM python:3.10-slim
...
EXPOSE 80
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "1"]
But when I run normally, uvicorn does not seem to boot as no messages are logged like I would expect:
INFO: Started server process [6828]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:5000 (Press CTRL+C to quit)
The problem occurred, after I added Azure application insights handler.
Configuration of application insights:
import logging
from fastapi import Request
from opencensus.ext.azure.trace_exporter import AzureExporter
from opencensus.trace import attributes_helper, samplers
from opencensus.trace.span import SpanKind
from opencensus.trace.tracer import Tracer
from starlette.types import ASGIApp
HTTP_HOST = attributes_helper.COMMON_ATTRIBUTES["HTTP_HOST"]
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES["HTTP_METHOD"]
HTTP_PATH = attributes_helper.COMMON_ATTRIBUTES["HTTP_PATH"]
HTTP_ROUTE = attributes_helper.COMMON_ATTRIBUTES["HTTP_ROUTE"]
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES["HTTP_URL"]
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES["HTTP_STATUS_CODE"]
STACKTRACE = attributes_helper.COMMON_ATTRIBUTES["STACKTRACE"]
_logger = logging.getLogger(__name__)
class AzureApplicationInsightsMiddleware:
    """HTTP middleware that exports one tracing span per request to
    Azure Application Insights through an opencensus AzureExporter.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    paste; the `return response` is placed inside the span so the span is
    closed (and exported) before the response is returned -- confirm
    against the original source.
    """

    def __init__(self, app: ASGIApp, azure_exporter: AzureExporter) -> None:
        self._app = app
        # AlwaysOnSampler: trace every request, no sampling.
        self._sampler = samplers.AlwaysOnSampler()
        self._exporter = azure_exporter

    async def __call__(self, request: Request, call_next):
        tracer = Tracer(exporter=self._exporter, sampler=self._sampler)
        with tracer.span("main") as span:
            span.span_kind = SpanKind.SERVER

            response = await call_next(request)
            # does not seem to return a response
            span.name = "[{}]{}".format(request.method, request.url)
            tracer.add_attribute_to_current_span(HTTP_HOST, request.url.hostname)
            tracer.add_attribute_to_current_span(HTTP_METHOD, request.method)
            tracer.add_attribute_to_current_span(HTTP_PATH, request.url.path)
            tracer.add_attribute_to_current_span(HTTP_URL, str(request.url))
            try:
                tracer.add_attribute_to_current_span(HTTP_STATUS_CODE, response.status_code)
            except Exception:
                tracer.add_attribute_to_current_span(HTTP_STATUS_CODE, "500")
            return response
Added as middleware:
from fastapi import FastAPI
from opencensus.ext.azure.trace_exporter import AzureExporter
from app.startup.middleware.application_insights import AzureApplicationInsightsMiddleware
from app.startup.middleware.cors import add_cors
from config import app_config
def create_middleware(app: FastAPI) -> FastAPI:
    """Register HTTP middleware on the app.

    Adds the Application Insights tracing middleware only when an
    instrumentation key is configured, then applies CORS.
    """
    if app_config.appinsights_instrumentation_key is not None:
        azure_exporter = AzureExporter(connection_string=app_config.appinsights_instrumentation_key)
        app.middleware("http")(AzureApplicationInsightsMiddleware(app=app, azure_exporter=azure_exporter))
    app = add_cors(app)
    return app
Added handler to logger:
import logging
import sys
from logging import StreamHandler
from opencensus.ext.azure.log_exporter import AzureLogHandler
# Configure the ROOT logger so records from every module propagate here.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt="%(asctime)s %(levelname)-8s %(name)-15s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
# Console handler: DEBUG and above are written to stdout.
handler = StreamHandler(stream=sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Azure handler: INFO and above are exported to Application Insights.
# NOTE(review): `connection_string` is not defined anywhere in this
# snippet -- it must come from the surrounding module, otherwise this
# line raises NameError at import time.
handler = AzureLogHandler(connection_string=connection_string)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
I use the standard template, which works for all my other application but this time obviously it does not. I wonder, there might be an issue with maybe some of the installed modules I am using in requirements.txt:
fastapi
uvicorn
azure-servicebus
pymongo[srv]
pandas
numpy
jinja2
svgwrite
matplotlib
Has anyone faced the same problem yet?
changed host to localhost --> uvicorn server still not booting

Deploy a flask app in using Cloudera Application

I have been using the following python 3 script in a CDSW session which run just fine as long as the session is not killed.
I am able to click on the top-right grid and select my app
hello.py
from flask import Flask
import os

app = Flask(__name__)


# NOTE(review): the decorator's "@" was mangled to "#" in the paste;
# without "@app.route" the view is never registered.
@app.route('/')
def index():
    return 'Web App with Python Flask!'


app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))
I would like this app to run 24/7, so instead of using a Session or scheduling a job that never ends, I would like to create a CDSW Application so that it doesn't stop.
This is the settings on my application:
Logs:
from flask import Flask
import os

app = Flask(__name__)


# NOTE(review): the decorator's "@" was mangled to "#" in the paste;
# without "@app.route" the view is never registered.
@app.route('/')
def index():
    return 'Web App with Python Flask!'


app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))
* Serving Flask app "__main__" (lazy loading)
* Environment: production
WARNING: Do not use the development server in a production environment.
Use a production WSGI server instead.
* Debug mode: off
OSError: [Errno 98] Address already in use
I tried to change the port from CDSW_PUBLIC_PORT to CDSW_APP_PORT but it ends up the same.
As it mentions here maybe you need to change this line of code
app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))
to this
app.run(host="127.0.0.1", port=int(os.environ['CDSW_APP_PORT']))
Hope it works!

Why am I instantiating two different queues?

I'm trying to set-up an application which will receive HTTP GET's and POST's using python and flask-restful. The problem that I'm getting is that when I start the application I see that there are two instances of a queue being generated. I would like you to help me understand why?
Application output (terminal):
<queue.Queue object at 0x10876fdd8>
* Serving Flask app "main" (lazy loading)
* Environment: production
WARNING: Do not use the development server in a production environment.
Use a production WSGI server instead.
* Debug mode: on
* Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
* Restarting with stat
<queue.Queue object at 0x10ce48be0>
* Debugger is active!
* Debugger PIN: 292-311-362
Python code (ran with the following command python -m main):
import json
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
import requests
import os
import threading
import queue
import sys

app = Flask(__name__)
api = Api(app)

parser = reqparse.RequestParser()
parser.add_argument("endpoint")

# NOTE(review): this rebinding shadows the `queue` module imported above;
# renaming the instance (e.g. `work_queue`) would be clearer.
queue = queue.Queue()

base_path = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): Json_Parser and Consumer are not defined in this snippet;
# they must come from elsewhere in the project.
config = Json_Parser().get_json_object(base_path + "path")
consumer = Consumer(config, queue)
t1 = threading.Thread(target=consumer.consume)
t1.start()


class Interaction(Resource):
    def get(self):
        self.create_interaction()
        thread_queue = consumer.get_queue()
        output = thread_queue.get()
        return output


api.add_resource(Interaction, '/interaction')

if __name__ == '__main__':
    print(queue)
    app.run(debug=True)
With the help of #Richar de Wit I changed the following line:
app.run(debug=True)
to:
app.run(debug=True, use_reloader=False)
to prevent the reloader from instantiating two queues, which caused issues later on.
The problem is referenced in this question:
Why does running the Flask dev server run itself twice?

Can't disable flask/werkzeug logging

I have been trying for way too long to disable the logger of werkzeug. I'm trying to create a socketio server in python but werkzeug keeps logging all POST and GET requests. It's really annoying because my logging gets flooded.
async_mode = 'gevent'

import logging
from flask import Flask, render_template
import socketio

sio = socketio.Server(logger=False, async_mode=async_mode)
app = Flask(__name__)
app.wsgi_app = socketio.Middleware(sio, app.wsgi_app)
app.config['SECRET_KEY'] = 'secret!'
thread = None

# Attempt to silence Flask's and werkzeug's loggers. This does NOT
# affect the gevent WSGI server, which writes access logs straight
# to stderr (the subject of this question).
app.logger.disabled = True
log = logging.getLogger('werkzeug')
log.disabled = True


# NOTE(review): the decorators' "@" was mangled to "#" in the paste.
@app.route('/')
def index():
    # global thread
    # if thread is None:
    #     thread = sio.start_background_task(background_thread)
    return render_template('index.html')


@sio.on('answer', namespace='/test')
def test_answer(sid, message):
    print(message)


if __name__ == '__main__':
    if sio.async_mode == 'threading':
        # deploy with Werkzeug
        app.run(threaded=True)
    elif sio.async_mode == 'eventlet':
        # deploy with eventlet
        import eventlet
        import eventlet.wsgi
        eventlet.wsgi.server(eventlet.listen(('', 5000)), app)
    elif sio.async_mode == 'gevent':
        # deploy with gevent
        from gevent import pywsgi
        try:
            from geventwebsocket.handler import WebSocketHandler
            websocket = True
        except ImportError:
            websocket = False
        if websocket:
            pywsgi.WSGIServer(('', 5000), app,
                              handler_class=WebSocketHandler).serve_forever()
        else:
            pywsgi.WSGIServer(('', 5000), app).serve_forever()
    elif sio.async_mode == 'gevent_uwsgi':
        print('Start the application through the uwsgi server. Example:')
        # print('uwsgi --http :5000 --gevent 1000 --http-websockets --master '
        #       '--wsgi-file app.py --callable app')
    else:
        print('Unknown async_mode: ' + sio.async_mode)
Everywhere I see this suggested as the solution, but it doesn't stop werkzeug from logging.
app.logger.disabled = True
log = logging.getLogger('werkzeug')
log.disabled = True
These are the kind of messages:
::1 - - [2018-02-28 22:09:03] "GET /socket.io/?EIO=3&transport=polling&t=M7UFq6u HTTP/1.1" 200 345 0.000344
::1 - - [2018-02-28 22:09:03] "POST /socket.io/?EIO=3&transport=polling&t=M7UFq7A&sid=daaf8a43faf848a7b2ae185802e7f164 HTTP/1.1" 200 195 0.000284
::1 - - [2018-02-28 22:09:03] "GET /socket.io/?EIO=3&transport=polling&t=M7UFq7B&sid=daaf8a43faf848a7b2ae185802e7f164 HTTP/1.1" 200 198 0.000153
::1 - - [2018-02-28 22:10:03] "GET /socket.io/?EIO=3&transport=polling&t=M7UFq7N&sid=daaf8a43faf848a7b2ae185802e7f164 HTTP/1.1" 400 183 60.058020
I've tried to set the level to only critical, but that didn't help either. I've also tried to use grep to suppress the messages but it seems that grep doesn't work with python console output.
Edit: I'm using python 3.5.2 on linux but had the same problem on 3.6 on windows. werkzeug is 0.14.1, flask is 0.12.2 and python-socketio is 1.8.4
Edit2: I was able to fix the problem by using grep, the problem was that werkzeug send everything to stderr which should be handled differently in the command line.
python app.py 2>&1 | grep -v 'GET\|POST'
This gives the result I wanted.
The quick answer is to pass log=None when you create your WSGIServer:
pywsgi.WSGIServer(('', 5000), app, log=None).serve_forever()
The gevent WSGI server logging is apparently a bit special according to the documentation:
[...] loggers are likely to not be gevent-cooperative. For example, the socket and syslog handlers use the socket module in a way that can block, and most handlers acquire threading locks.
If you want to have more control over the gevent WSGI server logging, you can pass in your own logger (and error_log). Just make sure to wrap it in a LoggingLogAdapter first:
from gevent.pywsgi import LoggingLogAdapter
server_log = LoggingLogAdapter(logging.getLogger(__file__))
# server_log.disabled = True # Now you can disable it like a normal log
...
pywsgi.WSGIServer(('', 5000), app, log=server_log).serve_forever()
As a side note, I checked which loggers were instantiated with this little patch to logging.getLogger. Maybe it will be helpful for others trying to understand where log output comes from:
import logging

# Keep a reference to the real factory before patching it.
old_getLogger = logging.getLogger


def getLogger(*args, **kwargs):
    """Wrapper around logging.getLogger that prints every lookup."""
    print('Getting logger', args, kwargs)
    return old_getLogger(*args, **kwargs)


# Monkey-patch so all subsequent getLogger calls are traced.
logging.getLogger = getLogger
The output was something like:
Getting logger ('concurrent.futures',) {}
Getting logger ('asyncio',) {}
Getting logger ('engineio.client',) {}
Getting logger ('engineio.server',) {}
Getting logger ('socketio.client',) {}
Getting logger ('socketio',) {}
Getting logger ('socketio',) {}
Getting logger ('socketio',) {}
Getting logger ('socketio.server',) {}
Getting logger ('socketio.client',) {}
Getting logger () {}
Getting logger ('main',) {}
Getting logger ('flask.app',) {}
Getting logger ('flask',) {}
Getting logger ('werkzeug',) {}
Getting logger ('wsgi',) {}
But of course disabling any of these loggers doesn't work since the default gevent WSGI logger is just printing directly to stderr.

How to get arrived timestamp of a request in flask

I have an ordinary Flask application, with just one thread to process requests. There are many requests arriving at the same time. They queue up to wait for be processed. How can I get the waiting time in queue of each request?
from flask import Flask, g
import time

app = Flask(__name__)


# NOTE(review): the decorators' "@" was mangled to "#" in the paste;
# also, `before_request` takes the function directly, so no "()" after it.
@app.before_request
def before_request():
    # Record when the handler starts; cleared end marker.
    g.start = time.time()
    g.end = None


@app.teardown_request
def teardown_request(exc):
    g.end = time.time()
    # Python 3 print call (the original used the Python 2 statement form).
    print(g.end - g.start)


@app.route('/', methods=['POST'])
def serve_run():
    pass


if __name__ == '__main__':
    app.debug = True
    app.run()
There is no way to do that using Flask's debug server in single-threaded mode (which is what your example code uses). That's because by default, the Flask debug server merely inherits from Python's standard HTTPServer, which is single-threaded. (And the underlying call to select.select() does not return a timestamp.)
I just have one thread to process requests.
OK, but would it suffice to spawn multiple threads, but prevent them from doing "real" work in parallel? If so, you might try app.run(..., threaded=True), to allow the requests to start immediately (in their own thread). After the start timestamp is recorded, use a threading.Lock to force the requests to execute serially.
Another option is to use a different WSGI server (not the Flask debug server). I suspect there's a way to achieve what you want using GUnicorn, configured with asynchronous workers in a single thread.
You can do something like this:
from flask import Flask, current_app, jsonify
import time

app = Flask(__name__)


# NOTE(review): the decorators' "@" was mangled to "#" in the paste.
@app.before_request
def before_request():
    # Stored on the Flask *class* itself; a per-request store such as
    # `g` would be safer under concurrency, but this mirrors the answer
    # as written.
    Flask.custom_profiler = {"start": time.time()}


@app.after_request
def after_request(response):
    current_app.custom_profiler["end"] = time.time()
    print(current_app.custom_profiler)
    print(f"""execution time: {current_app.custom_profiler["end"] - current_app.custom_profiler["start"]}""")
    return response


@app.route('/', methods=['GET'])
def main():
    return jsonify({
        "message": "Hello world"
    })


if __name__ == '__main__':
    app.run()
And testing like this
→ curl http://localhost:5000
{"message":"Hello world"}
Flask message
→ python main.py
* Serving Flask app "main" (lazy loading)
* Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
* Debug mode: off
* Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
{'start': 1622960256.215391, 'end': 1622960256.215549}
execution time: 0.00015807151794433594
127.0.0.1 - - [06/Jun/2021 13:17:36] "GET / HTTP/1.1" 200 -

Categories

Resources