I'm building a serverless application using Python and MongoDB. In the documentation I found that I need to create the DB connection outside the handler function. I have used the Mangum Python package as an adapter to handle API Gateway.
from fastapi import FastAPI, Body, status, Depends
from mangum import Mangum
from motor.motor_asyncio import AsyncIOMotorClient
from fastapi.responses import JSONResponse
from app.utility.config import MONGODB_URL, MAX_CONNECTIONS_COUNT, MIN_CONNECTIONS_COUNT, MAX_DB_THREADS_WAIT_COUNT, MAX_DB_THREAD_QUEUE_TIMEOUT_COUNT
application = FastAPI()

client = AsyncIOMotorClient(
    str(MONGODB_URL),
    maxPoolSize=MAX_CONNECTIONS_COUNT,
    minPoolSize=MIN_CONNECTIONS_COUNT,
    waitQueueMultiple=MAX_DB_THREADS_WAIT_COUNT,
    waitQueueTimeoutMS=MAX_DB_THREAD_QUEUE_TIMEOUT_COUNT,
)

async def get_database() -> AsyncIOMotorClient:
    return client

@application.post("/createStudent")
async def create_student(student=Body(...), db: AsyncIOMotorClient = Depends(get_database)):
    new_student = await db["college"]["students"].insert_one(student)
    created_student = await db["college"]["students"].find_one({"_id": new_student.inserted_id})
    return JSONResponse(status_code=status.HTTP_201_CREATED, content=created_student)

@application.post("/createTeacher")
async def create_teacher(teacher=Body(...), db: AsyncIOMotorClient = Depends(get_database)):
    new_teacher = await db["college"]["teachers"].insert_one(teacher)
    created_teacher = await db["college"]["teachers"].find_one({"_id": new_teacher.inserted_id})
    return JSONResponse(status_code=status.HTTP_201_CREATED, content=created_teacher)

handler = Mangum(application)
For every API request, a new connection is created. How can I cache the DB client so that new requests reuse the existing connection? Because a new connection is created on every request, Lambda compute time increases dramatically once the DB hits its max connection limit.
There are examples for Node.js, but I could not find any for Python.
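For reference, a minimal sketch of the usual Lambda pattern (this is not from the question): module-level objects survive warm invocations, so keeping the client in a lazily initialized global lets subsequent requests reuse the same connection pool instead of opening new connections. The helper name and pool size here are made up.

from motor.motor_asyncio import AsyncIOMotorClient

_client = None  # lives for the lifetime of the Lambda execution environment

def get_client(mongodb_url: str) -> AsyncIOMotorClient:
    # Create the client only on the first (cold) invocation;
    # warm invocations reuse the already-open connection pool.
    global _client
    if _client is None:
        _client = AsyncIOMotorClient(mongodb_url, maxPoolSize=10)
    return _client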
I have a REST API server which makes calls to some other APIs. I am accessing the data I get from the server on a React JS frontend, but for certain use cases I need to fetch real-time data from the backend. Is there any way to do this together? Below is my code:
from flask import Flask,request
from flask_cors import CORS
from tuya_connector import TuyaOpenAPI, TUYA_LOGGER
app = Flask(__name__)
CORS(app)
#app.get("/api/device/<string:deviceid>")
def getdata(deviceid):
ACCESS_ID = ""
ACCESS_KEY = ""
API_ENDPOINT = ""
# Enable debug log
# Init OpenAPI and connect
openapi = TuyaOpenAPI(API_ENDPOINT, ACCESS_ID, ACCESS_KEY)
openapi.connect()
# Set up device_id
DEVICE_ID = deviceid
# Call APIs from Tuya
# Get the device information
response = openapi.get("/v1.0/devices/{}".format(DEVICE_ID))
return response
I want to have a traditional request-response service along with real-time data fetching.
WebSocket endpoints are exactly what you're looking for. If it is not too late, I'd recommend switching to FastAPI, which supports WebSockets "natively" (out of the box): https://fastapi.tiangolo.com/advanced/websockets
If you need to keep using Flask, there are a few packages that let you add WebSocket endpoints, for example flask-sock: https://flask-sock.readthedocs.io/en/latest/ (a minimal sketch follows below).
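For illustration only, a rough flask-sock echo sketch, assuming flask-sock is installed (the /echo route and the echo behaviour are made up here, not taken from the answer):

from flask import Flask
from flask_sock import Sock

app = Flask(__name__)
sock = Sock(app)

@sock.route("/echo")
def echo(ws):
    # simple echo loop over the WebSocket connection
    while True:
        data = ws.receive()
        ws.send(data)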
With FastAPI, it is as simple as this:
from fastapi import FastAPI, WebSocket

app = FastAPI()

@app.get("/")
async def get():
    return {"msg": "This is a regular HTTP endpoint"}

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    while True:
        data = await websocket.receive_text()
        await websocket.send_text(f"Message text was: {data}")
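As a quick way to exercise the WebSocket endpoint above (not part of the original answer), one could use the third-party websockets client library, assuming the app is served locally with uvicorn on port 8000:

import asyncio
import websockets  # third-party client library, assumed installed

async def main():
    async with websockets.connect("ws://127.0.0.1:8000/ws") as ws:
        await ws.send("hello")
        print(await ws.recv())  # prints: Message text was: hello

asyncio.run(main())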
I'm using FastAPI with Redis. My app looks something like this:
from fastapi import FastAPI
import redis
# Instantiate redis client
r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
# Instantiate fastapi app
app = FastAPI()
#app.get("/foo/")
async def foo():
x = r.get("foo")
return {"message": x}
#app.get("/bar/")
async def bar():
x = r.get("bar")
return {"message": x}
Is it bad practice to create r as a module-scoped variable like this? If so what are the drawbacks?
In Tiangolo's tutorial on setting up a SQL database connection, he uses a dependency, which I guess in my case would look something like this:
from fastapi import Depends, FastAPI
import redis
# Instantiate fastapi app
app = FastAPI()
# Dependency
def get_redis():
    return redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)

@app.get("/foo/")
async def foo(r = Depends(get_redis)):
    x = r.get("foo")
    return {"message": x}

@app.get("/bar/")
async def bar(r = Depends(get_redis)):
    x = r.get("bar")
    return {"message": x}
I'm a bit confused as to which of these methods (or something else) would be preferred and why.
Depends is evaluated every time your function gets a request, so your second example will create a new connection for each request. As @JarroVGIT said, we can use connection pooling to maintain the connection from FastAPI to Redis and reduce the cost of opening and closing connections.
Usually, I create a different file to define the connection. Let's say we have config/db.py:
import redis
def create_redis():
    return redis.ConnectionPool(
        host='localhost',
        port=6379,
        db=0,
        decode_responses=True
    )

pool = create_redis()
Then, in main.py:
from fastapi import Depends, FastAPI
import redis
from config.db import pool
app = FastAPI()
def get_redis():
    # Here we reuse our connection pool instead of creating a new one
    return redis.Redis(connection_pool=pool)

@app.get("/items/{item_id}")
def read_item(item_id: int, cache = Depends(get_redis)):
    status = cache.get(item_id)
    return {"item_name": status}

@app.put("/items/{item_id}")
def update_item(item_id: int, cache = Depends(get_redis)):
    cache.set(item_id, "available")
    return {"status": "available", "item_id": item_id}
Usually I also split the dependencies into their own file, as in the docs, so they can be called from the routing modules, but for simplicity I will leave it like this.
You can check this repo to experiment yourself. It has more comprehensive code, and I have already created several scenarios that might help you understand the difference. It also covers how your first example may block other endpoints.
In your second example, you are creating a new Redis instance every time, and at some point it will reach the max connection limit. If you structure the code like this, it is much cleaner and more reusable:
from fastapi import FastAPI
import redis
class AppAPI(FastAPI):
    def __init__(self):
        super().__init__()
        # one client shared by the whole application
        self.redis_client = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)

        @self.get("/foo/")
        async def foo():
            x = self.redis_client.get("foo")
            return {"message": x}

app = AppAPI()
See https://github.com/redis/redis-py#connection-pools. You can define the pool at module level and import it wherever needed; all Redis connections will then be created out of the pool.
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
I need to add rate limits to my FastAPI endpoints. I'm currently using fastapi-limiter, but there is a problem: the developer no longer supports the project, and the same goes for slowapi (pull requests have been hanging for almost a year). I'm currently using a patched version, of course. I would like to know whether there are any alternatives you know of, or some other way to solve my problem. As an example, I attach just a piece of code (a usual endpoint).
Thank you in advance for the time devoted to my problem.
from fastapi import APIRouter, Query, HTTPException, Depends, Request
from fastapi_limiter import FastAPILimiter, default_identifier
from fastapi_limiter.depends import RateLimiter
import aioredis  # Redis client used with fastapi-limiter

router = APIRouter()
limits_enabled = settings.limits_enabled  # `settings` and `schemas` come from the project

async def custom_identifier(request: Request) -> None:
    return None

if limits_enabled == 1:
    @router.on_event("startup")
    async def startup():
        redis = await aioredis.from_url(
            settings.redis_url,
            encoding="utf-8",
            decode_responses=True,
            password=settings.redis_pass
        )
        await FastAPILimiter.init(redis)

    @router.on_event("shutdown")
    async def shutdown():
        await FastAPILimiter.close()

limits = [Depends(RateLimiter(times=settings.limiter_times,
                              seconds=settings.limiter_seconds,
                              identifier=custom_identifier))]

@router.get("/test", dependencies=limits, responses=example_vehicle_types_responses,
            response_model=schemas.test,
            description="test")
async def get_test():
    return "test"
I have deployed a FastAPI endpoint:
from fastapi import FastAPI, UploadFile
from typing import List
app = FastAPI()
@app.post('/work/test')
async def testing(files: List[UploadFile]):
    for i in files:
        .......
        # do a lot of operations on each file
        # after that I am just writing the processed data into a MySQL database
        # cur.execute(...)
        # cur.commit()
        .......
    # just returning "OK" to confirm data is written into mysql
    return {"response": "OK"}
I can request output from the API endpoint, and it's working perfectly fine for me.
Now, the biggest challenge for me is to know how much time each iteration is taking, because in the UI (for those who are accessing my API endpoint) I want to show a progress bar (TIME TAKEN) for each iteration/file being processed.
Is there any possible way for me to achieve this? If so, please help me out on how I can proceed.
Thank you.
Approaches
Polling
The most preferred approach for tracking the progress of a task is polling:
After receiving a request to start a task on the backend:
Create a task object in storage (e.g. in-memory, Redis, etc.). The task object must contain the following data: task ID, status (pending, completed), result, and so on.
Run the task in the background (coroutines, threading, multiprocessing, or a task queue like Celery, arq, aio-pika, dramatiq, etc.).
Immediately respond with 202 (Accepted), returning the previously created task ID.
Update the task status:
This can be done from within the task itself, if it knows about the task store and has access to it: periodically, the task updates information about itself.
Or use a task monitor (Observer, producer-consumer pattern), which will monitor the status of the task and its result and update the information in the storage.
On the client side (front end), start a polling cycle for the task status against the endpoint /task/{ID}/status, which takes information from the task storage.
Streaming response
Streaming is a less convenient way of periodically getting the status of request processing: we gradually push responses without closing the connection. It has a number of significant disadvantages; for example, if the connection is broken, you can lose information. A streaming API is a different approach from a REST API.
Websockets
You can also use WebSockets for real-time notifications and bidirectional communication; a rough sketch is shown below.
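For illustration only, a minimal sketch of such a notification endpoint. It assumes the same kind of in-memory job store as in the polling demo further down (the jobs dict and the status/progress fields are borrowed from that demo, not from the question):

import asyncio
from fastapi import FastAPI, WebSocket

app = FastAPI()
jobs = {}  # uid -> job object, filled in by the background task as in the polling demo

@app.websocket("/ws/task/{uid}/status")
async def task_status_ws(websocket: WebSocket, uid: str):
    await websocket.accept()
    while True:
        job = jobs.get(uid)
        # push the current state instead of waiting for the client to poll
        await websocket.send_json({"status": getattr(job, "status", "unknown"),
                                   "progress": getattr(job, "progress", 0)})
        if job is not None and job.status == "complete":
            break
        await asyncio.sleep(1)
    await websocket.close()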
Links:
Examples of the polling approach for a progress bar, and a more detailed description for Django + Celery, can be found at these links:
https://www.dangtrinh.com/2013/07/django-celery-display-progress-bar-of.html
https://buildwithdjango.com/blog/post/celery-progress-bars/
I have provided simplified examples of running background tasks in FastAPI using multiprocessing here:
https://stackoverflow.com/a/63171013/13782669
Old answer:
You could run the task in the background, return its ID, and provide a /status endpoint that the front end would call periodically. In the status response, you could return the current state of the task (for example, pending, with the number of the currently processed file). I provided a few simple examples here.
Demo
Polling
Demo of the approach using asyncio tasks (single worker solution):
import asyncio
from http import HTTPStatus
from typing import Dict, Optional
from uuid import UUID, uuid4

import uvicorn
from fastapi import BackgroundTasks, FastAPI
from pydantic import BaseModel, Field

class Job(BaseModel):
    uid: UUID = Field(default_factory=uuid4)
    status: str = "in_progress"
    progress: int = 0
    result: Optional[int] = None

app = FastAPI()
jobs: Dict[UUID, Job] = {}  # Dict as job storage

async def long_task(queue: asyncio.Queue, param: int):
    for i in range(1, param):  # do work and return our progress
        await asyncio.sleep(1)
        await queue.put(i)
    await queue.put(None)

async def start_new_task(uid: UUID, param: int) -> None:
    queue = asyncio.Queue()
    task = asyncio.create_task(long_task(queue, param))
    while progress := await queue.get():  # monitor task progress
        jobs[uid].progress = progress
    jobs[uid].status = "complete"

@app.post("/new_task/{param}", status_code=HTTPStatus.ACCEPTED)
async def task_handler(background_tasks: BackgroundTasks, param: int):
    new_task = Job()
    jobs[new_task.uid] = new_task
    background_tasks.add_task(start_new_task, new_task.uid, param)
    return new_task

@app.get("/task/{uid}/status")
async def status_handler(uid: UUID):
    return jobs[uid]
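A minimal polling client sketch (my addition, not part of the answer), assuming the demo above is running locally via uvicorn main:app and that the requests package is installed:

import time
import requests

BASE = "http://127.0.0.1:8000"

job = requests.post(f"{BASE}/new_task/10").json()  # start the task, get the job object back
while job["status"] != "complete":
    time.sleep(1)
    job = requests.get(f"{BASE}/task/{job['uid']}/status").json()
    print(f"progress: {job['progress']}")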
Adapted example for the loop from the question
The background processing function is defined with def (not async def), so FastAPI runs it in the thread pool.
import time
from http import HTTPStatus
from fastapi import BackgroundTasks, UploadFile, File
from typing import Dict, List
from uuid import UUID, uuid4
from fastapi import FastAPI
from pydantic import BaseModel, Field
class Job(BaseModel):
    uid: UUID = Field(default_factory=uuid4)
    status: str = "in_progress"
    processed_files: List[str] = Field(default_factory=list)

app = FastAPI()
jobs: Dict[UUID, Job] = {}

def process_files(task_id: UUID, files: List[UploadFile]):
    for i in files:
        time.sleep(5)  # pretend long task
        # ...
        # do a lot of operations on each file
        # then append the processed file to a list
        # ...
        jobs[task_id].processed_files.append(i.filename)
    jobs[task_id].status = "completed"

@app.post('/work/test', status_code=HTTPStatus.ACCEPTED)
async def work(background_tasks: BackgroundTasks, files: List[UploadFile] = File(...)):
    new_task = Job()
    jobs[new_task.uid] = new_task
    background_tasks.add_task(process_files, new_task.uid, files)
    return new_task

@app.get("/work/{uid}/status")
async def status_handler(uid: UUID):
    return jobs[uid]
Streaming
from fastapi.responses import StreamingResponse  # needed for the snippet below

async def process_files_gen(files: List[UploadFile]):
    for i in files:
        time.sleep(5)  # pretend long task
        # ...
        # do a lot of operations on each file
        # then append the processed file to a list
        # ...
        yield f"{i.filename} processed\n"
    yield "OK\n"

@app.post('/work/stream/test', status_code=HTTPStatus.ACCEPTED)
async def work(files: List[UploadFile] = File(...)):
    return StreamingResponse(process_files_gen(files))
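For completeness (my addition), the stream could be consumed incrementally with the requests package, assuming the app from the examples above runs locally on port 8000 and that a.txt is just a placeholder file name:

import requests

with requests.post("http://127.0.0.1:8000/work/stream/test",
                   files=[("files", ("a.txt", b"some data"))],
                   stream=True) as resp:
    for line in resp.iter_lines():
        print(line.decode())  # "<filename> processed" per file, then "OK"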
Below is a solution which uses unique identifiers and a globally available dictionary which holds information about the jobs:
NOTE: The code below is safe to use as long as you use dynamic key values (a uuid in the sample) and keep the application within a single process.
1. To start the app, create a file main.py.
2. Run uvicorn main:app --reload.
3. Create a job entry by accessing http://127.0.0.1:8000/.
4. Repeat step 3 to create multiple jobs.
5. Go to http://127.0.0.1:8000/status to see the statuses of all jobs.
6. Go to http://127.0.0.1:8000/status/{identifier} to see the progress of a job by its job ID.
Code of app:
from fastapi import FastAPI, UploadFile
import uuid
from typing import List
import asyncio
context = {'jobs': {}}

app = FastAPI()

async def do_work(job_key, files=None):
    iter_over = files if files else range(100)
    for file_number, file in enumerate(iter_over):
        jobs = context['jobs']
        job_info = jobs[job_key]
        job_info['iteration'] = file_number
        job_info['status'] = 'inprogress'
        await asyncio.sleep(1)
    context['jobs'][job_key]['status'] = 'done'

@app.post('/work/test')
async def testing(files: List[UploadFile]):
    identifier = str(uuid.uuid4())
    context['jobs'][identifier] = {}
    asyncio.run_coroutine_threadsafe(do_work(identifier, files), loop=asyncio.get_running_loop())
    return {"identifier": identifier}

@app.get('/')
async def get_testing():
    identifier = str(uuid.uuid4())
    context['jobs'][identifier] = {}
    asyncio.run_coroutine_threadsafe(do_work(identifier), loop=asyncio.get_running_loop())
    return {"identifier": identifier}

@app.get('/status')
def status():
    return {
        'all': list(context['jobs'].values()),
    }

@app.get('/status/{identifier}')
async def status_by_id(identifier):
    return {
        "status": context['jobs'].get(identifier, 'job with that identifier is undefined'),
    }
I want to test my Tornado Python application with pytest.
For that purpose, I want to have a mock DB for Mongo and to use a "fake" Motor client to simulate the calls to MongoDB.
I found a lot of solutions for PyMongo, but not for Motor.
Any idea?
I do not clearly understand your problem — why not just have hard-coded JSON data?
If you just want to have a class that would mock the following:
from motor.motor_tornado import MotorClient
client = MotorClient(MONGODB_URL)
my_db = client.my_db
result = await my_db['my_collection'].insert_one(my_json_load)
So I recommend creating a class:
class Collection():
    database = []

    async def insert_one(self, data):
        self.database.append(data)
        data['_id'] = "5063114bd386d8fadbd6b004"  # you may make it random or sequential
        ...
        # Also, you may save the 'database' list to a pickle on disk to preserve data between runs
        return data

    async def find_one(self, data):
        # Search in the list
        return data

    async def delete_one(self, data_id):
        # Mimic Motor's DeleteResult, which exposes a deleted_count attribute
        class Result:
            deleted_count = 1
        return Result()

# Then create a collection:
my_db = {}
my_db['my_collection'] = Collection()
# The following is part of 'views.py'
from tornado.web import RequestHandler, authenticated
from tornado.escape import xhtml_escape

class UserHandler(RequestHandler):
    async def post(self, name):
        getusername = xhtml_escape(self.get_argument("user_variable"))
        my_json_load = {'username': getusername}
        result = await my_db['my_collection'].insert_one(my_json_load)
        ...
        return self.write(result)
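As a quick illustration (my addition, not part of the answer), a hypothetical pytest test against the fake collection, assuming pytest-asyncio is installed:

import pytest

@pytest.mark.asyncio
async def test_insert_one_assigns_id():
    # Collection is the fake class defined above
    fake_db = {'my_collection': Collection()}
    result = await fake_db['my_collection'].insert_one({'username': 'alice'})
    assert result['_id'] == "5063114bd386d8fadbd6b004"
    assert result in fake_db['my_collection'].database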
If you would clarify your question, I will develop my answer.