How to mock logging in pytest for a FastAPI call - Python

I am working on a project with FastAPI.
As the title says, I have an endpoint that logs an event when an HTTPException occurs, and when the request starts and finishes.
Something like this:
@router.get(
    "/chat/{chat_id}/messages/",
    status_code=status.HTTP_200_OK,
)
async def get_messages(chat_message: GetMessageValidator = Depends(), request: Request = None):
    logging.info(request.url.path + " request started")  ## LOGGING
    if chat_message.chat_id_validator(chat_message.chat_id):
        logging.error(request.url.path + settings.GET_MESSAGES_CHAT_ID_ERROR)  ## LOGGING
        raise HTTPException(
            status_code=404, detail=settings.GET_MESSAGES_CHAT_ID_ERROR
        )
    logging.info(request.url.path + " request OK")  ## LOGGING
    return chat_message
And I have built a test with pytest which calls that endpoint, something like this:
@dataclass
class ChatMessage:
    from_user: str
    to_user: str
    chat_id: str
    body: str

@pytest.mark.asyncio()
async def test_pagination_get_messages(client: AsyncSession,
                                       user_token_header):
    conversation = [
        ChatMessage(frank_id, pepe_id, chat_record.chat_id, 'Hello Pepe!')
    ]
    page_1 = await client.get(  ## ENDPOINT CALL
        f"/api/v1/chat/messages/",
        json={
            "chat_id": str(chat_record.chat_id),
            "quantity": 3
        },
        headers=user_token_header
    )
    assert page_1.status_code == 200
The pytest response is okay, but I don't want the logging events to fire when I call the endpoint from a pytest test, and I don't have an idea how to avoid the logging calls while the test for that endpoint is running.
Can you give me an idea or a solution for how to mock the logging events in the endpoint when it is called from pytest?
Thanks!

Looking at the source code, logging.info() is a wrapper around logging.root.info(), which in turn calls logging.root._log(), as do the other logging functions. Therefore, you should be able to decorate your test with unittest.mock.patch:
from unittest.mock import patch

@pytest.mark.asyncio()
@patch("logging.root._log")
async def test_pagination_get_messages(...):
    ...
Another option would be to remove all logging handlers in a test setup function.
But before doing any of this, ask yourself why. Pytest captures output by default (unless you explicitly disable capturing), and logging to stdout/stderr isn't exactly resource heavy.
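If you prefer the second option, here is a minimal sketch of a conftest.py fixture that silences logging while each test runs (it only uses the standard library's logging.disable(); the fixture name is arbitrary):
# conftest.py
import logging

import pytest

@pytest.fixture(autouse=True)
def silence_logging():
    # Disable every logging call at or below CRITICAL for the duration of the test,
    # then restore normal behaviour afterwards.
    logging.disable(logging.CRITICAL)
    yield
    logging.disable(logging.NOTSET)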

Related

How to mock client object

I am working on writing unit tests for my FastAPI project.
One endpoint involves getting a ServiceNow ticket. Here is the code I want to test:
from aiosnow.models.table.declared import IncidentModel as Incident
from fastapi import APIRouter

router = APIRouter()

@router.post("/get_ticket")
async def snow_get_ticket(req: DialogflowRequest):
    """Retrieves the status of the ticket in the parameter."""
    client = create_snow_client(
        SNOW_TEST_CONFIG.servicenow_url, SNOW_TEST_CONFIG.user, SNOW_TEST_CONFIG.pwd
    )
    params: dict = req.sessionInfo["parameters"]
    ticket_num = params["ticket_num"]
    try:
        async with Incident(client, table_name="incident") as incident:
            response = await incident.get_one(Incident.number == ticket_num)
            stage_value = response.data["state"].value
            desc = response.data["description"]
            [...data manipulation, unimportant parts]
What I am having trouble with is mocking the client response: every time, the actual client gets invoked and makes the API call, which I don't want.
Here is the current version of my unit test:
from fastapi.testclient import TestClient

client = TestClient(app)

@patch("aiosnow.models.table.declared.IncidentModel")
def test_get_ticket_endpoint_valid_ticket_num(self, mock_client):
    mock_client.return_value = {"data": {"state": "new",
                                         "description": "test"}}
    response = client.post(
        "/snow/get_ticket", json=json.load(self.test_request)
    )
    assert response.status_code == 200
I think my problem is patching the wrong object, but I am not sure what else to patch.
In your test you're calling client.post(...); if you don't want this to go to the ServiceNow API, this client should be mocked.
Edit 1:
Okay, so the way your test is set up now, the self arg is the mocked IncidentModel object, so only that object will be a mock. Since you are creating a brand new IncidentModel object in your post method, it is a real IncidentModel object, hence why it's actually calling the API.
In order to mock the IncidentModel.get_one method so that it returns your mock value any time an object calls it, you want to do something like this:
def test_get_ticket_endpoint_valid_ticket_num(mock_client):
    mock_client.return_value = {"data": {"state": "new",
                                         "description": "test"}}
    with patch.object(aiosnow.models.table.declared.IncidentModel, "get_one", return_value=mock_client):
        response = client.post(
            "/snow/get_ticket", json=json.load(self.test_request)
        )
    assert response.status_code == 200
The way variable assignment works in Python, changing aiosnow.models.table.declared.IncidentModel will not change the IncidentModel that you've imported into your Python file. You have to do the mocking where you use the object.
So instead of @patch("aiosnow.models.table.declared.IncidentModel"), you want to do @patch("your_python_file.IncidentModel").
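Putting that together, here is a rough sketch of the test (the module path "your_python_file", the app import, and the request payload shape are assumptions; note the endpoint imports the model as Incident and uses it as an async context manager, so the mock has to support that, and create_snow_client() may also need patching if it performs network I/O):
from unittest.mock import AsyncMock, MagicMock, patch

from fastapi.testclient import TestClient

client = TestClient(app)  # "app" is your FastAPI instance, imported from wherever you create it

# Patch the name the endpoint actually uses ("Incident" in its import);
# "your_python_file" stands for the module that defines snow_get_ticket.
@patch("your_python_file.Incident")
def test_get_ticket_endpoint_valid_ticket_num(mock_incident):
    # Fake response shaped like what the endpoint reads from aiosnow.
    fake_response = MagicMock()
    fake_response.data = {"state": MagicMock(value="new"), "description": "test"}

    # The endpoint does "async with Incident(...) as incident", so configure the
    # mocked async context manager to yield an object with an awaitable get_one().
    incident_instance = MagicMock()
    incident_instance.get_one = AsyncMock(return_value=fake_response)
    mock_incident.return_value.__aenter__.return_value = incident_instance

    # Payload shape is assumed from the endpoint's req.sessionInfo["parameters"] access.
    response = client.post(
        "/snow/get_ticket",
        json={"sessionInfo": {"parameters": {"ticket_num": "INC0000001"}}},
    )
    assert response.status_code == 200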

How to define multiple API endpoints in FastAPI with different paths but the same path parameter?

I'm working on a project which uses FastAPI. My router file looks like the following:
# GET API Endpoint 1
@router.get("/project/{project_id}/{employee_id}")
async def method_one(
    project_id: str, employee_id: str, session: AsyncSession = Depends(get_db)
):
    try:
        return await CustomController.method_one(
            session, project_id, employee_id
        )
    except Exception as e:
        return custom_exception_handler(e)

# GET API Endpoint 2
@router.get("/project/details/{project_id}")
async def method_two(
    project_id: str, session: AsyncSession = Depends(get_db)
):
    try:
        return await CustomController.method_two(
            session=session, project_id=project_id
        )
    except Exception as e:
        return custom_exception_handler(e)

# GET API Endpoint 3
@router.get("/project/metadata/{project_id}")
async def method_three(
    project_id: str, session: AsyncSession = Depends(get_db)
):
    try:
        return await CustomController.method_three(
            session=session, project_id=project_id
        )
    except Exception as e:
        return custom_exception_handler(e)
The obvious expectation here is: when each of these API endpoints is triggered with its required path parameters, the corresponding controller method is executed, as defined in its body.
However, for some strange reason, when API endpoints 2 and 3 are triggered, they execute the controller method of endpoint 1, i.e., CustomController.method_one().
Upon adding some print() statements to method_one() in the router, I've observed that method_one() is being called when API endpoint 2 is called, while it is actually supposed to call method_two(). The same is the case with API endpoint 3.
I'm unable to understand why the method body of method_one() is getting executed when API endpoints 2 and 3 are triggered. Am I missing something in the configuration? Can someone please correct me? Thanks!
In FastAPI, as described in this answer, endpoints are evaluated in order (see FastAPI's documentation on how order matters), so the endpoint you defined first in your app (in this case, /project/{project_id}/...) will be matched first. Hence, every time you call one of the other two endpoints, i.e., /project/details/... and /project/metadata/..., the first endpoint is triggered, using details or metadata as the project_id parameter.
Solution
Thus, you need to make sure that the other two endpoints are declared before the one for /project/{project_id}/.... For example:
# GET API Endpoint 1
@router.get("/project/details/{project_id}")
# ...

# GET API Endpoint 2
@router.get("/project/metadata/{project_id}")
# ...

# GET API Endpoint 3
@router.get("/project/{project_id}/{employee_id}")
# ...

FastAPI: traceback.format_exc returns None when using add_exception_handler

I develop with FastAPI, and I want to include traceback info in the response when an error occurs.
To do so, I define exception handlers in exception_handler.py:
from fastapi import FastAPI, Request, status
from fastapi.responses import JSONResponse
from traceback import format_exc, print_exc

def general_exception_handler(req: Request, exc: Exception):
    '''
    Exception handler for unspecified exceptions
    '''
    traceback_msg = format_exc()
    return JSONResponse(
        {
            "code": status.HTTP_500_INTERNAL_SERVER_ERROR,
            "message": f"error info: {traceback_msg}",
            # "message": f"error info: {str(exc)}",
            "data": "",
        },
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
    )
And I attach that handler to the FastAPI app instance in server.py:
server.py is where I create the app instance and attach extra functionality to it, like middlewares or exception handlers.
from core import router  # api routers are defined in router.py
from fastapi import FastAPI
from core.exception_handler import general_exception_handler

app = FastAPI(
    debug=False,
    docs_url=None,
    redoc_url=None
)

# attach exception handler to app instance
app.add_exception_handler(Exception, general_exception_handler)

# include routers in app instance
app.include_router(router.router)
The problem is, when an exception is raised, the traceback message returned by format_exc() is None. But when I use str(exc), like in the commented-out code, I get the exception info properly, but of course without the traceback.
It will not work because the exception handler receives the exception as a parameter instead of catching the exception itself, meaning that there is no stack trace available to format_exc() in this case.
If you want the stack trace, you should create a middleware or a custom API route that actually captures the exception and returns the message the way you want. I usually prefer a custom API route instead of middleware because it is more explicit and gives you more flexibility.
You can write something like this
from typing import Callable
from traceback import format_exc

from fastapi import Request, Response
from fastapi.routing import APIRoute

class MyRoute(APIRoute):
    def get_route_handler(self) -> Callable:
        original_route_handler = super().get_route_handler()

        async def custom_route_handler(request: Request) -> Response:
            try:
                return await original_route_handler(request)
            except Exception as exc:
                traceback_msg = format_exc()
                # your return goes here

        return custom_route_handler
Then you override the default route class in FastAPI:
app = FastAPI()
app.router.route_class = MyRoute
It should give you what you want.
There's always format_exception, which takes an explicit exception argument, rather than grabbing the current one from sys.exc_info().
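For instance, a minimal sketch of the original handler rewritten around traceback.format_exception (on Python 3.10+ you can pass just the exception; older versions need the (type, value, traceback) triple shown here):
import traceback

from fastapi import Request, status
from fastapi.responses import JSONResponse

def general_exception_handler(req: Request, exc: Exception):
    # Build the traceback from the exception object FastAPI hands us,
    # instead of relying on sys.exc_info() via format_exc().
    tb_msg = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
    return JSONResponse(
        {
            "code": status.HTTP_500_INTERNAL_SERVER_ERROR,
            "message": f"error info: {tb_msg}",
            "data": "",
        },
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
    )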

Unable to monkeypatch an RPC server class method

I need to monkeypatch a class method that is decorated with the @method decorator of the jsonrpcserver library. The class implements an RPC server that is started as an asyncio server and is launched using a pytest fixture like this:
# conftest.py
@pytest.fixture(autouse=True, scope="module")
@pytest.mark.asyncio
async def rpc_server(...):
    rpc_server = RpcServer(
        addr="127.0.0.1",
        port=9500,
        ...
    )
    task = asyncio.create_task(rpc_server.start())
    yield rpc_server
    task.cancel()
The test should monkeypatch one of the methods of the RpcServer class:
# test_rpc.py
@pytest.mark.asyncio
async def test_rpc_server_exception(
    rpc_server: RpcServer,
    ...
    monkeypatch: MonkeyPatch,
):
    async def raise_runtime_error():
        raise RuntimeError()

    monkeypatch.setattr(
        RpcServer, "method_to_be_patched", raise_runtime_error, raising=True
    )
    ...  # making an rpc request to launch method_to_be_patched
    assert ...
method_to_be_patched is invoked by async_dispatch of the jsonrpcserver library once a new request is received, and it looks like this:
# rpc_server.py
@method
async def method_to_be_patched(self, ...) -> str:
    ...
    return ...
The problem is that monkeypatch is not patching anything, and the test passes without raising the exception I need. I've tried to monkeypatch both RpcServer and the instance yielded from the pytest fixture without any success; when debugging it seems that the class method correctly points to the dummy function, but the original one is still invoked.
EDIT: the issue arises because of how Python imports work. As far as I understood, when importing like from ... import ... I'm creating a new reference, so basically I'm patching the reference created in test_rpc.py and not the one in rpc_server.py (correct me if I'm wrong).
So I tried
# test_rpc.py
@pytest.mark.asyncio
async def test_rpc_server_exception(
    rpc_server: RpcServer,
    ...
    monkeypatch: MonkeyPatch,
):
    async def raise_runtime_error():
        raise RuntimeError()

    import network  # the package containing rpc_server.py

    monkeypatch.setattr(
        network.rpc_server.RpcServer, "method_to_be_patched", raise_runtime_error, raising=True
    )
    ...  # making an rpc request to launch method_to_be_patched
    assert ...
but still not getting the intended behaviour.
The project tree is like this:
/src
    rpc_server.py
/test
    conftest.py
    /e2e
        test_rpc.py
The solution is to monkeypatch where the method is invoked. Since I'm using jsonrpcserver here, I had to monkeypatch the call method defined inside its async_dispatch module, and now it is working as I expected.
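For reference, a rough sketch of that kind of patch (the dotted path below is an assumption based on the description above and on jsonrpcserver's internal layout, which differs between releases, so verify it against the version you have installed):
# test_rpc.py
@pytest.mark.asyncio
async def test_rpc_server_exception(rpc_server: RpcServer, monkeypatch: MonkeyPatch):
    async def raise_runtime_error(*args, **kwargs):
        raise RuntimeError()

    # Patch the function jsonrpcserver itself invokes when dispatching a request,
    # rather than the reference imported into the test module.
    # "jsonrpcserver.async_dispatcher.call" is an assumed path; check your installed version.
    monkeypatch.setattr("jsonrpcserver.async_dispatcher.call", raise_runtime_error)
    ...  # make an rpc request that triggers method_to_be_patched
    assert ...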

Unit test Flask view mocking out celery tasks

So, I have a Flask view which adds a Celery task to a queue and returns a 200 to the user.
from flask.views import MethodView
from app.tasks import launch_task

class ExampleView(MethodView):
    def post(self):
        # Does some verification of the incoming request, if all good:
        launch_task(task, arguments)
        return 'Accepted', 200
The issue is with testing this: I don't want to have to have a Celery instance running, etc. I just want to know that after all the verification is okay, it returns 200 to the user. The Celery launch_task() will be tested elsewhere.
Therefore I'm keen to mock out that launch_task() call so it essentially does nothing, making my unit test independent of the Celery instance.
I've tried various incarnations of:
@mock.patch('app.views.launch_task.delay')
def test_launch_view(self, mock_launch_task):
    mock_launch_task.return_value = None
    # post a correct dictionary to the view
    correct_data = {'correct': 'params'}
    rs = self.app.post('/launch/', data=correct_data)
    self.assertEqual(rs.status_code, 200)

@mock.patch('app.views.launch_task')
def test_launch_view(self, mock_launch_task):
    mock_launch_task.return_value = None
    # post a correct dictionary to the view
    correct_data = {'correct': 'params'}
    rs = self.app.post('/launch/', data=correct_data)
    self.assertEqual(rs.status_code, 200)
But I can't seem to get it to work; my view just exits with a 500 error. Any assistance would be appreciated!
I also tried every @patch decorator variant and it didn't work.
What did work was mocking in setUp, like this:
import unittest
from mock import patch
from mock import MagicMock

class TestLaunchTask(unittest.TestCase):
    def setUp(self):
        self.patcher_1 = patch('app.views.launch_task')
        mock_1 = self.patcher_1.start()
        launch_task = MagicMock()
        launch_task.as_string = MagicMock(return_value='test')
        mock_1.return_value = launch_task

    def tearDown(self):
        self.patcher_1.stop()
The @task decorator replaces the function with a Task object (see the documentation). If you mock the task itself, you'll replace the (somewhat magic) Task object with a MagicMock and it won't schedule the task at all. Instead, mock the Task object's run() method, like so:
# With CELERY_ALWAYS_EAGER=True
@patch('monitor.tasks.monitor_user.run')
def test_monitor_all(self, monitor_user):
    """
    Test monitor.all task
    """
    user = ApiUserFactory()
    tasks.monitor_all.delay()
    monitor_user.assert_called_once_with(user.key)
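Applied to the view from the question, a sketch might look like this (assuming launch_task is the Celery task defined in app.tasks, as in the view's import, and reusing the asker's self.app test client convention):
from unittest.mock import patch

@patch('app.tasks.launch_task.run')
def test_launch_view(self, mock_run):
    # Patching run() on the Task object works even though the view does
    # "from app.tasks import launch_task", because both names point at the
    # same Task instance; the view's direct launch_task(...) call then
    # succeeds without touching a real Celery worker.
    correct_data = {'correct': 'params'}
    rs = self.app.post('/launch/', data=correct_data)
    self.assertEqual(rs.status_code, 200)
    mock_run.assert_called_once()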
