How to test HTTPError in FastAPI with pytest?

I'm trying to make an API server with FastAPI.
I have one endpoint named /hello on my project, which returns:
{"msg": "Hello World"}
as JSON with a 200 status.
However, it returns an error message when the request fails.
It's quite a simple service, but I want to test both cases, just for my study, so I also wrote test code with pytest.
Now I want to know: how can I raise HTTPException on purpose and test it?
# main.py (FastAPI)
import requests
from fastapi import FastAPI, HTTPException

app = FastAPI()

@app.get('/hello')
def read_main():
    try:
        return {"msg": "Hello World"}
    except requests.exceptions.HTTPError as e:
        raise HTTPException(status_code=400, detail='error occured')
# test.py
import pytest
from fastapi.testclient import TestClient

from main import app

client = TestClient(app)

# This test works
def test_read_main():
    response = client.get("/hello")
    assert response.json() == {"msg": "Hello World"}
    assert response.status_code == 200

def test_errors():
    # How can I test the except branch in endpoint "/hello"?
    # The code below never works as I expect:
    # with pytest.raises(HTTPException) as e:
    #     raise client.get("/hello").raise_for_status()
    # print(e.value)
    pass

The problem here is that your logic is way too simplistic to test. As luk2302 said, in its current form your except block is never reached and thus can never be tested. Replacing your logic with something more testable allows us to force an exception to be thrown.
File: app.py
from fastapi import FastAPI
from fastapi.exceptions import HTTPException
import requests

app = FastAPI()

# We've put this in a separate function so we can mock it.
def get_value():
    return {"msg": "Hello World"}

@app.get('/hello')
def read_main():
    try:
        return get_value()
    except requests.exceptions.HTTPError as e:
        raise HTTPException(status_code=400, detail='error occured')
Note that the return value of your endpoint is now actually provided by the get_value() function.
The test.py file would look like this:
from fastapi import HTTPException
from fastapi.testclient import TestClient
import requests
from pytest_mock import MockerFixture

import app

client = TestClient(app.app)

def test_read_main():
    response = client.get("/hello")
    assert response.json() == {"msg": "Hello World"}
    assert response.status_code == 200

def get_value_raise():
    raise requests.exceptions.HTTPError()

def test_errors(mocker: MockerFixture):
    mocker.patch("app.get_value", get_value_raise)
    response = client.get("/hello")
    assert response.status_code == 400
    assert response.json() == {"detail": "error occured"}
Note that we replace the app.get_value function with a function that will definitely raise the type of exception that you are catching in your application logic. The response of the test client is (however) just an HTTP response, but with status code 400 and a detail in the JSON body. We assert for that.
The result:
(.venv) jarro@MBP-van-Jarro test_http_exception % pytest test.py
=================================================== test session starts ===================================================
platform darwin -- Python 3.10.4, pytest-7.1.2, pluggy-1.0.0
rootdir: /Users/jarro/Development/fastapi-github-issues/SO/test_http_exception
plugins: anyio-3.6.1, mock-3.8.2
collected 2 items
test.py .. [100%]
==================================================== 2 passed in 0.17s ====================================================
I used pytest, and by extension pytest-mock, to mock the get_value function.
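If you'd rather not add pytest-mock as a dependency, the built-in monkeypatch fixture can do the same replacement. A minimal sketch, assuming the same app.py and test client as above:
import pytest
import requests

def get_value_raise():
    raise requests.exceptions.HTTPError()

def test_errors_with_monkeypatch(monkeypatch: pytest.MonkeyPatch):
    # Replace app.get_value with a function that always raises HTTPError.
    monkeypatch.setattr("app.get_value", get_value_raise)
    response = client.get("/hello")
    assert response.status_code == 400
    assert response.json() == {"detail": "error occured"}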

Related

How can I test for Exception cases in FastAPI with Pytest?

I am struggling to write test cases that will trigger an Exception within one of my FastAPI routes. I was thinking pytest.raises would do what I intend; however, that by itself doesn't seem to do what I thought it would.
Since the TestClient runs the API client pretty much separately, it makes sense that I would have this issue - that being said, I am not sure what the best practice is to ensure a high code coverage in testing.
Here is my test function:
def test_function_exception():
    with pytest.raises(Exception):
        response = client.post("/")
        assert response.status_code == 400
and here is the barebones route that I am hitting:
@router.post("/")
def my_function():
    try:
        do_something()
    except Exception as e:
        raise HTTPException(400, "failed to do something")
Is there any way I can catch this Exception without making changes to the API route? If changes are needed, what are the changes required to ensure thorough testing?
Following the discussion below the question, I assembled a working example for you. Typically, if you can't logically hit your except block, you can ensure that the try block raises an exception by monkey patching the function that is tried, replacing it with something that will definitely raise an exception. In the example below, I replace the function do_something() defined in app.py with replace_do_something(), which just raises an Exception when called.
You can put the following files in the same folder (not a module) and try it for yourself:
File app.py:
from fastapi import FastAPI, HTTPException

app = FastAPI()

def do_something():
    return "world"

@app.get("/myroute")
async def myroute():
    try:
        text = do_something()
        return {"hello": text}
    except Exception:
        raise HTTPException(400, "something went wrong")
File test_app.py:
import pytest
from fastapi.testclient import TestClient

from app import app

client = TestClient(app)

def replace_do_something():
    raise Exception()

def test_read_main(monkeypatch: pytest.MonkeyPatch):
    response = client.get("/myroute")
    assert response.status_code == 200
    assert response.json() == {"hello": "world"}

def test_read_main_with_error(monkeypatch: pytest.MonkeyPatch):
    monkeypatch.setattr("app.do_something", replace_do_something)
    # Here we replace any reference to do_something
    # with replace_do_something. Note the 'app.' prefix!
    response = client.get("/myroute")
    assert response.status_code == 400
    assert response.json() == {"detail": "something went wrong"}
You can call the test_app.py file with pytest (I also have pytest-cov installed to demonstrate the 100% coverage):
(venv) jarro@MacBook-Pro-van-Jarro fastapi-github-issues % pytest --cov=app SO/pytestwithmock/test_app.py
===================================================== test session starts =====================================================
platform darwin -- Python 3.10.5, pytest-7.1.2, pluggy-1.0.0
rootdir: /Users/jarro/Development/fastapi-github-issues
plugins: anyio-3.6.1, cov-3.0.0
collected 2 items
SO/pytestwithmock/test_app.py .. [100%]
---------- coverage: platform darwin, python 3.10.5-final-0 ----------
Name Stmts Miss Cover
----------------------------------------------
SO/pytestwithmock/app.py 13 0 100%
----------------------------------------------
TOTAL 13 0 100%
====================================================== 2 passed in 0.19s ======================================================

Unit Testing Replace remote API Server with predefined response

So, I have a server running FastAPI which makes an API call to a remote API upon request.
I am developing unit tests for this application, but here comes the question:
Can I, for the purpose of the test, replace a legit remote API server response with a predefined response?
Example of the tests being run:
from fastapi.testclient import TestClient
from web_api import app

client = TestClient(app)

def test_get_root():
    response = client.get('/')
    assert response.status_code == 200
    assert response.json() == {"running": True}
And my server:
from fastapi import FastAPI

app = FastAPI()

@app.get("/")
def home():
    return {"running": True}
This is a simple example, but on other endpoints of my API I would call an external remote API
def call_api(self, endpoint: str, params: dict):
    url = self.BASEURL + urllib.parse.quote(endpoint)
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
    except requests.exceptions.HTTPError as error:
        print(error)
    return response
Because I want to test the response of MY API, I would like to replace the remote API with a predefined response.
Also, one user request can end up in multiple background API requests with transformed pieces of data.
Edit
Here are some more details on the structure of the application:
#app.get("/stuff/.......",
# lots of params
)
def get_stuff_from_things(stuff:list, params):
api = API(api_key=...)
# Do some stuff with the params
things = generate_things_list(params)
api.search_things(params)
# Check the result
# do some other stuff
return some_response
class API:
BASE_URL = 'https://api.example.com/'
def search_things(self, params):
# Do some stuff
# like putting stuff in the params
for s in stuff:
s.update(self.get_thing(params)) # -> get_thing()
# Do some more stuff
return stuff
# get_thing <- search_things
def get_thing(self, params...):
# Some stuff
results = self.call_api('something', params) # -> call_api()
json = results.json()
# Some more stuff
things = []
for thing in json['things']:
t = Thing(thing)
things.append(t)
return things
# call_api <- get_thing
def call_api(self, endpoint:str, params:dict):
url = self.BASEURL + urllib.parse.quote(endpoint)
try:
response = requests.get(url, params=params)
response.raise_for_status()
except requests.exceptions.HTTPError as error:
print(error)
self.last_response = response
return response
N.B. That is pseudo-code; I simplified the functions by removing the parameters, etc.
I hope it is clear, thanks for your help.
A complex API method might look like this (please pay attention to the Depends mechanism - it is crucial):
import urllib

import requests
from fastapi import FastAPI, Depends

app = FastAPI()

# this can be in a different file
class RemoteCallWrapper:
    def call_api(self, baseurl: str, endpoint: str, params: dict):
        url = baseurl + urllib.parse.quote(endpoint)
        try:
            response = requests.get(url, params=params)
            response.raise_for_status()
        except requests.exceptions.HTTPError as error:
            print(error)
        return response

@app.get("/complex_api")
def calls_other_api(remote_call_wrapper=Depends(RemoteCallWrapper)):
    response = remote_call_wrapper.call_api("https://jsonplaceholder.typicode.com",
                                            "/todos/1", None)
    return {"result": response.json()}
Now, we wish to replace the remote call class. I wrote a helper library that simplifies the replacement for tests - pytest-fastapi-deps:
from unittest.mock import Mock

from fastapi.testclient import TestClient
from requests import Response
from web_api import app, RemoteCallWrapper

client = TestClient(app)

class MyRemoteCallWrapper:
    def call_api(self, baseurl: str, endpoint: str, params: dict):
        the_response = Mock(spec=Response)
        the_response.json.return_value = {"my": "response"}
        return the_response

def test_get_root(fastapi_dep):
    with fastapi_dep(app).override({RemoteCallWrapper: MyRemoteCallWrapper}):
        response = client.get('/complex_api')
        assert response.status_code == 200
        assert response.json() == {"result": {"my": "response"}}
You override the RemoteCallWrapper with your MyRemoteCallWrapper implementation for the test, which has the same spec.
As asserted - the response changed to our predefined response.
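If you prefer to avoid the extra library, FastAPI's built-in app.dependency_overrides mapping achieves the same substitution. A minimal sketch using the same app, RemoteCallWrapper, and MyRemoteCallWrapper stub as above:
from fastapi.testclient import TestClient
from web_api import app, RemoteCallWrapper

client = TestClient(app)

def test_get_root_with_builtin_override():
    # Wherever Depends(RemoteCallWrapper) is declared, FastAPI will now
    # construct MyRemoteCallWrapper instead.
    app.dependency_overrides[RemoteCallWrapper] = MyRemoteCallWrapper
    try:
        response = client.get('/complex_api')
        assert response.status_code == 200
        assert response.json() == {"result": {"my": "response"}}
    finally:
        # Clean up so other tests see the real dependency again.
        app.dependency_overrides.clear()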
It sounds like you'd want to mock your call_api() function.
With a small modification to call_api() (returning the result of .json()), you can easily mock the whole function while calling the endpoint in your tests.
I'll use two files, app.py and test_app.py, to demonstrate how I would do this:
# app.py
import urllib

import requests
from fastapi import FastAPI

app = FastAPI()

BASE_URL = 'https://api.example.com/'

def call_api(endpoint: str, params: dict):
    url = BASE_URL + urllib.parse.quote(endpoint)
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
    except requests.exceptions.HTTPError as error:
        print(error)
    return response.json()  # <-- This is the only change. Makes it easier to test things.

@app.get("/")
def home():
    return {"running": True}

@app.get("/call-api")
def make_call_to_external_api():
    # `endpoint` and `params` could be anything here and could be different
    # depending on the query parameters when calling this endpoint.
    response = call_api(endpoint="something", params={})
    # Do something with the response...
    result = response["some_parameter"]
    return result
# test_app.py
from unittest import mock

from fastapi import status
from fastapi.testclient import TestClient

import app as app_module
from app import app

def test_call_api_endpoint():
    test_response = {
        "some_parameter": "some_value",
        "another_parameter": "another_value",
    }
    # The line below will "replace" the result of `call_api()` with whatever
    # is given in `return_value`. The original function is never executed.
    with mock.patch.object(app_module, "call_api", return_value=test_response) as mock_call:
        with TestClient(app) as client:
            res = client.get("/call-api")
            assert res.status_code == status.HTTP_200_OK
            assert res.json() == "some_value"
    # Make sure the function has been called with the right parameters.
    # This could be dynamic based on how the endpoint has been called.
    mock_call.assert_called_once_with(endpoint="something", params={})
If app.py and test_app.py are in the same directory you can run the tests simply by running pytest inside that directory.

Python - mock function and assert exception

I built an API with FastAPI that interacts with DynamoDB.
At the beginning of my journey into Test-Driven Development, I have doubts about what to mock.
This is the get method, main.py:
router = FastAPI()

@router.get("/{device_id}")
def get_data(request: Request, device_id: str, query: DataQuery = Depends(DataQuery.depends)):
    da_service = DaService()
    try:
        start_time, end_time = DaService.validate_dates(query.start, query.end)
        return 'OK'
    except WrongDataFormat as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail='Internal Server Error')
In the test file I started by creating the success test, test_main.py:
from fastapi.testclient import TestClient
from unittest import mock
from utils.exceptions import WrongDataFormat
from endpoints.datalake import router

client = TestClient(router)

def test_success_response():
    with mock.patch('endpoints.datalake.DataApiService.get_datalake_data'):
        response = client.get('/xxxxx', params={'start': '1629886483', 'end': '1629886504'})
        assert response.status_code == 200
        assert isinstance(response.json(), dict)
Now I want to create the test for when the exception WrongDataFormat is returned, but I'm not succeeding... This is what I have right now:
def test_exception_response_():
    response = client.get('/xxxxx', params={'start': '2021-08-28', 'end': '2021-12-25'})
    assert response.status_code == 400
How can I mock the function main.validate_dates to return the exception WrongDataFormat and assert it correctly?
If you want to test the status code and message of a response you have to use TestClient(app) where app is the FastAPI application. Converting the exception into the appropriate response is the task of the application, not the router (which is what you're testing with).
client = TestClient(app)
This way you can test the API of your application (which is the most useful surface to test, imho).
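With that in place, you can force the WrongDataFormat branch by patching validate_dates so that it raises, then assert on the resulting 400 response. A minimal sketch, assuming the module paths from the question's snippets and that the import path for the application is adjusted to wherever your FastAPI() instance actually lives:
from unittest import mock

from fastapi.testclient import TestClient
from utils.exceptions import WrongDataFormat
from main import app  # hypothetical path; import your FastAPI() instance from wherever it is defined

client = TestClient(app)

def test_wrong_data_format_returns_400():
    # Make validate_dates raise inside the endpoint's try block.
    with mock.patch('endpoints.datalake.DaService.validate_dates',
                    side_effect=WrongDataFormat('wrong data format')):
        response = client.get('/xxxxx', params={'start': '2021-08-28', 'end': '2021-12-25'})
    assert response.status_code == 400
    # The endpoint raises HTTPException(400, detail=str(e)), so the detail
    # should echo the exception message (assuming WrongDataFormat behaves
    # like a standard Exception).
    assert response.json() == {'detail': 'wrong data format'}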

How to assert expected HTTPExceptions in FastAPI Pytest?

I have a simple router designed to throw an HTTPException:
@router.get('/404test')
async def test():
    raise HTTPException(HTTP_404_NOT_FOUND, "404 test!")
I want to assert that the exception was thrown, as per the FastAPI docs:
def test_test():
    response = client.get("/404test")
    assert response.status_code == 404
The exception is thrown before the assertion gets evaluated, marking the test as failed:
> raise HTTPException(HTTP_404_NOT_FOUND, "404 test!")
E fastapi.exceptions.HTTPException: (404, '404 test!')
What am I missing to properly anticipate HTTPExceptions in my test?
Assuming we have the following route set up in our FastAPI app:
@router.get('/404test')
async def test():
    raise HTTPException(HTTP_404_NOT_FOUND, "404 test!")
I was able to get a pytest to work with the following code snippet:
import pytest
from fastapi import HTTPException

def test_test():
    with pytest.raises(HTTPException) as err:
        client.get("/404test")
    assert err.value.status_code == 404
    assert err.value.detail == "404 test!"
It seems that err is the actual HTTPException object, not the JSON representation. When you catch this error you can then make assertions on that HTTPException object.
Make sure you run the assertions (assert) outside of the with statement block, because when the error is raised it stops all execution within the block after the HTTP call, so your test will pass but the assertions will never be evaluated.
You can reference the details and the status code and any other attributes on the Exception with err.value.XXX.
Maybe you can do this using the following sample code.
~/Desktop/fastapi_sample $ cat service.py
from fastapi import FastAPI, HTTPException

app = FastAPI()

@app.get("/wrong")
async def wrong_url():
    raise HTTPException(status_code=400, detail="404 test!")

~/Desktop/fastapi_sample $ cat test_service.py
from fastapi.testclient import TestClient
from fastapi_sample.service import app

client = TestClient(app)

def test_read_item_bad_token():
    response = client.get("/wrong")
    assert response.status_code == 400
    assert response.json() == {"detail": "404 test!"}
~/Desktop/fastapi_sample $ pytest
==================================================================== test session starts ====================================
platform darwin -- Python 3.7.9, pytest-6.1.0, py-1.9.0, pluggy-0.13.1
rootdir: /Users/i869007/Desktop/workspace/SAP/cxai/fastapi_postgres_tutorial
collected 1 item
test_service.py . [100%]
===================================================================== 1 passed in 0.78s ======================================

How to test that a model was used in a FastAPI route?

I'm trying to check if a specific model was used as an input parser for a FastAPI route. However, I'm not sure how to patch (or spy on) it.
I have the following file structure:
.
└── roo
    ├── __init__.py
    ├── main.py
    └── test_demo.py
main.py:
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class ItemModel(BaseModel):
    name: str

@app.post("/")
async def read_main(item: ItemModel):
    return {"msg": f"Item: {item.name}"}
test_demo.py:
from fastapi.testclient import TestClient
from unittest.mock import patch
from roo.main import app, ItemModel

client = TestClient(app)

def test_can_creating_new_item_users_proper_validation_model():
    with patch('roo.main.ItemModel', wraps=ItemModel) as patched_model:
        response = client.post("/", json={'name': 'good'})
        assert response.status_code == 200
        assert response.json() == {"msg": "Item: good"}
        assert patched_model.called
However, patched_model is never called (other asserts pass). I don't want to change the functionality or replace ItemModel in main.py, I just want to check if it was used.
My first approach to this was to wrap the read_main method and check that the item passed into the function is indeed an instance of ItemModel. But that was a dead-end approach because of the way FastAPI endpoints are prepared and stored: FastAPI stores a copy of the endpoint function objects in a list (see fastapi/routing.py), then evaluates at request time which endpoint to call.
from roo.main import app

def test_read_main():
    assert 'read_main' in [r.endpoint.__name__ for r in app.routes]
    # check that read_main was called *and* received an ItemModel instance?
My second approach involves spying or "breaking" the initialization of ItemModel, such that if the endpoint does indeed use that model, then a "broken" ItemModel would cause a request that hits that endpoint to fail. We "break" ItemModel by making use of the fact that (1) FastAPI calls the __init__ of your model during the request-response cycle, and (2) a 422 error response is propagated by default when the endpoint is unable to serialize a model properly:
class ItemModel(BaseModel):
    name: str

    def __init__(__pydantic_self__, **data: Any) -> None:
        print("Make a POST request and confirm that this is printed out")
        super().__init__(**data)
So in tests, just mock the __init__ method:
Example for pytest
import pytest
from fastapi.testclient import TestClient
from roo.main import app, ItemModel

def test_read_main(monkeypatch: pytest.MonkeyPatch):
    client = TestClient(app)

    def broken_init(self, **data):
        pass  # `name` and other fields won't be set

    monkeypatch.setattr(ItemModel, '__init__', broken_init)
    with pytest.raises(AttributeError) as exc:
        response = client.post("/", json={'name': 'good'})
        assert 422 == response.status_code
    assert "'ItemModel' object has no attribute" in str(exc.value)
Example for pytest + pytest-mock's mocker.spy
from fastapi.testclient import TestClient
from pytest_mock import MockerFixture
from roo.main import app, ItemModel

def test_read_main(mocker: MockerFixture):
    client = TestClient(app)
    spy = mocker.spy(ItemModel, '__init__')
    client.post("/", json={'name': 'good'})
    spy.assert_called()
    spy.assert_called_with(**{'name': 'good'})
Example for unittest
from fastapi.testclient import TestClient
from roo.main import app, ItemModel
from unittest.mock import patch

def test_read_main():
    client = TestClient(app)
    # Wrapping __init__ like this isn't really correct, but serves the purpose
    with patch.object(ItemModel, '__init__', wraps=ItemModel.__init__) as mocked_init:
        response = client.post("/", json={'name': 'good'})
        assert 422 == response.status_code
        mocked_init.assert_called()
        mocked_init.assert_called_with(**{'name': 'good'})
Again, the tests check that the endpoint fails in either serializing into an ItemModel or in accessing item.name, which will only happen if the endpoint is indeed using ItemModel.
If you modify the endpoint from item: ItemModel into item: OtherModel:
class OtherModel(BaseModel):
    name: str

class ItemModel(BaseModel):
    name: str

@app.post("/")
async def read_main(item: OtherModel):  # <----
    return {"msg": f"Item: {item.name}"}
then running the tests should now fail because the endpoint is now creating the wrong object:
def test_read_main(mocker: MockerFixture):
    client = TestClient(app)
    spy = mocker.spy(ItemModel, '__init__')
    client.post("/", json={'name': 'good'})
>       spy.assert_called()
E       AssertionError: Expected '__init__' to have been called.

test_demo_spy.py:11: AssertionError

    with pytest.raises(AttributeError) as exc:
        response = client.post("/", json={'name': 'good'})
>       assert 422 == response.status_code
E       assert 422 == 200
E         +422
E         -200

test_demo_pytest.py:15: AssertionError
The assertion error for 422 == 200 is a bit confusing, but it basically means that even though we "broke" ItemModel, we still got a 200/OK response, which means ItemModel is not being used.
Likewise, if you modified the tests first and mocked out the __init__ of OtherModel instead of ItemModel, then running the tests without modifying the endpoint will result in similar failing tests:
def test_read_main(mocker: MockerFixture):
    client = TestClient(app)
    spy = mocker.spy(OtherModel, '__init__')
    client.post("/", json={'name': 'good'})
>       spy.assert_called()
E       AssertionError: Expected '__init__' to have been called.

def test_read_main():
    client = TestClient(app)
    with patch.object(OtherModel, '__init__', wraps=OtherModel.__init__) as mocked_init:
        response = client.post("/", json={'name': 'good'})
        # assert 422 == response.status_code
>       mocked_init.assert_called()
E       AssertionError: Expected '__init__' to have been called.
The assertion here is less confusing because it says we expected that the endpoint will call OtherModel's __init__, but it wasn't called. It should pass after modifying the endpoint to use item: OtherModel.
One last thing to note is that since we are manipulating the __init__, it can cause the "happy path" to fail, so it should now be tested separately. Make sure to undo/revert the mocks and patches:
Example for pytest
def test_read_main(monkeypatch: pytest.MonkeyPatch):
    client = TestClient(app)

    def broken_init(self, **data):
        pass

    # Are we really using ItemModel?
    monkeypatch.setattr(ItemModel, '__init__', broken_init)
    with pytest.raises(AttributeError) as exc:
        response = client.post("/", json={'name': 'good'})
        assert 422 == response.status_code
    assert "'ItemModel' object has no attribute" in str(exc.value)

    # Okay, really using ItemModel. Does it work correctly?
    monkeypatch.undo()
    response = client.post("/", json={'name': 'good'})
    assert response.status_code == 200
    assert response.json() == {"msg": "Item: good"}
Example for pytest + pytest-mock's mocker.spy
from pytest_mock import MockerFixture
from fastapi.testclient import TestClient
from roo.main import app, ItemModel

def test_read_main(mocker: MockerFixture):
    client = TestClient(app)

    # Are we really using ItemModel?
    spy = mocker.spy(ItemModel, '__init__')
    client.post("/", json={'name': 'good'})
    spy.assert_called()
    spy.assert_called_with(**{'name': 'good'})

    # Okay, really using ItemModel. Does it work correctly?
    mocker.stopall()
    response = client.post("/", json={'name': 'good'})
    assert response.status_code == 200
    assert response.json() == {"msg": "Item: good"}
Example for unittest
def test_read_main():
    client = TestClient(app)

    # Are we really using ItemModel?
    with patch.object(ItemModel, '__init__', wraps=ItemModel.__init__) as mocked_init:
        response = client.post("/", json={'name': 'good'})
        assert 422 == response.status_code
        mocked_init.assert_called()
        mocked_init.assert_called_with(**{'name': 'good'})

    # Okay, really using ItemModel. Does it work correctly?
    response = client.post("/", json={'name': 'good'})
    assert response.status_code == 200
    assert response.json() == {"msg": "Item: good"}
All in all, you might want to consider if/why it's useful to check exactly which model is used. Normally, I just check that passing in valid request params returns the expected valid response and, likewise, that invalid requests return an error response.
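For reference, that plain request/response style of test needs no patching at all; a minimal sketch against the main.py from the question:
from fastapi.testclient import TestClient
from roo.main import app

client = TestClient(app)

def test_valid_payload_returns_200():
    response = client.post("/", json={'name': 'good'})
    assert response.status_code == 200
    assert response.json() == {"msg": "Item: good"}

def test_invalid_payload_returns_422():
    # Omitting the required 'name' field triggers FastAPI's validation error.
    response = client.post("/", json={})
    assert response.status_code == 422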
