How to set up Bearer Token Authentication in AsyncOAuth2Client with authlib - python

I'm calling an API that has a bearer token authentication.
With the regular requests package I implemented it successfully, but then I had to upgrade the class to run concurrent requests.
I found authlib.integrations.httpx_client.AsyncOAuth2Client to have the OAuth2 piece, and then authlib.oauth2.rfc6750.BearerTokenValidator to have the Bearer token piece.
But then I'm not able to run it correctly.
In the meantime I did try aiohttp, but moved to httpx since it seemed better with the OAuth2 authentication.
Still, this is my first time meeting asyncio, httpx and friends, so all suggestions are welcome.
The successful part with requests comes first:
class BearerAuth(requests.auth.AuthBase):
    def __init__(self, token):
        self.token = token

    def __call__(self, a_request):
        a_request.headers['Authorization'] = f'Bearer {self.token}'
        return a_request

class MySession(Session):
    def __init__(self):
        super().__init__()
        self.set_token()

    def set_token(self):
        auth_enc = encode64('{username}:{password}'.format(**access_dict))
        the_headers = {'Authorization': f'Basic {auth_enc}'}
        auth_resp = self.post(AUTH_URL, headers=the_headers)
        self.token = auth_resp.json()

    def call_api(self):
        for _ in range(tries):
            a_resp = self.get(API_URL, auth=BearerAuth(self.token['access_token']))
            if a_resp.status_code == 401:
                self.set_token()
                continue
            elif a_resp.status_code == 200:
                return a_resp
            else:
                return None
The unsuccessful part with AsyncOAuth2Client is next:
class AsyncBearerAuth(BearerTokenValidator):
    def __init__(self, token):
        self.token = token

    def authenticate_token(self, token):
        return token

    def __call__(self, a_request):
        a_request.headers['Authorization'] = f'Bearer {self.token}'
        return a_request

class MyAsyncClient(AsyncOAuth2Client):
    def __init__(self):
        AsyncOAuth2Client.__init__(self, AUTH_KEY, AUTH_SECRET)
        # self.create_authorization_url(AUTH_URL)

    async def set_token(self):
        auth_data = {'grant_type': 'password',
                     'username': AUTH_USERNAME,
                     'password': AUTH_PASSWORD}
        self.token = await self.fetch_token(AUTH_URL, **auth_data)

    async def call_api(self):
        if not hasattr(self, 'token'):
            await self.set_token()
        for _ in range(tries):
            the_resp = await self.get(API_URL,
                                      auth=AsyncBearerAuth(self.token['access_token']))
            if the_resp.status_code == 401:
                await self.set_token()
                continue
            elif the_resp.status_code == 200:
                return the_resp
            else:
                return None
async def main():
    async with MyAsyncClient() as client:
        the_tasks = []
        for _ in range(10):
            a_task = asyncio.create_task(client.call_api())
            the_tasks.append(a_task)
        results = await asyncio.gather(*the_tasks, return_exceptions=True)
        do_something(results)
The error lies in this piece:
the_resp = await self.get(API_URL,
                          auth=AsyncBearerAuth(self.token['access_token']))
and it says:
~\anaconda3\lib\site-packages\httpx\_client.py in _send_single_request(self, request)
1683 Sends a single request, without handling any redirections.
1684 """
-> 1685 transport = self._transport_for_url(request.url)
1686 timer = Timer()
1687 await timer.async_start()
And if I remove the __call__ from AsyncBearerAuth, the error that I get is:
~\anaconda3\lib\site-packages\httpx\_auth.py in auth_flow(self, request)
113
114 def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
--> 115 yield self._func(request)
116
117
TypeError: __call__() missing 2 required positional arguments: 'scope' and 'request'
Other questions that I didn't fully understand are:
Am I right in inheriting from BearerTokenValidator?
I'm following the documentation on Bearer Token Usage but I'm not sure about the Validator object here.
I have also tried commenting and uncommenting the create_authorization_url with no success.
Thank you for your help.
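One thing that might help (a sketch, not verified against your API): httpx expects custom authentication classes to subclass httpx.Auth and implement auth_flow(), while authlib's BearerTokenValidator belongs to the resource-server side (validating incoming tokens) and is not designed to be passed as an httpx auth argument, which is why you end up in httpx's auth_flow with mismatched arguments.

import httpx

class AsyncBearerAuth(httpx.Auth):
    def __init__(self, token):
        self.token = token

    def auth_flow(self, request):
        # auth_flow is a generator: attach the header, then yield the request
        request.headers['Authorization'] = f'Bearer {self.token}'
        yield request

Also note that AsyncOAuth2Client should already attach the token obtained from fetch_token() to its outgoing requests, so once self.token is set you may not need a custom auth object at all.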

Related

how to assert that fastAPI cache is working?

So I'm writing a function that uses FastAPI cache to avoid making a bunch of POST calls, and I'm wondering whether it is possible to write a test that validates the function is only called once and the cached value is reused afterwards.
from fastapi_cache.decorator import cache

@cache(expire=60)
async def get_auth_token() -> str:
    # just to exemplify
    return "token"

class TestAuth(IsolatedAsyncioTestCase):
    async def test_get_token_success(self):
        """Test get token"""
        # success
        _token = await get_auth_token()
        assert _token is not None
        assert _token == "token"
        # here is the trick
        # calling again should not call post again
        _token = await get_auth_token()
        mock_post.assert_called_once()  # mock_post: the patched POST call (patching not shown in this snippet)
but I'm getting this error
AssertionError: Expected 'post' to have been called once. Called 2 times.
Another idea: return a token that embeds a timestamp, so you can check the timestamp to verify whether the value came from the function or from the cache.
For example:
import time
from fastapi_cache.decorator import cache

@cache(expire=60)
async def get_auth_token() -> str:
    # just to exemplify
    return str(time.time())

class TestAuth(IsolatedAsyncioTestCase):
    async def test_get_token_success(self):
        """Test get token"""
        timestamp = str(time.time())
        # success
        token1 = await get_auth_token()
        assert token1 is not None
        assert timestamp <= token1 <= str(time.time())
        # here is the trick
        # calling again should not call post again
        token2 = await get_auth_token()
        assert token1 == token2
        # check expire
        time.sleep(60)
        token3 = await get_auth_token()
        assert token3 != token1
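One more thing worth checking (assuming the fastapi-cache2 package): the @cache decorator only caches once FastAPICache has been initialized with a backend, so the test needs to set that up, for example with an in-memory backend. A sketch:

from fastapi_cache import FastAPICache
from fastapi_cache.backends.inmemory import InMemoryBackend

class TestAuth(IsolatedAsyncioTestCase):
    async def asyncSetUp(self):
        # initialize an in-memory cache backend so @cache actually caches in tests
        FastAPICache.init(InMemoryBackend())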

How to mock async response

I'm trying to test an async request but I couldn't find out how. I tried with the patch decorator and with AsyncMock... Every time, I got either an __aexit__ error or "AsyncMock can't be used in 'await' expression"... Where am I wrong?
class RequestService:
    async def requestPostPicture(self, session: aiohttp.ClientSession, photoData: dict):
        try:
            with aiohttp.MultipartWriter('form-data') as mpwriter:
                part = mpwriter.append(photoData['file'][1], {'content-type': photoData['file'][2]})
                part.set_content_disposition('form-data', name='file', filename=photoData['file'][0])
                async with session.post('https://www.api-url.com', data=mpwriter, headers=self.headers) as resp:
                    if isinstance(resp, dict):
                        return resp
                    apiResponse = await resp.json()
                    return apiResponse
        except Exception as error:
            return {'error': str(error)}
My test:
class TestRequestService(IsolatedAsyncioTestCase):
    @patch('aiohttp.ClientSession.post')
    async def testRequestPostPictureDict(self, mockPost):
        mockPost.__aenter__.return_value = {"error": "test"}
        requestservice = RequestService()
        pictureTest = {'file': ('photodatatest.jpg', 'photodatatest', 'image/jpeg')}
        connector = aiohttp.TCPConnector(limit=15)
        async with aiohttp.ClientSession(connector=connector) as sessionPicture:
            returnValue = await requestservice.requestPostPicture(sessionPicture, pictureTest)
        self.assertEqual(returnValue, {'error': 'test'})

    async def testRequestPostPictureDict(self):
        mock = aiohttp.ClientSession
        mock.post = MagicMock()
        mock.post.return_value.__aenter__.return_value = {'error': 'test'}
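One pattern that may work here (a sketch, assuming Python 3.8+, where MagicMock supports async magic methods): session.post() itself is a synchronous call that returns an async context manager, so mock what __aenter__ yields rather than awaiting post directly, and make .json an AsyncMock.

from unittest.mock import AsyncMock, MagicMock, patch

class TestRequestService(IsolatedAsyncioTestCase):
    @patch('aiohttp.ClientSession.post')
    async def testRequestPostPictureDict(self, mockPost):
        fakeResp = MagicMock()
        fakeResp.json = AsyncMock(return_value={'error': 'test'})
        # post(...) returns an object whose __aenter__ yields the fake response
        mockPost.return_value.__aenter__.return_value = fakeResp

        requestservice = RequestService()
        requestservice.headers = {}  # assumes headers is normally set elsewhere in the class
        pictureTest = {'file': ('photodatatest.jpg', 'photodatatest', 'image/jpeg')}
        async with aiohttp.ClientSession() as sessionPicture:
            returnValue = await requestservice.requestPostPicture(sessionPicture, pictureTest)
        self.assertEqual(returnValue, {'error': 'test'})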

How to mock simultaneous requests that are been made with asyncio.gather using pytest, aiohttp and aioresponses?

I have a piece of code which uses asyncio.gather to make simultaneous requests:
estimated_income, judicial_records = await asyncio.gather(
    *(client.get_estimated_income(), client.get_judicial_records()), return_exceptions=True
)
# `client.get_estimated_income()` calls `CREDIT_BUREAU_URL`
# `client.get_judicial_records()` calls `NATIONAL_ARCHIVES_URL`
In my tests I'm trying to simulate some scenarios by mocking the requests status:
mock_aioresponse.get(NATIONAL_ARCHIVES_URL, status=200)
mock_aioresponse.get(CREDIT_BUREAU_URL, status=400)
If I run a single test, it works as expected, but if I run more than one (and the others don't even have to use mock_aioresponse) I reach that piece of code twice and start to get Connection refused errors the second time (the first one works just fine), which propagates to the tests and makes them fail.
The weirdest thing to me is reaching that function twice if I run more than one test.
How can I use aioresponses to accomplish my test cases?
CODE:
# main.py
@app.get(
    "/leads/{lead_id}/sales_pipeline",
    response_model=LeadRead,
    responses={status.HTTP_404_NOT_FOUND: {"model": NotFoundResponse}},
)
def sales_pipeline(lead_id: int, db: Session = Depends(get_db)):
    lead = db.get(Lead, lead_id)
    if not lead:
        raise HTTPException(status_code=404, detail="Lead not found")
    pipeline_history = PipelineHistory(lead_id=lead.id)
    db.add(pipeline_history)
    db.commit()
    db.refresh(pipeline_history)
    # dispatch an event to handlers.py
    dispatch(event_name=SALES_PIPELINE_ENTRYPOINT_EVENT_NAME, payload={"id": pipeline_history.id})
    return lead

# handlers.py
async def _check_if_lead_is_able_to_become_prospect(
    client: LeadExternalSystemsClient,
) -> Tuple[Optional[bool], Optional[str]]:
    error_messages: List[str] = []
    estimated_income, judicial_records = await asyncio.gather(
        *(client.get_estimated_income(), client.get_judicial_records()), return_exceptions=True
    )
    if isinstance(estimated_income, LeadExternalSystemsClient.LeadExternalSystemsException):
        error_messages.append("Credit Bureau network error")
    if isinstance(judicial_records, LeadExternalSystemsClient.LeadExternalSystemsException):
        error_messages.append("National Archives network error")
    # more code

# `LeadExternalSystemsClient` class at client.py
class LeadExternalSystemsClient:
    class LeadExternalSystemsException(Exception):
        pass

    def __init__(self, lead: Lead, timeout: int = 30):
        self.lead = lead
        self._session = ClientSession(
            timeout=ClientTimeout(total=timeout),
            connector=TCPConnector(limit=30, ssl=False),
            raise_for_status=True,
        )

    async def __aenter__(self) -> "LeadExternalSystemsClient":
        return self

    async def __aexit__(self, *_, **__) -> None:
        await self._session.close()

    async def _request(self, method: str, url: str) -> Any:
        try:
            response = self._session.request(method=method, url=url)
            return await response.json()
        except ClientError as exception:
            raise self.LeadExternalSystemsException(str(exception))

    async def get_estimated_income(self) -> Dict[str, float]:
        result = await self._request(method="GET", url=CREDIT_BUREAU_URL)
        # more code

    async def get_judicial_records(self) -> List[Dict[str, str]]:
        result = await self._request(method="GET", url=NATIONAL_ARCHIVES_URL)
        # more code

# tests
@pytest.mark.usefixtures("mock_engine_for_test")
def test_estimated_income_network_error(client, lead, mocker, mock_aioresponse):
    # GIVEN
    mocker.patch(
        "app.consumers.handlers.LeadExternalSystemsClient.personal_information_is_valid",
        return_value=True,
    )
    mock_aioresponse.get(NATIONAL_ARCHIVES_URL, status=200)
    mock_aioresponse.get(CREDIT_BUREAU_URL, status=400)
    # WHEN
    response = client.get(f"/leads/{lead.id}/sales_pipeline")
    result = client.get(f"/leads/{lead.id}").json()
    # THEN
    assert response.status_code == status.HTTP_200_OK
    assert result["is_prospect"] is False
    assert len(result["pipeline_histories"]) == 1
    assert result["pipeline_histories"][0]["started_at"] is not None
    assert result["pipeline_histories"][0]["finished_at"] is not None
    assert result["pipeline_histories"][0]["extra_infos"] == "Credit Bureau network error"
    assert result["pipeline_histories"][0]["status"] == PipelineStatus.NETWORK_ERROR.name
Looks like the solution is to pass repeat=True to aioresponses().get()
https://bytemeta.vip/repo/pnuckowski/aioresponses/issues/205
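If that is the case, the registration in the test would look roughly like this (a sketch based on the linked issue; repeat=True keeps the mocked endpoint registered after the first match):

mock_aioresponse.get(NATIONAL_ARCHIVES_URL, status=200, repeat=True)
mock_aioresponse.get(CREDIT_BUREAU_URL, status=400, repeat=True)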

A class with a mutable variable. API token

I am trying to create an API client, but the problem is that the token changes every hour.
I want to create a class that contains a "token" variable that should change every hour.
The idea is to get a token when the script first runs, create this object, and then use it.
I have a function that successfully receives a new token on execution.
class AccessToken:
    def __init__(self):
        self.token = get_new_access_token()

    def new_token(self):
        self.token = get_new_access_token()
        print(self.token)
        while True:
            time.sleep(3600)
            new_token()

def get_new_access_token():
    body = {"client_id": config.ESPORT_ID,
            "client_secret": config.ESPORT_SECRET_KEY_API
            }
    resp = requests.post(f'https://.........../oauth/token', json=body)
    return resp
One way is to record when the token was made, and compare timestamps to see whether the token is still valid.
class AccessToken:
    TOKEN_TTL = 60 * 60 - 60  # -60: a safety buffer

    def __init__(self):
        self.refresh_token()

    def refresh_token(self):
        self.token = get_new_access_token()
        self.token_gen_time = time.time()  # when the token was made

    def get_token(self):
        if time.time() > self.token_gen_time + self.TOKEN_TTL:
            # token may have expired
            self.refresh_token()
        return self.token

access_token = AccessToken()

while True:
    time.sleep(5)
    token = access_token.get_token()
    # do something with token
class AccessToken:
    TOKEN_TTL = 60 * 60 - 60

    def __init__(self):
        self.token = get_new_access_token()
        self.token_gen_time = time.time()

    def refresh_token(self):
        self.token = get_new_access_token()
        self.token_gen_time = time.time()  # when the token was made

    def get_token(self):
        if time.time() > self.token_gen_time + self.TOKEN_TTL:
            # token may have expired
            self.refresh_token()
        return self.token
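Usage would then look roughly like this (a sketch; API_URL is a placeholder, and it assumes get_new_access_token() is adjusted to return the token string rather than the whole Response object):

access_token = AccessToken()
resp = requests.get(API_URL,
                    headers={'Authorization': f'Bearer {access_token.get_token()}'})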

Get starlette request body in the middleware context

I have such middleware
class RequestContext(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next: RequestResponseEndpoint):
        request_id = request_ctx.set(str(uuid4()))  # generate a uuid for the request
        body = await request.body()
        if body:
            logger.info(...)  # log request with body
        else:
            logger.info(...)  # log request without body
        response = await call_next(request)
        response.headers['X-Request-ID'] = request_ctx.get()
        logger.info("%s" % (response.status_code))
        request_ctx.reset(request_id)
        return response
The line body = await request.body() freezes all requests that have a body, and I get 504 from all of them. How can I safely read the request body in this context? I just want to log the request parameters.
I would not create a middleware that inherits from BaseHTTPMiddleware, since it has some issues; FastAPI gives you an opportunity to create your own routers, and in my experience this approach is way better.
from fastapi import APIRouter, FastAPI, Request, Response, Body
from fastapi.routing import APIRoute
from typing import Callable, List
from uuid import uuid4

class ContextIncludedRoute(APIRoute):
    def get_route_handler(self) -> Callable:
        original_route_handler = super().get_route_handler()

        async def custom_route_handler(request: Request) -> Response:
            request_id = str(uuid4())
            response: Response = await original_route_handler(request)
            if await request.body():
                print(await request.body())
            response.headers["Request-ID"] = request_id
            return response

        return custom_route_handler

app = FastAPI()
router = APIRouter(route_class=ContextIncludedRoute)

@router.post("/context")
async def non_default_router(bod: List[str] = Body(...)):
    return bod

app.include_router(router)
Works as expected.
b'["string"]'
INFO: 127.0.0.1:49784 - "POST /context HTTP/1.1" 200 OK
In case you still want to use BaseHTTPMiddleware, I recently ran into this problem and came up with a solution:
Middleware Code
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
import json

from .async_iterator_wrapper import async_iterator_wrapper as aiwrap

class some_middleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next: RequestResponseEndpoint):
        # --------------------------
        # DO WHATEVER YOU WANT TO DO HERE
        # --------------------------

        response = await call_next(request)

        # Consuming FastAPI response and grabbing body here
        resp_body = [section async for section in response.__dict__['body_iterator']]
        # Repairing FastAPI response
        response.__setattr__('body_iterator', aiwrap(resp_body))

        # Formatting response body for logging
        try:
            resp_body = json.loads(resp_body[0].decode())
        except:
            resp_body = str(resp_body)
async_iterator_wrapper code, from "TypeError from Python 3 async for loop":
class async_iterator_wrapper:
    def __init__(self, obj):
        self._it = iter(obj)

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            value = next(self._it)
        except StopIteration:
            raise StopAsyncIteration
        return value
I really hope this can help someone! I found this very helpful for logging.
Big thanks to @Eddified for the aiwrap class.
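Wiring it up is then just the usual add_middleware call (a sketch, assuming a FastAPI app object):

from fastapi import FastAPI

app = FastAPI()
app.add_middleware(some_middleware)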
You can do this safely with a generic ASGI middleware:
from typing import Iterable, List, Protocol, Generator

import pytest
from starlette.responses import Response
from starlette.testclient import TestClient
from starlette.types import ASGIApp, Scope, Send, Receive, Message

class Logger(Protocol):
    def info(self, message: str) -> None:
        ...

class BodyLoggingMiddleware:
    def __init__(
        self,
        app: ASGIApp,
        logger: Logger,
    ) -> None:
        self.app = app
        self.logger = logger

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if scope["type"] != "http":
            await self.app(scope, receive, send)
            return

        done = False
        chunks: "List[bytes]" = []

        async def wrapped_receive() -> Message:
            nonlocal done
            message = await receive()
            if message["type"] == "http.disconnect":
                done = True
                return message
            body = message.get("body", b"")
            more_body = message.get("more_body", False)
            if not more_body:
                done = True
            chunks.append(body)
            return message

        try:
            await self.app(scope, wrapped_receive, send)
        finally:
            while not done:
                await wrapped_receive()
            self.logger.info(b"".join(chunks).decode())  # or something

async def consume_body_app(scope: Scope, receive: Receive, send: Send) -> None:
    done = False
    while not done:
        msg = await receive()
        done = "more_body" not in msg
    await Response()(scope, receive, send)

async def consume_partial_body_app(scope: Scope, receive: Receive, send: Send) -> None:
    await receive()
    await Response()(scope, receive, send)

class TestException(Exception):
    pass

async def consume_body_and_error_app(scope: Scope, receive: Receive, send: Send) -> None:
    done = False
    while not done:
        msg = await receive()
        done = "more_body" not in msg
    raise TestException

async def consume_partial_body_and_error_app(scope: Scope, receive: Receive, send: Send) -> None:
    await receive()
    raise TestException

class TestLogger:
    def __init__(self, recorder: List[str]) -> None:
        self.recorder = recorder

    def info(self, message: str) -> None:
        self.recorder.append(message)

@pytest.mark.parametrize(
    "chunks, expected_logs", [
        ([b"foo", b" ", b"bar", b" ", "baz"], ["foo bar baz"]),
    ]
)
@pytest.mark.parametrize(
    "app",
    [consume_body_app, consume_partial_body_app]
)
def test_body_logging_middleware_no_errors(chunks: Iterable[bytes], expected_logs: Iterable[str], app: ASGIApp) -> None:
    logs: List[str] = []
    client = TestClient(BodyLoggingMiddleware(app, TestLogger(logs)))
    def chunk_gen() -> Generator[bytes, None, None]:
        yield from iter(chunks)
    resp = client.get("/", data=chunk_gen())
    assert resp.status_code == 200
    assert logs == expected_logs

@pytest.mark.parametrize(
    "chunks, expected_logs", [
        ([b"foo", b" ", b"bar", b" ", "baz"], ["foo bar baz"]),
    ]
)
@pytest.mark.parametrize(
    "app",
    [consume_body_and_error_app, consume_partial_body_and_error_app]
)
def test_body_logging_middleware_with_errors(chunks: Iterable[bytes], expected_logs: Iterable[str], app: ASGIApp) -> None:
    logs: List[str] = []
    client = TestClient(BodyLoggingMiddleware(app, TestLogger(logs)))
    def chunk_gen() -> Generator[bytes, None, None]:
        yield from iter(chunks)
    with pytest.raises(TestException):
        client.get("/", data=chunk_gen())
    assert logs == expected_logs

if __name__ == "__main__":
    import os
    pytest.main(args=[os.path.abspath(__file__)])
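To plug it into an application, wrap the ASGI app and pass any object with an info() method as the logger; for example (a sketch using a standard-library logger and a Starlette app):

import logging
from starlette.applications import Starlette

app = Starlette()
app.add_middleware(BodyLoggingMiddleware, logger=logging.getLogger("request_body"))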
It turns out await request.json() can only be called once per request cycle. So if you need to access the request body in multiple middlewares, for filtering or authentication etc., there is a workaround: create a custom middleware that copies the contents of the request body into request.state. The middleware should be loaded as early as necessary. Each middleware next in the chain, or the controller, can then access the request body from request.state instead of calling await request.json() again. Here's an example:
class CopyRequestMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        request_body = await request.json()
        request.state.body = request_body
        response = await call_next(request)
        return response

class LogRequestMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        # Since it'll be loaded after CopyRequestMiddleware it can access request.state.body.
        request_body = request.state.body
        print(request_body)
        response = await call_next(request)
        return response
The controller can then read the request body from request.state as well:
request_body = request.state.body
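For example, a route handler could read it like this (a sketch; the path and handler name are illustrative):

@app.post("/items")
async def create_item(request: Request):
    request_body = request.state.body  # populated by CopyRequestMiddleware
    return {"received": request_body}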
Just because such a solution hasn't been stated yet: this is what worked for me.
from typing import Callable, Awaitable
import logging

from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response, StreamingResponse
from starlette.concurrency import iterate_in_threadpool

class LogStatsMiddleware(BaseHTTPMiddleware):
    async def dispatch(  # type: ignore
        self, request: Request, call_next: Callable[[Request], Awaitable[StreamingResponse]],
    ) -> Response:
        response = await call_next(request)
        response_body = [section async for section in response.body_iterator]
        response.body_iterator = iterate_in_threadpool(iter(response_body))
        logging.info(f"response_body={response_body[0].decode()}")
        return response

def init_app(app):
    app.add_middleware(LogStatsMiddleware)
iterate_in_threadpool actually turns a plain iterator into an async iterator.
If you look at the implementation of starlette.responses.StreamingResponse, you'll see that this function is used for exactly this.
If you only want to read the request parameters, the best solution I found was to implement a "route_class" and pass it as an argument when creating the fastapi.APIRouter, because parsing the request within a middleware is considered problematic.
The intention behind the route handler, as I understand it, is to attach exception-handling logic to specific routers, but since it's invoked before every route call, you can use it to access the Request argument.
FastAPI documentation
You could do something as follows:
class MyRequestLoggingRoute(APIRoute):
    def get_route_handler(self) -> Callable:
        original_route_handler = super().get_route_handler()

        async def custom_route_handler(request: Request) -> Response:
            body = await request.body()
            if body:
                logger.info(...)  # log request with body
            else:
                logger.info(...)  # log request without body
            try:
                return await original_route_handler(request)
            except RequestValidationError as exc:
                detail = {"errors": exc.errors(), "body": body.decode()}
                raise HTTPException(status_code=422, detail=detail)

        return custom_route_handler
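The route class is then attached when building the router, roughly like this (a sketch; names are illustrative):

router = APIRouter(route_class=MyRequestLoggingRoute)
app.include_router(router)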
The issue is in Uvicorn. The FastAPI/Starlette::Request class does cache the body, but the Uvicorn function RequestResponseCycle::request() does not, so if you instantiate two or more Request classes and ask for the body(), only the instance that asks for the body first will have a valid body.
I solved it by creating a mock function that returns a cached copy of the request():
class LogRequestsMiddleware:
    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        receive_cached_ = await receive()

        async def receive_cached():
            return receive_cached_

        request = Request(scope, receive=receive_cached)
        # do what you need here
        await self.app(scope, receive_cached, send)

app.add_middleware(LogRequestsMiddleware)
