A class with a mutable variable. API token - python

I am trying to create an API client, but the problem is that the token changes every hour.
I want to create a class that contains a "token" variable that should change every hour.
The idea is to get a token when the script first runs, create this object, and then use it.
I have a function that successfully receives a new token on execution.
class AccessToken:
    """Holds an API access token and refreshes it every hour.

    NOTE(review): new_token() blocks the calling thread forever; run it in a
    background thread, or prefer the TTL-checking variant shown later.
    """

    def __init__(self):
        # Fetch the first token immediately on construction.
        self.token = get_new_access_token()

    def new_token(self):
        """Refresh the token now, then once per hour, forever (blocking)."""
        self._refresh()
        while True:
            time.sleep(3600)
            # BUG FIX: the original called bare `new_token()` here, which is a
            # NameError inside a method (and self.new_token() would have
            # recursed once per hour). Loop over a helper instead.
            self._refresh()

    def _refresh(self):
        # Fetch a fresh token and remember it on the instance.
        self.token = get_new_access_token()
        print(self.token)
def get_new_access_token():
    """Request a fresh OAuth token from the auth endpoint.

    Returns the decoded JSON payload (which carries the access token),
    not the raw Response object.

    NOTE(review): the URL placeholder must be filled in, and `config` must
    define ESPORT_ID and ESPORT_SECRET_KEY_API — confirm against the project.
    """
    body = {
        "client_id": config.ESPORT_ID,
        "client_secret": config.ESPORT_SECRET_KEY_API,
    }
    # The f-prefix on the URL was pointless (no placeholders) — plain string.
    resp = requests.post('https://.........../oauth/token', json=body)
    # Fail loudly on HTTP errors instead of silently storing an error response.
    resp.raise_for_status()
    # BUG FIX: the original returned the Response object itself; callers
    # expect the token payload.
    return resp.json()

One way is to record when the token was made, and compare timestamps to see if the token is still valid.
class AccessToken:
    """Caches an API token and transparently refreshes it when stale."""

    # Tokens live one hour; subtract a 60 s buffer so we refresh slightly
    # early rather than hand out an almost-expired token.
    # BUG FIX: the original line carried the note inline as (`-60`: buffer),
    # which is a syntax error.
    TOKEN_TTL = 60 * 60 - 60

    def __init__(self):
        # BUG FIX: the original called self.new_token(), which does not exist
        # on this class; the refresh method is refresh_token().
        self.refresh_token()

    def refresh_token(self):
        """Fetch a fresh token and record when it was obtained."""
        self.token = get_new_access_token()
        self.token_gen_time = time.time()  # when the token was made

    def get_token(self):
        """Return a valid token, refreshing it first if it may have expired."""
        if time.time() > self.token_gen_time + self.TOKEN_TTL:
            # Token can be expired.
            self.refresh_token()
        return self.token
# Example usage: create the token holder once, then always go through
# get_token(), which refreshes transparently when the cached token is stale.
access_token = AccessToken()
while True:
    # Poll every 5 seconds; get_token() hands back the cached token until
    # TOKEN_TTL has elapsed, then fetches a new one.
    time.sleep(5)
    token = access_token.get_token()
    # do something with token

class AccessToken:
    """Lazily refreshing holder for an hourly API token."""

    # One hour minus a one-minute safety buffer, in seconds.
    TOKEN_TTL = 60 * 60 - 60

    def __init__(self):
        # Delegate to refresh_token() so the fetch-and-timestamp logic lives
        # in exactly one place.
        self.refresh_token()

    def refresh_token(self):
        """Fetch a new token and remember when it was generated."""
        self.token = get_new_access_token()
        self.token_gen_time = time.time()

    def get_token(self):
        """Return the cached token, refreshing it first once it is stale."""
        expires_at = self.token_gen_time + self.TOKEN_TTL
        if time.time() > expires_at:
            self.refresh_token()
        return self.token

Related

how to load test a grpc server with locust

i have a simple grpc server that has two services:
signin, ping, encapsulated in the following class that also has a private method to authenticate the requests:
class Listener(pingpong_pb2_grpc.PingPongServiceServicer):
    """gRPC servicer exposing `signin` and `ping`, with bearer-token auth.

    NOTE(review): `counter_g` is read from an enclosing scope — confirm it is
    defined at module level before the server starts.
    """

    def __init__(self):
        self.counter = counter_g
        self.last_print_time = time.time()

    def __str__(self):
        return self.__class__.__name__

    def auth_request(self, request, context):
        """Abort with UNAUTHENTICATED unless metadata carries the expected token."""
        metadata_dict = dict(context.invocation_metadata())
        auth_header = metadata_dict.get("authorization")
        # BUG FIX: the original did metadata_dict.get(...).split(" ")[1],
        # which raises AttributeError when the header is absent and IndexError
        # when it has no space — crashing the handler instead of rejecting the
        # call. Extract the token defensively instead.
        token = None
        if auth_header:
            fields = auth_header.split(" ")
            if len(fields) > 1:
                token = fields[1]
        if token == "jf90845h5gfip345t8":
            pass
        else:
            print("Auth Failed")
            context.abort(grpc.StatusCode.UNAUTHENTICATED, "Auth Failed")

    def signin(self, request, context):
        """The signin function is the rpc call that is called by the client"""
        if request.username == "test" and request.password == "test":
            print('Signin Success')
            return pingpong_pb2.SignInResponse(token="jf90845h5gfip345t8", success=True)
        else:
            print('Signin Failed')
            return pingpong_pb2.SignInResponse(token="bad token", success=False)

    def ping(self, request, context):
        """The ping function is the rpc call that is called by the client"""
        self.auth_request(request, context)
        self.counter += 1
        if self.counter > 1000:
            # Rough throughput log: wall time taken for the last 1000 calls.
            print("1000 calls in %3f seconds" % (time.time() - self.last_print_time))
            self.last_print_time = time.time()
            self.counter = 0
        response = pingpong_pb2.Pong(count=request.count + 1)
        return response
in order to make the grpc tasks report back execution time and success/failure events, i wrote this decorator:
def grpctask(func):
    """Decorator that reports a gRPC task's duration and outcome to Locust.

    On grpc.RpcError it fires a request_failure event; otherwise it fires
    request_success. The wrapped function's return value is passed through.

    NOTE(review): newer Locust versions replaced request_success /
    request_failure with a single `events.request` event — confirm the
    installed version supports these hooks.
    """
    import functools  # local import keeps the snippet self-contained

    # BUG FIX: without functools.wraps every decorated task reported
    # __name__ == 'wrapper' to introspection; preserve func's metadata.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        task_name = func.__name__
        start = time.time()
        result = None
        try:
            result = func(*args, **kwargs)
        except grpc.RpcError as e:
            # Elapsed time in milliseconds, as Locust expects.
            total = int((time.time() - start) * 1000)
            events.request_failure.fire(request_type="grpc",
                                        name=task_name,
                                        response_time=total,
                                        response_length=0,
                                        exception=e)
        else:
            total = int((time.time() - start) * 1000)
            events.request_success.fire(request_type="grpc",
                                        name=task_name,
                                        response_time=total,
                                        response_length=5)
        return result
    return wrapper
my user behaviour is as follows:
every 31 seconds the user should execute:\ (behaviour 1)
ping_server_1
ping_server_2
ping_server_3
(note that each function is different; they merely have similar names)
every 43 seconds the user should execute:\ (behaviour 2)
hello_server_1
hello_server_2
the two user actions should be independent, meaning that the user may execute both at the same time (not truly parallel; the wait time between behaviour 1 and behaviour 2 should just be zero) \
I wrote the following script; nesting ping_server_1, ping_server_2, and ping_server_3 inside one task made Locust unable to show data for each of those sub-tasks.
from locust import TaskSet, between, task, User, events, HttpUser, constant, SequentialTaskSet
import random
import grpc
from google.protobuf import json_format
from client import PingClient
import time
from tools import grpctask
class TaskOne(SequentialTaskSet):
    # NOTE(review): the leading '#' on the '#task' / '#grpctask' lines below
    # looks like a markdown-mangled '@task' / '@grpctask' decorator — as
    # written they are plain comments, so nothing here is registered as a
    # Locust task. Confirm against the original script.
    #task
    class PingTest(SequentialTaskSet):
        # Target gRPC server for this task set.
        host = "localhost:9999"

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.stub = None
            self.vacancy_id = None
            self.token = None
            self.ping_client = PingClient(host="localhost:9999")

        def on_start(self):
            # Locust hook: runs once per simulated user before tasks start.
            self.connect_to_server()
            self.login()

        def connect_to_server(self):
            # use the ping client to connect to the server
            self.ping_client.connect_to_server()

        def login(self):
            # use the ping client to login
            self.ping_client.set_token()

        #task
        #grpctask
        def ping_server(self):
            self.ping_client.ping()

        #task
        #grpctask
        def ping_server_2(self):
            self.ping_client.ping()

        #task
        #grpctask
        def ping_server_3(self):
            self.ping_client.ping()
            # Hand control back to the parent task set after the third ping.
            self.interrupt()

    #task
    def empty(self):
        print("PingTest is empty")
        self.interrupt()
class TaskTwo(SequentialTaskSet):
    # NOTE(review): the '#task' / '#grpctask' lines below look like
    # markdown-mangled '@task' / '@grpctask' decorators; as written they are
    # comments only, so nothing here is registered as a Locust task.
    #task
    class HelloServer(TaskSet):
        # Target gRPC server for this task set.
        host = "localhost:9999"

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.stub = None
            self.vacancy_id = None
            self.token = None
            self.ping_client = PingClient(host="localhost:9999")

        def on_start(self):
            # Locust hook: runs once per simulated user before tasks start.
            self.connect_to_server()
            self.login()

        def connect_to_server(self):
            # use the ping client to connect to the server
            self.ping_client.connect_to_server()

        def login(self):
            # use the ping client to login
            self.ping_client.set_token()

        #task
        #grpctask
        def hello_server(self):
            self.ping_client.ping()

        #task
        #grpctask
        def hello_server_2(self):
            self.ping_client.ping()
            # Hand control back to the parent task set after the second call.
            self.interrupt()

    #task
    def empty(self):
        print("TaskTwo is empty")
        self.interrupt()
class PingUser(User):
    # Intent (from the question): TaskOne should run every 31 seconds and
    # TaskTwo every 43 seconds, independently of each other.
    # NOTE(review): as written, Locust picks between the two task sets and no
    # per-task wait time is defined here — confirm the desired scheduling.
    tasks = [TaskOne, TaskTwo]
is there a way to define a wait time for TaskOne and TaskTwo independently of each other?
if not, what can be done to achieve the user behaviour described above while still treating each function as a task to get metrics for each function (task) (write each action as one function wont give metrics on each function)

how to assert that fastAPI cache is working?

So I'm writing a function that uses the FastAPI cache to avoid making a bunch of POST calls; I'm wondering whether it is possible to write a test that validates that the function is called just once and the cached value is reused afterwards.
from fastapi_cache.decorator import cache
#cache(expire=60)
async def get_auth_token() -> str:
    """Example stand-in for a real token fetch; always yields "token"."""
    return "token"
class TestAuth(IsolatedAsyncioTestCase):
    # NOTE(review): `mock_post` is never defined or patched in this snippet;
    # as shown, the final line raises NameError — and, per the question, the
    # underlying post is called twice because caching is not active here.
    async def test_get_token_success(self):
        """Test get token"""
        # success
        _token = await get_auth_token()
        assert _token is not None
        assert _token == "token"
        # here is the trick
        # calling again should not call post again
        _token = await get_auth_token()
        mock_post.assert_called_once()
but I'm getting this error
AssertionError: Expected 'post' to have been called once. Called 2 times.
Another idea: return a token that contains a timestamp, so you can check the timestamp to verify whether the value came from the function or from the cache.
For example:
import time
from fastapi_cache.decorator import cache
#cache(expire=60)
async def get_auth_token() -> str:
    """Example token source: the current epoch time, stringified."""
    now = time.time()
    return str(now)
class TestAuth(IsolatedAsyncioTestCase):
    """Validates caching by comparing timestamp-valued tokens."""

    async def test_get_token_success(self):
        """Token is produced once, served from cache, then regenerated after expiry."""
        # BUG FIX: compare as floats — the original compared the strings
        # lexicographically, which is only coincidentally correct and breaks
        # across magnitude boundaries.
        timestamp = time.time()
        # success
        token1 = await get_auth_token()
        assert token1 is not None
        assert timestamp <= float(token1) <= time.time()
        # here is the trick
        # calling again should not invoke the function body again
        token2 = await get_auth_token()
        assert token1 == token2
        # BUG FIX: sleep slightly PAST the 60 s TTL; sleeping exactly 60 s
        # races the cache's expiry check.
        time.sleep(61)
        token3 = await get_auth_token()
        assert token3 != token1

How to setup a Bearer Token Authentication in AsyncOAuth2Client with authlib

I'm calling an API that has a bearer token authentication.
With regular requests package I have successfully implemented it, but then I had to upgrade the class to run concurrent requests.
I found authlib.integrations.httpx_client.AsyncOAuth2Client to have the OAuth2 piece, and then authlib.oauth2.rfc6750.BearerTokenValidator to have the Bearer token piece.
But then I'm not able to run it correctly.
In the meantime I did try aiohttp, but moved to httpx since it seemed better with the OAuth2 authentication.
Still, my first time meeting asyncio, httpx and friends, so all suggestions are welcome.
The successful part with requests comes first:
class BearerAuth(requests.auth.AuthBase):
    """requests auth hook that adds an 'Authorization: Bearer <token>' header."""

    def __init__(self, token):
        self.token = token

    def __call__(self, a_request):
        # requests invokes the auth object with the prepared request; attach
        # the bearer header and hand the request back.
        bearer_value = f'Bearer {self.token}'
        a_request.headers['Authorization'] = bearer_value
        return a_request
class MySession(Session):
    """requests Session that logs in on creation and retries API calls on 401."""

    def __init__(self):
        super().__init__()
        self.set_token()

    # BUG FIX: the original declared `def set_token():` without `self`,
    # which raises TypeError the moment __init__ calls it.
    def set_token(self):
        """Fetch a bearer token using HTTP Basic credentials."""
        auth_enc = encode64('{username}:{password}'.format(**access_dict))
        the_headers = {'Authorization': f'Basic {auth_enc}'}
        auth_resp = self.post(AUTH_URL, headers=the_headers)
        # BUG FIX: the original read `the_resp.json()` — an undefined name;
        # the response captured above is `auth_resp`.
        self.token = auth_resp.json()

    def call_api(self):
        """GET the API, re-authenticating on 401, up to `tries` attempts.

        Returns the successful Response, or None on a non-401 failure
        (and implicitly None if every attempt was a 401).
        """
        for _ in range(tries):
            a_resp = self.get(API_URL, auth=BearerAuth(self.token['access_token']))
            if a_resp.status_code == 401:
                # Token likely expired — refresh and retry.
                self.set_token()
                continue
            elif a_resp.status_code == 200:
                return a_resp
            else:
                return None
The unsuccessful part with AsyncOauth2Client is next:
class AsyncBearerAuth(BearerTokenValidator):
    """Callable auth hook that adds a Bearer header to an httpx request.

    NOTE(review): inheriting from BearerTokenValidator is questionable —
    that class validates incoming tokens server-side, while httpx's `auth=`
    argument expects an httpx.Auth subclass or a request-modifying callable.
    """

    def __init__(self, token):
        self.token = token

    def authenticate_token(self, token):
        # BearerTokenValidator hook; pass the token through unchanged.
        return token

    def __call__(self, a_request):
        # BUG FIX: the original wrote `a_request.headeers` (typo), so the
        # Authorization header was never actually set.
        a_request.headers['Authorization'] = f'Bearer {self.token}'
        return a_request
class MyAsynClient(AsyncOAuth2Client):
    """OAuth2 httpx client that fetches its token lazily before API calls.

    NOTE(review): the usage code below instantiates `MyAsyncClient` — this
    class name is missing the 'c'; unify the spelling at the call site.
    """

    def __init__(self):
        AsyncOAuth2Client.__init__(self, AUTH_KEY, AUTH_SECRET)
        # self.create_authorization_url(AUTH_URL)

    async def set_token(self):
        """Fetch and store an OAuth2 token via the password grant."""
        # BUG FIX: the original dict literal ended with `} } }` — two stray
        # closing braces that made this a SyntaxError.
        auth_data = {'grant_type': 'password',
                     'username': AUTH_USERNAME,
                     'password': AUTH_PASSWORD}
        self.token = await self.fetch_token(AUTH_URL, **auth_data)

    async def call_api(self):
        """GET the API, refreshing the token on 401, up to `tries` attempts."""
        if not hasattr(self, 'token'):
            await self.set_token()
        for _ in range(tries):
            the_resp = await self.get(API_URL,
                                      auth=AsyncBearerAuth(self.token['access_token']))
            if the_resp.status_code == 401:
                # Token likely expired — refresh and retry.
                await self.set_token()
                continue
            elif the_resp.status_code == 200:
                return the_resp
            else:
                return None
async def main():
    """Fan out ten concurrent API calls over one shared client.

    BUG FIXES vs. the original:
    - `async with` is only legal inside an async function, so main() must be
      `async def` (run it with asyncio.run(main())).
    - gather was called with `*tasks`, an undefined name; the list built
      here is `the_tasks`.
    """
    async with MyAsyncClient() as client:
        the_tasks = []
        for _ in range(10):
            a_task = asyncio.create_task(client.call_api())
            the_tasks.append(a_task)
        # return_exceptions=True keeps one failed call from cancelling the rest.
        results = await asyncio.gather(*the_tasks, return_exceptions=True)
        do_something(results)
The error lies in this piece:
the_resp = await self.get(API_URL,
auth=AsyncBearerAuth(self.token['access_token']))
and it says:
~\anaconda3\lib\site-packages\httpx\_client.py in _send_single_request(self, request)
1683 Sends a single request, without handling any redirections.
1684 """
-> 1685 transport = self._transport_for_url(request.url)
1686 timer = Timer()
1687 await timer.async_start()
And if I remove the 'call' from AsyncBearerAuth the error that I get is:
~\anaconda3\lib\site-packages\httpx\_auth.py in auth_flow(self, request)
113
114 def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
--> 115 yield self._func(request)
116
117
TypeError: __call__() missing 2 required positional arguments: 'scope' and 'request'
Other questions that I didn't fully understand are:
Am I right in inheriting from BearerTokenValidator?
I'm following the documentation on Bearer Token Usage but I'm not sure about the Validator object here.
I have also tried commenting and uncommenting the create_authorization_url with no success.
Thank you for your help.

Return value from a pubsub callback function

I'm trying to get a value from a pubsub callback function.
If I print message.data in the callback function I can see the data. I've tried going through streaming_pull_future and making a Python class, but with no success.
# Pub/Sub connection parameters for the example.
project = 'project_id'
topic = 'topic'
subscription = "sub"
timeout = 10.0  # seconds to wait on the streaming-pull result below
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
    project, subscription
)
def test_callback():
    """Listen for 30 seconds, then print the payload of received messages.

    BUG FIXES vs. the original:
    - `subscribe(..., callback=callback_class.callback())` CALLED the
      callback immediately and registered its return value; Pub/Sub needs
      the callable itself.
    - `streaming_pull_future.decode('utf-8')` tried to decode the future
      object; the bytes to decode are the message data captured by the
      callback.
    """
    received = []

    def _on_message(message):
        # Runs on the subscriber's background thread for each message.
        received.append(message.data)
        message.ack()

    streaming_pull_future = subscriber.subscribe(
        subscription_path, callback=_on_message
    )
    time.sleep(30)
    # Stop the background consumer before reading what was collected.
    streaming_pull_future.cancel()
    for data in received:
        decoded_string = data.decode('utf-8')
        print("decoded_string", decoded_string)
        # other stuff with string
class CallBackMethod:
    # Wrapper intended to capture a Pub/Sub message payload.
    # NOTE(review): '#classmethod' below looks like a markdown-mangled
    # '@classmethod' decorator; as written, callback() is a plain instance
    # method whose first argument is misleadingly named `cls`.
    def __init__(self, data=None):
        self.data = data

    #classmethod
    def callback(cls, message=None):
        # NOTE(review): Pub/Sub ignores a callback's return value, so the
        # instance built here is discarded — the data never reaches the
        # caller, which is the problem the question describes.
        info("Got message {}".format(message))
        if message is None:
            return
        return cls(message.data)

Python: Asyncio NATS.io blocking

I am having trouble making Python asyncio NATS.io run sequentially. I have two classes: Account and Bridge.
Account holds the application logic and communicates through Bridge with an external service via NATS.io.
Main file:
# Set up the event loop and schedule the account workflow.
loop = asyncio.get_event_loop()
account = Account(loop, options)
# BUG FIX: `asyncio.async(...)` has been removed (and `async` is a reserved
# keyword in modern Python, making the original a SyntaxError);
# asyncio.ensure_future is the equivalent spelling.
asyncio.ensure_future(account.start())
loop.run_forever()
Account class:
class Account:
    # NOTE(review): a class attribute is shared by every Account instance;
    # a per-instance bridge would normally be created in __init__.
    bridge = Bridge()

    def connect(self):
        # Calling a generator-based coroutine without scheduling it returns a
        # generator object — exactly the symptom described in the question.
        result = self.bridge.connect(self.id)
        return result
Bridge class:
def connect(self, account_id):
    """Ask the external bank service (via NATS) to connect the account.

    Generator-based coroutine: it must be driven by the event loop
    (`yield from` / scheduled as a task), not called directly.
    """
    data = None
    try:
        # 10-second timed request over NATS.
        response = yield from self.nc.timed_request("bank.account.connect",
                                                    BankRequest(
                                                        method="connect",
                                                        data={...}
                                                    ), 10)
        data = json.loads(response.data.decode())
    except ErrTimeout:
        # NOTE(review): `status` is assigned but never used or returned —
        # timeouts are silently swallowed and Result(data=None) is returned.
        status = Messages.REQUEST_TIMED_OUT
    return Result(data=data)
I need to call account.connect() from anywhere inside the Account class and get the result of the connection (sequentially). Right now I'm getting a generator object.
your connect() methods should probably be coroutines:
class Account:
    bridge = Bridge()  # you probably want to put this in `def __init__(self)`!

    # NOTE(review): '#asyncio.coroutine' below looks like a markdown-mangled
    # '@asyncio.coroutine' decorator; asyncio.coroutine was removed in
    # Python 3.11 — modern code would use `async def` with `await`.
    #asyncio.coroutine
    def connect(self):
        # Delegate to the bridge coroutine and propagate its result.
        result = yield from self.bridge.connect(self.id)
        return result
class Bridge:
    # NOTE(review): '#asyncio.coroutine' looks like a markdown-mangled
    # '@asyncio.coroutine' decorator (removed in Python 3.11; prefer
    # `async def` / `await` in current code).
    #asyncio.coroutine
    def connect(self, account_id):
        # Issue a 10-second timed request to the bank service over NATS.
        data = None
        try:
            response = yield from self.nc.timed_request("bank.account.connect",
                                                        BankRequest(
                                                            method="connect",
                                                            data={...}
                                                        ), 10)
            data = json.loads(response.data.decode())
        except ErrTimeout:
            # NOTE(review): `status` is set but never used; the timeout is
            # swallowed and Result(data=None) is returned.
            status = Messages.REQUEST_TIMED_OUT
        return Result(data=data)
and:
resp = yield from account.connect()

Categories

Resources