Mock entire client class with pytest - python

I have a class that inherits from another class in which we build a client:
class Client(ClientLibrary):
    def __init__(self, hosts=[{'host': <HOST_ADDRESS>, 'port': <PORT>}], *args, **kwargs):
        '''Alternative constructor, where I'd pass in some defaults to simplify connection.'''
        super().__init__(hosts, *args, **kwargs)

    def some_method(self):
        ...
I want to test this class, and already have a test server set up that I want to connect to for testing. My initial approach was to create a MockClient that inherits from the original Client but swaps out the hosts parameter for the test host like so:
# I create a mock client that inherits from the original `Client` class,
# but passes in the host and port of the test server.
class MockClient(Client):
    def __init__(self, hosts=[{'host': MOCK_HOST, 'port': MOCK_PORT}]):
        super().__init__(hosts=hosts)
The idea was that I'd then use this mock client in the tests. However, I have faced a lot of issues when testing functions that construct the original Client internally. I have tried patching it but keep running into issues.
Is there a better way to approach this? And can this be done using pytest fixtures?
I want to be able to perform the following sorts of tests:
class TestFunctionThatUtilisesClient:
    def test_in_which_class_is_constructed_explicitly(self):
        client = Client()
        r = client.some_method()
        assert r == 'something'

    def test_in_which_class_is_constructed_implicitly(self):
        r = another_method()  # Client() is called somewhere in here
        assert r == 'something else'
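For illustration, this is the sort of fixture-based swap I have been attempting (my_module is a placeholder for wherever Client and another_method live; MOCK_HOST/MOCK_PORT point at the test server):
import pytest

import my_module  # placeholder for the module that defines Client/another_method

MOCK_HOST, MOCK_PORT = 'localhost', 9200  # test server address (placeholder)

class MockClient(my_module.Client):
    def __init__(self, hosts=None, **kwargs):
        hosts = hosts or [{'host': MOCK_HOST, 'port': MOCK_PORT}]
        super().__init__(hosts=hosts, **kwargs)

@pytest.fixture(autouse=True)
def use_test_server(monkeypatch):
    # anything that looks up my_module.Client now gets MockClient instead
    monkeypatch.setattr(my_module, 'Client', MockClient)
My understanding is that the patch has to target the module where Client is looked up, not where it is defined, which may be where my attempts have gone wrong.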


Overriding FastAPI dependencies that have parameters

I'm trying to test my FastAPI endpoints by overriding the injected database using the officially recommended method in the FastAPI documentation.
The function I'm injecting the db with is a closure that allows me to build any desired database from a MongoClient by giving it the database name, whilst (I assume) still working with FastAPI's Depends, since it returns the inner function as a callable. No error is thrown, so I think this approach is correct:
# app
def build_db(name: str):
    def close():
        return build_singleton_whatever(MongoClient, args....)
    return close
Adding it to the endpoint:
# endpoint
@app.post("/notification/feed")
async def route_receive_notifications(db: Database = Depends(build_db("someDB"))):
    ...
And finally, attempting to override it in the tests:
# pytest
# test_endpoint.py
fastapi_app.dependency_overrides[app.build_db] = lambda x: lambda: x
However, the dependency doesn't seem to override at all and the test ends up creating a MongoClient with the IP of the production database as in normal execution.
So, any ideas on overriding FastAPI dependencies that are given parameters in their endpoints?
I have tried creating a mock closure function with no success:
def mock_closure(*args):
    def close():
        return args
    return close

app.dependency_overrides[app.build_db] = mock_closure('otherDB')
And I have also tried providing the same signature, including the parameter, with still no success:
app.dependency_overrides[app.build_db('someDB')] = mock_closure('otherDB')
Edit note: I'm also aware I can create a separate function that builds my desired database and use that as the dependency, but I would much prefer this dynamic version, as it scales better to using more databases in my apps and avoids writing essentially repeated functions just so they can be cleanly injected.
I use the following fixtures to override the main DB with a test DB:
from typing import Callable

import pytest
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine

from settings import get_settings

@pytest.fixture()
async def get_engine():
    engine = create_async_engine(get_settings().test_db_url)
    yield engine
    await engine.dispose()

@pytest.fixture()
async def db_session(get_engine) -> AsyncSession:
    async with get_engine.begin() as connection:
        # async_session is the project's async sessionmaker (defined elsewhere)
        async with async_session(bind=connection) as session:
            yield session
            await session.close()

@pytest.fixture()
def override_get_async_session(db_session: AsyncSession) -> Callable:
    async def _override_get_async_session():
        yield db_session
    return _override_get_async_session
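To actually take effect, the override still has to be registered on the app; a minimal sketch, assuming the real dependency is named get_async_session and the FastAPI instance is app (both names are assumptions from the application code, not shown above):
from fastapi.testclient import TestClient

@pytest.fixture()
def client(override_get_async_session):
    app.dependency_overrides[get_async_session] = override_get_async_session
    with TestClient(app) as test_client:
        yield test_client
    app.dependency_overrides.clear()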
There are two issues with your implementation getting in your way:
As you are calling build_db right in the route_receive_notifications function definition, the route receives the nested close function as its dependency, and it's impossible to override it. To fix this you need to avoid calling your dependency right away while still providing it with the db name. For that you can either define a new dependency to inject the name into build_db:
# app
def get_db_name():
    return "someDB"

def build_db(name: str = Depends(get_db_name)):
    ...

# endpoint
@app.post("/notification/feed")
async def route_receive_notifications(db: Database = Depends(build_db)):
    ...
or use functools.partial (shorter but less elegant):
# endpoint
from functools import partial

@app.post("/notification/feed")
async def route_receive_notifications(db: Database = Depends(partial(build_db, "someDB"))):
    ...
FastAPI requires the overriding function to have the same signature as the original dependency. Switching from *args to the same single name parameter (same argument name, type and Depends default as build_db) is enough, and it keeps the override easy to support in future. Of course, you need to provide the function itself as the value in dependency_overrides, without calling it:
def mock_closure(name: str = Depends(get_db_name)):  # mirror build_db's signature
    def close():
        return name
    return close

fastapi_app.dependency_overrides[app.build_db] = mock_closure
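With the Depends(get_db_name) variant in place, a hypothetical test can then register the override and call the route directly (fastapi_app being the FastAPI instance, as named in the question):
from fastapi.testclient import TestClient

def test_route_receives_mock_db():
    fastapi_app.dependency_overrides[app.build_db] = mock_closure
    client = TestClient(fastapi_app)
    response = client.post("/notification/feed")
    assert response.status_code == 200
    fastapi_app.dependency_overrides.clear()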

where do i add code in tornado websocket server?

I just jumped into websocket programming with basic knowledge of "asynchronous" and "threads", and I have something like this:
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import socket
import uuid
import json
import datetime

class WSHandler(tornado.websocket.WebSocketHandler):
    clients = []

    def open(self):
        self.id = str(uuid.uuid4())
        self.user_info = self.request.remote_ip + ' - ' + self.id
        print(f'[{self.user_info}] Connected')
        client = {"sess": self, "id": self.id}
        self.clients.append(client.copy())

    def on_message(self, message):
        print(f'[{self.user_info}] Message received: {message}')
        print(f'[{self.user_info}] Reply to client: {message[::-1]}')
        self.write_message(message[::-1])
        self.comm(message)

    def on_close(self):
        print(f'[{self.user_info}] Disconnected')
        for x in self.clients:
            if x["id"] == self.id:
                self.clients.remove(x)

    def check_origin(self, origin):
        return True

application = tornado.web.Application([
    (r'/', WSHandler),
])

if __name__ == "__main__":
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(80)
    myIP = socket.gethostbyname(socket.gethostname())
    print('*** Websocket Server Started at %s ***' % myIP)
    tornado.ioloop.IOLoop.instance().start()
My question is: where do I add code? Should I add everything inside the WSHandler class, or outside it, or in another file? And when should I use @classmethod? For now there is no problem when I add code inside the handler, but I only have a few test clients.
Maybe not the full solution, but just a few thoughts.
You can look at the Tornado websocket chat example in the Tornado demos.
The first good change is that their client collection (waiters) is a set(), which ensures each client is only contained once by default. It is also defined and accessed as a class variable, so you don't use self.waiters but cls.waiters or ClassName.waiters (in this case ChatSocketHandler.waiters) to access it.
class ChatSocketHandler(tornado.websocket.WebSocketHandler):
    waiters = set()
The second change is that they update every client (you could choose here to send the update not to all but only some) in a @classmethod, since they don't want to receive the instance (self) but the class (cls) and refer to the class variables (in their case waiters, cache and cache_size). We can forget about the cache and cache size here.
So like this:
@classmethod
def send_updates(cls, chat):
    logging.info("sending message to %d waiters", len(cls.waiters))
    for waiter in cls.waiters:
        try:
            waiter.write_message(chat)
        except Exception:
            logging.error("Error sending message", exc_info=True)
On every connection a new instance of your handler is created, referred to as self, and every attribute on self is unique to that instance and related to the actual client calling your methods. This is good for identifying a client on each call.
An instance-based client list (e.g. a self.clients created in __init__) would therefore always be empty on each call, and adding a client would only add it to that one instance's view of the world.
But sometimes you want some variables, like the list of clients, to be the same for all instances created from your class.
This is where class variables (the ones you define directly under the class definition) and the @classmethod decorator come into play.
@classmethod makes the method call independent of any instance, which means you can only access class variables in those methods. But in the case of a message broker this is pretty much what we want:
add clients to the class variable, which is the same for all instances of your handler. And since it is defined as a set, each client is unique.
when receiving messages, send them out to all (or a subset of) the clients.
So on_message is a "normal" instance method, but it calls something like send_updates(), which is a @classmethod in the end.
send_updates() iterates over all (or a subset) of the clients (waiters) and sends the actual updates.
Remember that you added waiters with waiters.add(self), so every waiter is really an instance (representing a caller), and you are "simply" calling each instance's write_message() method. So this is not broadcast in one go but sent to every caller one by one. This would be the place where you could separate by some criteria like topics or groups...
So in short: use @classmethod for methods that are independent of a specific instance (like a caller or client in your case) and that act on "all", or a subset of all, of your clients. You can only access class variables in those methods, but that should be fine, since that's their purpose ;)
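Applied to your handler, a minimal sketch (untested, and without any topic/group filtering) could look like this:
import logging
import uuid

import tornado.websocket

class WSHandler(tornado.websocket.WebSocketHandler):
    clients = set()  # class variable: one shared set for all connections

    def open(self):
        self.id = str(uuid.uuid4())
        WSHandler.clients.add(self)

    def on_close(self):
        WSHandler.clients.discard(self)

    def on_message(self, message):
        # instance method for the caller; fan out via the classmethod
        WSHandler.send_updates(message[::-1])

    @classmethod
    def send_updates(cls, message):
        for client in cls.clients:
            try:
                client.write_message(message)
            except Exception:
                logging.error("Error sending message", exc_info=True)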

Calling a python class method from an instance of a class doesn't work as I expect

I'm new to Python, and I think I'm trying to do something simple. However, I am confused by the results I am getting. I am declaring a class that has two class methods, add and remove, which in my simple example add or remove a client from a list class variable. Here's my code:
Service.py
from Client import Client

class Service:
    clients = []

    @classmethod
    def add(cls, client):
        cls.clients.append(client)

    @classmethod
    def remove(cls, client):
        if client in cls.clients:
            cls.clients.remove(client)

if __name__ == '__main__':
    a = Client()
    b = Client()
    c = Client()
    Service.add(a)
    Service.add(b)
    Service.add(c)
    print(Service.clients)
    c.kill()
    print(Service.clients)
    Service.remove(c)
    print(Service.clients)
Client.py
class Client:
    def kill(self):
        from Service import Service
        Service.remove(self)
I would expect calling c.kill() to remove the instance from the clients list. However, when I evaluate the clients list there, it shows 0 items. When I call Service.remove(c), it shows the correct list and removes the item as expected. I am not sure what I am missing here.
If it matters, I am currently using PyCharm with my code running in a Virtualenv with Python 3.6.5.
Your current code has a circular import, as both files use each other. Worse, because you run Service.py as a script it is loaded as the module __main__, while the deferred from Service import Service inside Client.kill loads the same file a second time under the name Service. You end up with two distinct Service classes, each with its own clients list, which is why kill() operates on a list your script never prints. Also, instead of relying on the client to destroy the connections, use a contextmanager to facilitate the updating of clients and, at the end of the procedure, empty clients:
import contextlib

class Client:
    pass

class Service:
    clients = []

    @classmethod
    def add(cls, client):
        cls.clients.append(client)

    @classmethod
    @contextlib.contextmanager
    def thread(cls):
        yield cls
        cls.clients = []

with Service.thread() as t:
    t.add(Client())
    t.add(Client())
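As an aside, a minimal way to dodge the double-import described above is to keep the entry point in a third module (main.py here is hypothetical), so Service.py is only ever imported under one name:
# main.py (hypothetical entry point, using the question's Client.py/Service.py)
from Client import Client
from Service import Service

a, b, c = Client(), Client(), Client()
for cl in (a, b, c):
    Service.add(cl)

c.kill()                # now removes c from the one and only Service.clients
print(Service.clients)  # the two remaining Client instances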

Python Mock imported library suds

I have a class that uses a suds client in several places to make XML calls to another server. There are no problems when the code is running; however, I cannot figure out how to mock the suds client creation in the class constructor so that it creates a mocked object and does not use a real socket. We have tried multiple permutations of mock.patch, mocker.patch, etc.; the ones that run result in a socket error, and the rest result in AttributeError or ImportError.
This is a dumbed down version of the class:
from suds.client import Client
from suds.transport.https import HttpAuthenticated

class MYClass(object):
    def __init__(self, host, usern, passw, provisioning_timeout=90):
        wsdl_url = 'https://{host}/server/GetWsdl?wsdl'.format(host=host)
        transport = CustomTransport()
        self.client = Client(wsdl_url, transport=transport, timeout=5)

    def run_wsdl(self, data):
        result = self.client.service.testwsdl(data=data)
        return result
And this is what I am trying to run through a unit test:
from me import my_class
from mock import patch

# These are some of the many permutations we've tried:
# @patch('my_class.MYClass.suds.client.Client')  -> ImportError, no suds
# @patch('my_class.Client')                      -> socket use error in __init__
# @patch('my_class.suds.client.Client')          -> ImportError, no suds
def test_sip_stuff(mock_client):
    with patch.object(mock_client.client.service, 'testwsdl') as mockwsdl:
        mockwsdl.return_value = good_wsdl_data
        test_instance = my_class.MYClass(
            host='10.10.10.20',
            usern='user',
            passw='pass'
        )
        return_value = test_instance.run_wsdl(data='something')
        assert return_value == good_wsdl_data
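For what it's worth, the standard advice is to patch the name where it is looked up, i.e. Client as imported into the my_class module; a sketch under that assumption (the target path 'me.my_class.Client' is inferred from the imports above, and good_wsdl_data is a stand-in value):
from mock import patch
from me import my_class

good_wsdl_data = 'expected'  # placeholder canned response

@patch('me.my_class.Client')  # patch where it's used, not where it's defined
def test_sip_stuff(mock_client_cls):
    instance = mock_client_cls.return_value            # what Client(...) returns
    instance.service.testwsdl.return_value = good_wsdl_data

    test_instance = my_class.MYClass(host='10.10.10.20',
                                     usern='user', passw='pass')
    assert test_instance.run_wsdl(data='something') == good_wsdl_data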

Unit test Flask view mocking out celery tasks

So, I have a Flask view which adds a Celery task to a queue and returns a 200 to the user.
from flask.views import MethodView
from app.tasks import launch_task

class ExampleView(MethodView):
    def post(self):
        # Does some verification of the incoming request; if all good:
        launch_task(task, arguments)
        return 'Accepted', 200
The issue is with testing the following: I don't want to have to run a Celery instance at all; I just want to know that, once all the verification passes, the view returns 200 to the user. The Celery launch_task() will be tested elsewhere.
Therefore I'm keen to mock out that launch_task() call so it essentially does nothing, making my unit test independent of the Celery instance.
I've tried various incarnations of:
@mock.patch('app.views.launch_task.delay')
def test_launch_view(self, mock_launch_task):
    mock_launch_task.return_value = None
    # post a correct dictionary to the view
    correct_data = {'correct': 'params'}
    rs = self.app.post('/launch/', data=correct_data)
    self.assertEqual(rs.status_code, 200)

@mock.patch('app.views.launch_task')
def test_launch_view(self, mock_launch_task):
    mock_launch_task.return_value = None
    # post a correct dictionary to the view
    correct_data = {'correct': 'params'}
    rs = self.app.post('/launch/', data=correct_data)
    self.assertEqual(rs.status_code, 200)
But can't seem to get it to work, my view just exits with a 500 error. Any assistance would be appreciated!
I also tried all sorts of @patch decorators and they didn't work. What I found works is mocking in setUp, like this:
import unittest
from mock import patch
from mock import MagicMock

class TestLaunchTask(unittest.TestCase):
    def setUp(self):
        self.patcher_1 = patch('app.views.launch_task')
        mock_1 = self.patcher_1.start()
        launch_task = MagicMock()
        launch_task.as_string = MagicMock(return_value='test')
        mock_1.return_value = launch_task

    def tearDown(self):
        self.patcher_1.stop()
The @task decorator replaces the function with a Task object (see the documentation). If you mock the task itself, you'll replace the (somewhat magic) Task object with a MagicMock and it won't schedule the task at all. Instead, mock the Task object's run() method, like so:
# With CELERY_ALWAYS_EAGER=True
@patch('monitor.tasks.monitor_user.run')
def test_monitor_all(self, monitor_user):
    """Test monitor.all task"""
    user = ApiUserFactory()
    tasks.monitor_all.delay()
    monitor_user.assert_called_once_with(user.key)
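For completeness, CELERY_ALWAYS_EAGER (mentioned in the comment above) makes .delay() run the task synchronously in-process, so no broker or worker is needed. A sketch of enabling it in test settings, where celery_app is an assumed name for your Celery() instance (Celery 4+ spells the option task_always_eager):
celery_app.conf.update(
    CELERY_ALWAYS_EAGER=True,
    CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,  # let task errors fail the test
)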
