How to wrap a function from an inherited class? - python

I want to use my own class RetrySession that is inherited from requests.Session, but with more robust try/except checking (for non-200 status codes, timeouts, etc.) and extra logging around the get() function.
# retry_session.py
import requests
import logging
logger = logging.getLogger(__name__)
# Oversimplified...
class RetrySession(requests.Session):
    """Session subclass whose get_() logs the target URL before fetching it."""

    def get_(self, url, **kwargs):
        # Record the request target, then delegate to the inherited get().
        logger.info(url)
        response = self.get(url, **kwargs)
        return response
Is there any way to keep the original function name of get() without temporarily copying the function under another name? Right now, I'm doing something like:
from retry_session import *
# Logging defined here
s = RetrySession()
# Would like to do s.get(...) instead of s.get_(...)
r = s.get_("https://httpbin.org/get")

You need to use super() to call the method from the superclass; also, note that Python has no `new` keyword for instantiating objects.
class RetrySession(requests.Session):
    """Override get() itself; super() still reaches the original implementation."""

    def get(self, url, **kwargs):
        # Announce the request, then hand off to requests.Session.get().
        print("Requesting", url)
        return super().get(url, **kwargs)


if __name__ == '__main__':
    session = RetrySession()
    result = session.get("https://httpbin.org/get")

Related

How to define a global error handler in gRPC python

I'm trying to catch any exception that is raised in any servicer, so I can make sure that I only propagate known exceptions and not unexpected ones like ValueError, TypeError etc.
I'd like to be able to catch any raised error, and format them or convert them to other errors to better control the info that is exposed.
I don't want to have to enclose every servicer method with try/except.
I've tried with an interceptor, but I'm not able to catch the errors there.
Is there a way of specifying an error handler for the grpc Server? like what you do with flask or any other http server?
gRPC Python currently doesn't support a server-side global error handler. The interceptor won't execute the server handler inside the intercept_service function, so there is no way to try/except there.
Also, I found that the gRPC Python server interceptor implementation is different from what was originally proposed at L13-Python-Interceptors.md#server-interceptors. If the implementation had stuck to the original design, we could easily use an interceptor as a global error handler, with access to the handler and the request/request_iterator.
# Current Implementation
intercept_service(self, continuation, handler_call_details)
# Original Design
intercept_unary_unary_handler(self, handler, method, request, servicer_context)
intercept_unary_stream_handler(self, handler, method, request, servicer_context)
intercept_stream_unary_handler(self, handler, method, request_iterator, servicer_context)
intercept_stream_stream_handler(self, handler, method, request_iterator, servicer_context)
Please submit a feature request issue to https://github.com/grpc/grpc/issues.
Maybe this will help you :)
def _wrap_rpc_behavior(handler, fn):
if handler is None:
return None
if handler.request_streaming and handler.response_streaming:
behavior_fn = handler.stream_stream
handler_factory = grpc.stream_stream_rpc_method_handler
elif handler.request_streaming and not handler.response_streaming:
behavior_fn = handler.stream_unary
handler_factory = grpc.stream_unary_rpc_method_handler
elif not handler.request_streaming and handler.response_streaming:
behavior_fn = handler.unary_stream
handler_factory = grpc.unary_stream_rpc_method_handler
else:
behavior_fn = handler.unary_unary
handler_factory = grpc.unary_unary_rpc_method_handler
return handler_factory(fn(behavior_fn,
handler.request_streaming,
handler.response_streaming),
request_deserializer=handler.request_deserializer,
response_serializer=handler.response_serializer)
class TracebackLoggerInterceptor(grpc.ServerInterceptor):
    """Server interceptor that logs the traceback of any exception a handler raises."""

    def intercept_service(self, continuation, handler_call_details):
        def traceback_wrapper(behavior, request_streaming, response_streaming):
            # Wrap the RPC behavior so unexpected errors get logged; the
            # wrapped call then implicitly returns None, as the original did.
            def guarded_behavior(request_or_iterator, servicer_context):
                try:
                    return behavior(request_or_iterator, servicer_context)
                except Exception as err:
                    logger.exception(err, exc_info=True)

            return guarded_behavior

        handler = continuation(handler_call_details)
        return _wrap_rpc_behavior(handler, traceback_wrapper)
As some of the previous comments suggested, I tried the meta-class approach which works quite well.
Attached is a simple example to demonstrate how to intercept the grpc calls.
You could extend this by providing the metaclass a list of decorators which you could apply on each function.
Also, it would be wise to be more selective regarding the methods you apply the wrapper to. A good option would be to list the methods of the autogenerated base class and only wrap those.
from types import FunctionType
from functools import wraps
def wrapper(method):
    """Decorator that forwards to *method*; middleware logic goes in wrapped().

    The original snippet read ``#wraps(method)`` — the leading ``#`` was a
    mangled ``@``; the ``@wraps`` decorator is required so the wrapped
    function keeps the original's name and docstring.
    """
    @wraps(method)
    def wrapped(*args, **kwargs):
        # do stuff here
        return method(*args, **kwargs)
    return wrapped
class ServicerMiddlewareClass(type):
    """Metaclass that replaces every plain function attribute with wrapper(fn)."""

    def __new__(meta, classname, bases, class_dict):
        # Rebuild the class dict, wrapping functions and keeping
        # everything else untouched.
        decorated = {
            name: wrapper(attr) if isinstance(attr, FunctionType) else attr
            for name, attr in class_dict.items()
        }
        return type.__new__(meta, classname, bases, decorated)
# In order to use
# Usage: the metaclass wraps every method defined on this servicer subclass.
class MyGrpcService(grpc.MyGrpcServicer, metaclass=ServicerMiddlewareClass):
    ...

modify a function of a class from another class

In pymodbus library in server.sync, SocketServer.BaseRequestHandler is used, and defines as follow:
class ModbusBaseRequestHandler(socketserver.BaseRequestHandler):
    """ Implements the modbus server protocol

    This uses the socketserver.BaseRequestHandler to implement
    the client handler.
    """
    # Per-connection state; the class-level defaults are overwritten in setup().
    running = False
    framer = None

    def setup(self):
        """ Callback for when a client connects
        """
        _logger.debug("Client Connected [%s:%s]" % self.client_address)
        self.running = True
        # Build a framer bound to the server's decoder for this connection.
        self.framer = self.server.framer(self.server.decoder, client=None)
        # Register this handler so the server can track live client threads.
        self.server.threads.append(self)

    def finish(self):
        """ Callback for when a client disconnects
        """
        _logger.debug("Client Disconnected [%s:%s]" % self.client_address)
        self.server.threads.remove(self)

    def execute(self, request):
        """ The callback to call with the resulting message

        :param request: The decoded request message
        """
        try:
            # Look up the datastore context for the addressed unit and run
            # the request against it.
            context = self.server.context[request.unit_id]
            response = request.execute(context)
        except NoSuchSlaveException as ex:
            _logger.debug("requested slave does not exist: %s" % request.unit_id )
            if self.server.ignore_missing_slaves:
                return # the client will simply timeout waiting for a response
            response = request.doException(merror.GatewayNoResponse)
        except Exception as ex:
            # Any other failure is reported back to the client as a
            # SlaveFailure exception response.
            _logger.debug("Datastore unable to fulfill request: %s; %s", ex, traceback.format_exc() )
            response = request.doException(merror.SlaveFailure)
        # Echo the transaction/unit ids so the client can match the reply.
        response.transaction_id = request.transaction_id
        response.unit_id = request.unit_id
        self.send(response)

    # ----------------------------------------------------------------------- #
    # Base class implementations
    # ----------------------------------------------------------------------- #
    def handle(self):
        """ Callback when we receive any data
        """
        raise NotImplementedException("Method not implemented by derived class")

    def send(self, message):
        """ Send a request (string) to the network

        :param message: The unencoded modbus response
        """
        raise NotImplementedException("Method not implemented by derived class")
setup() is called when a client is connected to the server, and finish() is called when a client is disconnected. I want to manipulate these methods (setup() and finish()) in another class in another file which use the library (pymodbus) and add some code to setup and finish functions. I do not intend to modify the library, since it may cause strange behavior in specific situation.
---Edited ----
To clarify, I want setup function in ModbusBaseRequestHandler class to work as before and remain untouched, but add sth else to it, but this modification should be done in my code not in the library.
The simplest, and usually best, thing to do is to not manipulate the methods of ModbusBaseRequestHandler, but instead inherit from it and override those methods in your subclass, then just use the subclass wherever you would have used the base class:
class SoupedUpModbusBaseRequestHandler(ModbusBaseRequestHandler):
    def setup(self):
        """Override point: replace or extend the base connection setup."""
        # do different stuff
        # call super().setup() if you want
        # or call socketserver.BaseRequestHandler.setup() to skip over it
        # or call neither
Notice that a class statement is just a normal statement, and can go anywhere any other statement can, even in the middle of a method. So, even if you need to dynamically create the subclass because you won't know what you want setup to do until runtime, that's not a problem.
If you actually need to monkeypatch the class, that isn't very hard—although it is easy to screw things up if you aren't careful.
def setup(self):
    """Replacement implementation assigned onto the class below."""
    # do different stuff

# Monkeypatch: every ModbusBaseRequestHandler instance now uses this setup.
ModbusBaseRequestHandler.setup = setup
If you want to be able to call the normal implementation, you have to stash it somewhere:
# Stash the original implementation so the replacement can still call it.
_setup = ModbusBaseRequestHandler.setup

def setup(self):
    """Replacement setup that may delegate to the saved original (_setup)."""
    # do different stuff
    # call _setup whenever you want

ModbusBaseRequestHandler.setup = setup
If you want to make sure you copy over the name, docstring, etc., you can use `functools.wraps`:
# The leading '#' in the original was a mangled '@': functools.wraps copies
# the name/docstring of the replaced method onto the replacement.
@functools.wraps(ModbusBaseRequestHandler.setup)
def setup(self):
    """Replacement setup carrying the original's metadata via functools.wraps."""
    # do different stuff

ModbusBaseRequestHandler.setup = setup
Again, you can do this anywhere in your code, even in the middle of a method.
If you need to monkeypatch one instance of ModbusBaseRequestHandler while leaving any other instances untouched, you can even do that. You just have to manually bind the method:
def setup(self):
    """Per-instance replacement for ModbusBaseRequestHandler.setup."""
    # do different stuff

# Bind the function to this one instance only; other instances keep the
# class-level setup().
myModbusBaseRequestHandler.setup = setup.__get__(myModbusBaseRequestHandler)
If you want to call the original method, or wraps it, or do this in the middle of some other method, etc., it's otherwise basically the same as the last version.
It can be done by Interceptor
from functools import wraps

def iterceptor(func):
    """Decorator that runs code before and after each call to *func*.

    The '#wraps(func)' and '#iterceptor' lines in the original were mangled
    '@' decorators, restored here.
    """
    print('this is executed at function definition time (def my_func)')

    @wraps(func)
    def wrapper(*args, **kwargs):
        print('this is executed before function call')
        result = func(*args, **kwargs)
        print('this is executed after function call')
        return result
    return wrapper

@iterceptor
def my_func(n):
    print('this is my_func')
    print('n =', n)

my_func(4)
more explanation can be found here

Mocking two functions in a method of another class

I've read multiple posts but I'm just not getting it. I've used the same methodology elsewhere and it worked, but that was a method in another class, not a standalone function.
plugins/iaw_api/helpers.py:
import requests
import json
def https_post_json(request_url, headers, data, timeout=20):
    """POST *data* as JSON to *request_url* and return the decoded response body."""
    reply = requests.post(request_url, json=data, headers=headers, timeout=timeout)
    body = reply.content
    return json.loads(body)
plugins/iaw_api/iaw_api_runners.py:
from plugins.iaw_api.helpers import https_post_json
class IawApiRunner(object):
    """Dispatches a job to the matching runner (simplified excerpt from the question)."""

    def __init__(self, runner_class_in):
        # Runner selector; compared against SomeRunner in run_job().
        self._runner_data_class = runner_class_in
        self.result = None

    def run_job(self):
        # NOTE(review): type(...) == SomeRunner checks the exact class;
        # isinstance() would also accept subclasses — confirm intent.
        if type(self._runner_data_class) == SomeRunner:
            self._this_runner()

    def _this_runner(self):
        # <request-content> is a placeholder in the question, not real code;
        # this is the call the test needs to mock out.
        _task_id = https_post_json(<request-content>)
        print _task_id
tests/iaw_api/tests.py:
from plugins.iaw_api.helpers import https_post_json
from plugins.iaw_api.iaw_api_runners import IawApiRunner
class TestApiRunners(unittest.TestCase):
    """Tests for IawApiRunner with the HTTP helper mocked out."""

    def __init__(self, *args, **kwargs):
        super(TestApiRunners, self).__init__(*args, **kwargs)
        self.test_runner = IawApiRunner(SomeRunner)

    def test_single_run_no_error(self):
        # Patch the name the code under test actually looks up:
        # https_post_json was imported INTO iaw_api_runners' namespace, so
        # patching 'plugins.iaw_api.helpers.https_post_json' has no effect
        # on the already-bound reference.
        with mock.patch('plugins.iaw_api.iaw_api_runners.https_post_json') as MockPost:
            MockPost.return_value = {'status_id': '59f0681cf9c32000132c7e89'}
            self.test_runner.run_job()
This is simplified and I've yet to add the assertions. The issue is when I run the test the actual http_post goes off to the API instead of the response being the mock return value.
You have to patch the name used in the code you are testing; in this case, it is https_post_json (imported directly into the module namespace). Your patch would be correct if iaw_api_runners.py just used import plugins and _this_runner ran _task_id = plugins.iaw_api.helpers.https_post_json(...).

Best practice for repeatedly initializing a class with identical parameters (Pyramid)?

I want to streamline/reduce my code, so I try to put initializations of classes with repeated parameters in their own, extended classes. This is a REST API based on Pyramid & Cornice.
How would I initialize a pyramid.httpexceptions.HTTPUnauthorized when I'm always adding the same headers on initialization? This also applies to other HTTP responses where I initialize them repeatedly without changing their parameters.
Currently I've come up with this to extend the class:
class _401(HTTPUnauthorized):
    """HTTPUnauthorized with helpers that attach WWW-Authenticate challenge headers."""

    def basic_jwt_header(self):
        # Advertise both the JWT and HTTP Basic authentication schemes.
        for challenge in ('JWT', 'Basic realm="Please log in"'):
            self.headers.add('WWW-Authenticate', challenge)
        return self

    def jwt_header(self):
        # Advertise only the JWT scheme.
        self.headers.add('WWW-Authenticate', 'JWT')
        return self
which I use in a view like this:
# The leading '#' was a mangled '@' — this must be a decorator for Pyramid
# to register the function as the forbidden (403) view.
@forbidden_view_config()
def authenticate(request):
    """Return a 401 challenge carrying Basic + JWT WWW-Authenticate headers."""
    response = _401()
    return _401.basic_jwt_header(response)
But it does not feel and look right. Is there a better, cleaner way?
Create an __init__ method on the class:
class _401(HTTPUnauthorized):
    """401 response that always carries the Basic + JWT challenge headers."""

    def __init__(self):
        # call base class __init__ first, which will set up the
        # headers instance variable
        super(_401, self).__init__()
        # in Python 3, just use this:
        # super().__init__()
        # now add the headers that you always enter
        self.headers.add('WWW-Authenticate','JWT')
        self.headers.add('WWW-Authenticate', 'Basic realm="Please log in"')

# Python 2 print statement in the original answer.
resp = _401()
print resp.headers
Since you are using two different methods after instantiating your _401 instance, then you might be better off using class-level factory methods, which will do both the instance creation and setting the desired headers:
class _401(HTTPUnauthorized):
    """401 response whose factory classmethods pre-populate the challenge headers."""

    # The '#classmethod' lines in the original were mangled '@classmethod'
    # decorators — without them cls would not be passed automatically.
    @classmethod
    def basic_jwt_header(cls):
        ret = cls()
        ret.headers.add('WWW-Authenticate','JWT')
        ret.headers.add('WWW-Authenticate', 'Basic realm="Please log in"')
        return ret

    @classmethod
    def jwt_header(cls):
        ret = cls()
        ret.headers.add('WWW-Authenticate','JWT')
        return ret

resp = _401.basic_jwt_header()
print(resp.headers)  # parenthesized so the example also runs on Python 3
Now there's no need for creating __init__, or calling super() or whatever. We use cls instead of the explicit _401 class in support of any future subclassing of _401.

How can I mock a class method of a celery Task

Using python 2.7, celery 3.0.24 and mock 1.0.1. I have this:
class FancyTask(celery.Task):
    """Celery base task whose __call__ derives a value via helper_method1."""

    # '#classmethod' was a mangled '@classmethod' decorator.
    @classmethod
    def helper_method1(cls, name):
        """do some remote request depending on name"""
        return 'foo' + name + 'bar'

    def __call__(self, *args, **kwargs):
        # Last whitespace-separated component of the task name feeds the helper.
        funcname = self.name.split()[-1]
        bigname = self.helper_method1(funcname)
        return bigname
# '#celery.task' was a mangled '@celery.task' decorator in both cases;
# base=FancyTask makes both tasks inherit its __call__/helper_method1.
@celery.task(base=FancyTask)
def task1(*args, **kwargs):
    pass

@celery.task(base=FancyTask)
def task2(*args, **kwargs):
    pass
how can I patch helper_method1 while testing either task?
I've tried something like:
import mock
from mymodule import tasks
class TestTasks(unittest.TestCase):
    """Attempt from the question: mock the helper on the task instance."""

    def test_task1(self):
        task = tasks.task1
        # mock.MagickMock was a typo: the class is mock.MagicMock.
        task.helper_method1 = mock.MagicMock(return_value='42')
        res = task.delay('blah')
        task.helper_method1.assert_called_with('blah')
and the test is failing. The original function is the one being called. And no, this question didn't help me.
(I don't have a celery instance up and running so it's difficult for me to test this)
The target function in your application code is a classmethod. The function your test code is mocking is an instance method.
Does changing the test_task1 like this help -
def test_task1(self):
    """Patch the helper on the class (not the instance) before running the task."""
    # MagickMock -> MagicMock (typo in the original answer).
    FancyTask.helper_method1 = mock.MagicMock(return_value='42')
    task = tasks.task1
    res = task.delay('blah')
    task.helper_method1.assert_called_with('blah')
You probably also need to change the assert_called_with so it is called from the class level instead of the instance level.
change
task.helper_method1.assert_called_with('blah')
to
FancyTask.helper_method1.assert_called_with('blah')

Categories

Resources