I tried to implement a thread-safe singleton for my Python code. I tried the two pieces of code below, but both of them get stuck when the class that uses the Singleton metaclass is called from my unit tests.
1 (check-lock-check):
import functools
import threading
from typing import Callable

def synchronized(thread_lock: threading.Lock):
    """Synchronization decorator."""
    def wrapper(function: Callable):
        @functools.wraps(function)
        def inner_wrapper(*args: list, **kw: dict):
            with thread_lock:
                return function(*args, **kw)
        return inner_wrapper
    return wrapper

class Singleton(type):
    _instances = {}
    _lock = threading.Lock()

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._locked_call(*args, **kwargs)
        return cls._instances[cls]

    @synchronized(_lock)
    def _locked_call(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
2 (simple lock):
from threading import Lock

class Singleton(type):
    _instances = {}
    _lock: Lock = Lock()

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            with cls._lock:
                instance = super().__call__(*args, **kwargs)
                cls._instances[cls] = instance
        return cls._instances[cls]
Does someone know why my code gets stuck with these implementations when I run it locally (for unit tests, for example)? Once the app is deployed and actually uses multithreading, everything is fine.
And do you have suggestions for something else that could work for what I need?
Thanks.
I am attempting to write a quick decorator to manage logging the return values of various functions. I am not super well versed in decorators, so any help you can provide would be very helpful!
from functools import update_wrapper
from typing import Any, Optional
from logging import getLogger
from time import perf_counter
from datetime import datetime

class logger:
    def __init__(self, func: callable, response: str = "debug"):
        self.logger = getLogger()
        self.func = func
        self.response = response
        update_wrapper(self, func)

    def __call__(self, *args, **kwargs):
        return getattr(self, self.response)

    def debug(self, *args, **kwargs):
        self.logger.debug(f"Running {__name__} with id: {id(self)} at {datetime.now()}")
        start = perf_counter()
        value = self.func(*args, **kwargs)
        end = perf_counter()
        self.logger.debug(f"""Completed {__name__} with id: {id(self)} at {datetime.now()}.
            Total Time to run: {end - start:.6f}s""")
        return value

    def info(self, *args, **kwargs):
        self.logger.info(f"Running {__name__} at {datetime.now()}.")
        return self.func(*args, **kwargs)

@logger(response="debug")
def stuff(x):
    return x*x

stuff(2)
The error I am receiving is:
TypeError: __init__() missing 1 required positional argument: 'func'
Clearly, it doesn't like the required callable and the response requirement. However, I see in all other class-based decorator setups that func needs to be passed as part of __init__, and I have also seen that you can pass a decorator additional information. What am I doing wrong here?
EDIT:
The purpose of getattr(self, self.response) is so that the function returned by __call__ is the wrapped function along with either the debug or the info logging. This allows me to use the decorator @logger for both logging and debugging, yet yield two different results depending on the response value specified in the decorator (i.e. @logger(response="info")).
Solution:
class logger:
    def __init__(self, response: str = "debug"):
        self.logger = getLogger()
        self.response = response

    def __call__(self, func: callable):
        update_wrapper(self, func)
        self.func = func
        return getattr(self, self.response)

    def debug(self, *args, **kwargs):
        self.logger.debug(f"Running {self.func.__name__} (type: {type(self.func)}) with id: {id(self)} at {datetime.now()}")
        start = perf_counter()
        value = self.func(*args, **kwargs)
        end = perf_counter()
        self.logger.debug(f"""Completed {self.func.__name__} with id: {id(self)} at {datetime.now()}.
            Total Time to run: {end - start:.6f}s""")
        return value

    def info(self, *args, **kwargs):
        self.logger.info(f"Running {self.func.__name__} at {datetime.now()}.")
        return self.func(*args, **kwargs)
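A quick usage sketch for this solution (not part of the original post; basicConfig is only there so the root logger actually shows the messages):

from logging import basicConfig, DEBUG

basicConfig(level=DEBUG)  # make debug/info output visible on the console

@logger(response="debug")
def stuff(x):
    return x * x

@logger(response="info")
def other_stuff(x):
    return x + x

stuff(2)        # dispatched to logger.debug: logs start, runtime, and completion
other_stuff(2)  # dispatched to logger.info: logs a single "Running ..." line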
I don't know what your code should do; in particular, it is not clear (to me) what kind of arguments should be passed to getattr(self, self.response)(*args, **kwargs). I am asking this in order to understand the proper workflow of the decorator.
As written, your code will never work. Here are some possible examples of decoration:
The __call__ way: @logger(response="debug")
class logger_1:
    def __init__(self, response: str = "debug"):
        print(response)

    def __call__(self, func):
        self.func = func
        return self  # ? depends on what you are doing

    def debug(self, *args, **kwargs):
        ...  # debug-level logging would go here

    def info(self, *args, **kwargs):
        ...  # info-level logging would go here

@logger_1(response="debug")
def stuff(x):
    return x*x
One more level of "abstraction": @logger(response="debug")('some_parameter').debug_method
class logger_2:
    def __init__(self, response: str = "debug"):
        print(response)
        self.response = response  # needed so __call__ can look up the method by name

    def __call__(self, *args, **kwargs):
        self.updated_response = getattr(self, self.response)(*args, **kwargs)  # just an example
        return self

    def debug_method(self, func):
        self.func = func
        # ...
        return func

    def debug(self, *args, **kwargs):
        ...  # debug-level logging would go here

    def info(self, *args, **kwargs):
        ...  # info-level logging would go here

@logger_2(response="debug")('some_parameter').debug_method
def stuff(x):
    return x*x
NB: logger_2(response="debug")('some_parameter').debug_method does not take an argument itself, because it waits to be "fed" the target function stuff.
These are examples of syntax that constrain the workflow, so you need to be careful when designing your decorator.
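For comparison, here is a minimal sketch of the most common shape for a parameterized class-based decorator (the name logged and its body are illustrative, not from the post): __init__ takes the decorator arguments, and __call__ takes the target function and returns a wrapper.

from functools import update_wrapper
from logging import getLogger

class logged:                                   # hypothetical name, for illustration only
    def __init__(self, response: str = "debug"):
        self.logger = getLogger()
        self.response = response

    def __call__(self, func):
        def wrapper(*args, **kwargs):
            # dispatch to Logger.debug or Logger.info based on the response argument
            getattr(self.logger, self.response)(f"Running {func.__name__}")
            return func(*args, **kwargs)
        return update_wrapper(wrapper, func)

@logged(response="info")
def stuff(x):
    return x * x

stuff(2)   # logs "Running stuff" at INFO level (if logging is configured) and returns 4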
I want to use a third-party module in Python like this:
import some_module

class MyService():
    def __init__(self):
        self.some_module_obj = some_module.some_obj()

    def run(self, some_parameter):
        self.some_module_obj.some_attritude(some_parameter)
I know that some_module.some_obj and its method some_attritude are not thread safe. My question is: how do I make MyService thread safe?
Update 1:
I saw Artiom Kozyrev's code. Is the code below right?
import some_module
import threading

GLOBAL_LOCK = threading.Lock()

class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            with GLOBAL_LOCK:
                if cls not in cls._instances:
                    cls._instances[cls] = super(Singleton, cls).__call__(
                        *args, **kwargs
                    )
        return cls._instances[cls]

class MyService(metaclass=Singleton):
    def __init__(self):
        self.some_module_obj = some_module.some_obj()
        self.rlock = threading.RLock()

    def run(self, some_parameter):
        result = None
        with self.rlock:
            result = self.some_module_obj.some_attritude(some_parameter)
        return result
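Since some_module is only a placeholder in the question, the following sketch is illustrative rather than runnable; it shows how the intended behaviour of the code above could be checked:

import threading

service_a = MyService()
service_b = MyService()
print(service_a is service_b)   # True: the Singleton metaclass returns one shared instance

# run() can be called from several threads; each call takes service_a.rlock,
# so some_attritude is never executed concurrently
threads = [threading.Thread(target=service_a.run, args=(n,)) for n in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()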
I am trying to create a Singleton class in Python using this code:
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]

    def clear(cls):
        cls._instances = {}

class MyClass(metaclass=Singleton):
    def my_attribute(*args):
        if len(args) == 1:
            MyClass.i_attribute = args[0]
        elif len(args) == 0:
            try:
                return MyClass.i_attribute
            except:
                MyClass.i_attribute = 0
but the clear() method does not seem to work:
MyClass.my_attribute(42)
MyClass.clear()
MyClass.my_attribute() # still returns 42, but I expect 0
How do I delete the instance of MyClass so that I am back to 0 instances?
The Singleton metaclass collects all the instantiated child classes in its _instances attribute. Therefore, if you want to clear the stored instance only for a specific class, you can:
Redefine the Singleton class so that the stored instance becomes a _instance attribute of each class created by the metaclass:
class Singleton(type):
    """
    Singleton metaclass, which stores a single instance of the child class in the child class itself.
    The metaclass also exposes a clearing mechanism that clears the single instance:
    * clear: use as follows 'ClassToClear.clear()'
    """
    def __init__(cls, name, bases, methods):
        cls._instance = None
        super().__init__(name, bases, methods)

    def __call__(cls, *args, **kwargs):
        if cls._instance:
            return cls._instance
        cls._instance = super().__call__(*args, **kwargs)
        return cls._instance

    def clear(cls):
        cls._instance = None
With this new definition of the singleton, clear can be called on any class created with the Singleton metaclass, and it will clear that class's _instance attribute.
So in your case, MyClass.clear() would reset the _instance attribute to None.
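A short check of that behaviour (assuming MyClass is defined with this Singleton metaclass):

a = MyClass()
MyClass.clear()   # resets MyClass._instance to None
b = MyClass()
print(a is b)     # False: a fresh instance is created after clear()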
Alternatively, add a clear method that removes only the child class from the metaclass's _instances dictionary:
class SingletonRegistry(type):
    """
    Singleton metaclass, which implements a registry of all classes that are created through this metaclass and
    the corresponding instance of each class (added at the first creation).
    The metaclass also exposes a clearing mechanism that removes a specific class from the registry:
    * clear: use as follows 'ClassToClear.clear()'
    * clear_all: use as follows 'SingletonRegistry.clear_all()'
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(SingletonRegistry, cls).__call__(*args, **kwargs)
        return cls._instances[cls]

    def clear(cls):
        _ = cls._instances.pop(cls, None)

    def clear_all(*args, **kwargs):
        SingletonRegistry._instances = {}
In this case, if you would like to clear only one specific child class, you can write MyClass.clear(), which removes the MyClass key from the SingletonRegistry._instances dictionary.
This structure also allows you to clear all keys in the _instances dictionary by writing SingletonRegistry.clear_all().
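For example (assuming MyClass is defined with the SingletonRegistry metaclass; OtherClass is a hypothetical second singleton used only for illustration):

class OtherClass(metaclass=SingletonRegistry):
    pass

a, b = MyClass(), OtherClass()
MyClass.clear()                  # removes only the MyClass entry from the registry
print(MyClass() is a)            # False: MyClass is re-instantiated on the next call
print(OtherClass() is b)         # True: the OtherClass entry is untouched
SingletonRegistry.clear_all()    # empties the whole registry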
user2357112 is right. Here is the correct code:
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]

    def clear(cls):
        cls._instances = {}

class MyClass(metaclass=Singleton):
    def my_attribute(*args):
        my = MyClass()
        if len(args) == 1:
            my.i_attribute = args[0]
        elif len(args) == 0:
            try:
                return my.i_attribute
            except:
                my.i_attribute = 0
                return my.i_attribute
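With that change, the original example behaves as expected (a quick check, not part of the original answer):

MyClass.my_attribute(42)
print(MyClass.my_attribute())    # 42
MyClass.clear()                  # forget the stored singleton instance
print(MyClass.my_attribute())    # 0: a new instance without i_attribute is created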
I'm trying to extend my Python knowledge, so I just wrote my very first singleton metaclass:
class Singleton(type):
    _instance = None

    def __call__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance
I just checked (for feedback) good old Stack Overflow to see 'how others do it', and I found this solution:
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]
Can somebody explain to me why (the heck) we need that dictionary?
This is to support inheritance. With your solution, inheriting from a class built with the Singleton metaclass does not allow the subclass to have its own singleton.
class Singleton(type):
    _instance = None

    def __call__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance

class FirstSingleton(metaclass=Singleton):
    pass

class SecondSingleton(FirstSingleton):
    pass

x = FirstSingleton()
y = SecondSingleton()

x is y  # True
As you see, the calls FirstSingleton() and SecondSingleton() both returned the same instance.
But using a dictionary allows a class and its subclasses to have different singletons.
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class FirstSingleton(metaclass=Singleton):
    pass

class SecondSingleton(FirstSingleton):
    pass

x = FirstSingleton()
y = SecondSingleton()

x is y  # False
The class and the subclass each returned their own instance of a singleton.
I have a decorator declared as a class:
class predicated(object):
    def __init__(self, fn):
        self.fn = fn
        self.fpred = lambda *args, **kwargs: True

    def predicate(self, predicate):
        self.fpred = predicate
        return self

    def validate(self, *args, **kwargs):
        return self.fpred(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        if not self.validate(*args, **kwargs):
            raise PredicateNotMatchedError("predicate was not matched")
        return self.fn(*args, **kwargs)
... and when I use it to wrap a method in a class, calling that method does not seem to set the instance of the object as the first argument. While this behavior is not exactly unexpected, how would I go about getting self to be frozen when the method becomes an instance method?
Simplified example:
class test_decorator(object):
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

class Foo(object):
    @test_decorator
    def some_method(self):
        print(self)

Foo().some_method()
I expected an instance of Foo; instead I get an error saying 0 arguments were passed.
Figured it out - needed to define a __get__ method in order to create a MethodType binding like so:
# requires: from types import MethodType
def __get__(self, obj, objtype=None):
    return MethodType(self, obj)  # in Python 3, MethodType(func, instance) binds obj as self
which creates a MethodType object when the method is looked up on an instance, freezing the self argument.
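Putting it together, here is a minimal sketch of the simplified example with the descriptor fix applied (assuming Python 3, where MethodType takes the function and the instance):

from types import MethodType

class test_decorator:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    def __get__(self, obj, objtype=None):
        if obj is None:                  # accessed on the class itself
            return self
        return MethodType(self, obj)     # bind the wrapper to the instance

class Foo:
    @test_decorator
    def some_method(self):
        print(self)

Foo().some_method()   # now prints the Foo instance instead of raising a TypeError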