How to mark a global as deprecated in Python?

I've seen decorators that let you mark a function as deprecated so that a warning is given whenever that function is used. I'd like to do the same thing for a global variable, but I can't think of a way to detect global variable accesses. I know about the globals() function, and I could check its contents, but that would just tell me whether the global is defined (which it still will be if the function is deprecated, not outright removed), not whether it's actually being used. The best alternative I can think of is something like this:
# myglobal = 3
myglobal = DEPRECATED(3)
But besides the problem of how to get DEPRECATED to act exactly like a '3', I'm not sure what DEPRECATED could do that would let you detect every time it's accessed. I think the best it could do is iterate through all of the global's methods (since everything in Python is an object, even '3' has methods, for converting to string and the like) and 'decorate' them all to be deprecated. But that's not ideal.
Any ideas? Has anyone else tackled this problem?

You can't do this directly, since there's no way of intercepting module attribute access. However, you can replace the module in sys.modules with an object of your choosing that acts as a proxy, looking for accesses to certain properties:
import sys, warnings

def WrapMod(mod, deprecated):
    """Return a wrapped object that warns about deprecated accesses"""
    deprecated = set(deprecated)

    class Wrapper(object):
        def __getattr__(self, attr):
            if attr in deprecated:
                warnings.warn("Property %s is deprecated" % attr)
            return getattr(mod, attr)

        def __setattr__(self, attr, value):
            if attr in deprecated:
                warnings.warn("Property %s is deprecated" % attr)
            return setattr(mod, attr, value)

    return Wrapper()

oldVal = 6 * 9
newVal = 42

sys.modules[__name__] = WrapMod(sys.modules[__name__],
                                deprecated=['oldVal'])
Now, you can use it as:
>>> import mod1
>>> mod1.newVal
42
>>> mod1.oldVal
mod1.py:11: UserWarning: Property oldVal is deprecated
  warnings.warn("Property %s is deprecated" % attr)
54
The downside is that every access to the module now goes through two attribute lookups, so there is a slight performance hit.

You could make your module into a class (see e.g. this SO question) and make the deprecated global a property, so you can execute some code when it's accessed and issue the warning you desire. However, this does seem a bit of overkill.
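For illustration, here is a minimal sketch of that idea, assuming Python 3.5+ (where a module's __class__ can be reassigned) and using types.ModuleType so the replacement still behaves like a module; the names myglobal and _DeprecatingModule are invented for the example:

import sys
import types
import warnings

class _DeprecatingModule(types.ModuleType):
    @property
    def myglobal(self):
        warnings.warn("myglobal is deprecated", DeprecationWarning, stacklevel=2)
        return 3

# Swap the class of the current module; since a property is a data
# descriptor, attribute access on the module now hits it first.
sys.modules[__name__].__class__ = _DeprecatingModule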

Behold:
Code
from types import FunctionType

def wrapper(f, warning):
    def new(*args, **kwargs):
        if not args[0].warned:
            print "Deprecated Warning: %s" % warning
            args[0].warned = True
        return f(*args, **kwargs)
    return new

class Deprecated(object):
    def __new__(self, o, warning):
        print "Creating Deprecated Object"
        class temp(o.__class__): pass
        temp.__name__ = "Deprecated_%s" % o.__class__.__name__
        output = temp.__new__(temp, o)
        output.warned = True
        wrappable_types = (type(int.__add__), type(zip), FunctionType)
        unwrappable_names = ("__str__", "__unicode__", "__repr__",
                             "__getattribute__", "__setattr__")
        for method_name in dir(temp):
            if not type(getattr(temp, method_name)) in wrappable_types: continue
            if method_name in unwrappable_names: continue
            setattr(temp, method_name, wrapper(getattr(temp, method_name), warning))
        output.warned = False
        return output
Output
>>> a=Deprecated(1, "Don't use 1")
Creating Deprecated Object
>>> a+9
Deprecated Warning: Don't use 1
10
>>> a*4
4
>>> 2*a
2
This can obviously be refined, but the gist is there.

This is one of the main rationales for PEP 562 (implemented in Python 3.7):
Typical workarounds are assigning __class__ of a module object to a
custom subclass of types.ModuleType or replacing the sys.modules item
with a custom wrapper instance. It would be convenient to simplify
this procedure by recognizing __getattr__ defined directly in a module
that would act like a normal __getattr__ method, except that it will
be defined on module instances. For example:
# lib.py

from warnings import warn

deprecated_names = ["old_function", ...]

def _deprecated_old_function(arg, other):
    ...

def __getattr__(name):
    if name in deprecated_names:
        warn(f"{name} is deprecated", DeprecationWarning)
        return globals()[f"_deprecated_{name}"]
    raise AttributeError(f"module {__name__} has no attribute {name}")

# main.py

from lib import old_function  # Works, but emits the warning
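Since the question is about a deprecated global value rather than a function, here is a minimal sketch of the same PEP 562 mechanism adapted to a plain global, assuming Python 3.7+; the module name mymod and the names myglobal/newglobal are invented for the example:

# mymod.py
import warnings

_deprecated_values = {"myglobal": 3}  # old globals and their values

newglobal = 42

def __getattr__(name):
    # Module __getattr__ is called only when normal lookup fails, so
    # myglobal must not also be defined at module top level.
    if name in _deprecated_values:
        warnings.warn(f"{name} is deprecated", DeprecationWarning, stacklevel=2)
        return _deprecated_values[name]
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# client code: both of the following emit the warning
#     import mymod; mymod.myglobal
#     from mymod import myglobal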

Related

Class-level classmethod() can only be called on a method_descriptor or instance method

util.py:

import inspect

class Singleton(type):
    _instances = {}
    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)

class MetaResult(Singleton):
    def __getattribute__(cls, name):
        return super().__getattribute__(name)

class Result(metaclass=MetaResult):
    @staticmethod
    def res_decorator(func):
        def funcwrap(*args, **kwargs):
            sig = inspect.signature(func)
            bound_sig = sig.bind(*args, **kwargs)
            bound_sig.apply_defaults()
            # additional code to extract function arguments
        return funcwrap
check_params.py:

from util import Result as _Result
from abc import ABCMeta as _ABCMeta

class paramparse(metaclass=_ABCMeta):
    @classmethod
    @_Result.res_decorator
    def parse_flash_params(cls, flash_config_path):
        # some code
        ...
Now, I cythonize check_params.py with the following setup:
cythonize.py

import os as _os
from pathlib import Path as _Path
from distutils.core import setup as _setup
from Cython.Distutils import build_ext as _build_ext

files_to_compile = []

def cython_build(source_path):
    for dirpath, _, fnames in _os.walk(source_path):
        for fname in [x for x in fnames if x.endswith('.py')]:
            fname = _Path(fname)
            files_to_compile.append(fname)

for e in files_to_compile:
    e.cython_directives = {'binding': True, 'language_level': 3}

_setup(name="Proj1", cmdclass={'build_ext': _build_ext},
       ext_modules=files_to_compile)
cythonized as:
python cythonize.py --path C:\directory_where_check_params_exist
which generates a pyd file, against which the following unit tests were run:
unit_test_check_params.py
from check_params import *  # getting error here, details outside the code
# unit tests written here
check_params.pyx:112: in init check_params
???
E   TypeError: Class-level classmethod() can only be called on a method_descriptor or instance method.
When I debug this, the error appears to be caused by the classmethod descriptor applied over the decorator (on parse_flash_params) in check_params.py.
Please let me know if you need more information.
This still isn't a hugely helpful example since the code you provide still doesn't actually work. However:
The case

@classmethod
@_Result.res_decorator
is definitely a Cython bug. In the function __Pyx_Method_ClassMethod, Cython has a lot of type checks to ensure that the type is a method (a function defined in a class), while actually it only needs to be callable, and this should really only be checked at call time. As a quick workaround you can edit the relevant internal Cython file (CythonFunction.c) to replace the lines
PyErr_SetString(PyExc_TypeError,
               "Class-level classmethod() can only be called on "
               "a method_descriptor or instance method.");
return NULL;
with
return PyClassMethod_New(method);
This seems to me closer to what Python does, where classmethod() accepts any object and only checks for callability when the function is actually called.
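As a quick illustration of that behaviour in plain CPython: classmethod() happily wraps something that is merely callable, and nothing complains until the call itself. The NotAFunction name is invented for the demo:

class NotAFunction:
    def __call__(self, cls):
        return cls.__name__

class C:
    # classmethod() accepts an arbitrary callable instance, not just a function
    name = classmethod(NotAFunction())

print(C.name())  # prints 'C'; no type check ever complains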
In the longer term you should report it as a bug in Cython with an example that actually works to demonstrate the problem. This way it can actually be fixed. I don't think you need Result and the staticclass bit - res_decorator as an isolated function should demonstrate the problem.
The second possible order

@_Result.res_decorator
@classmethod
doesn't work in un-Cythonized Python either, since the direct result of the classmethod decorator isn't callable. It only becomes callable when it becomes a bound method, which happens later. Therefore this isn't a bug in Cython.
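A tiny check of that claim in plain Python, with no Cython involved:

def f(cls):
    return cls

cm = classmethod(f)
try:
    cm(int)           # the raw classmethod object is not callable
except TypeError as e:
    print(e)          # e.g. "'classmethod' object is not callable"

class C:
    f = cm            # binding through a class is what makes it callable

print(C.f())          # <class '__main__.C'>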
Final addendum:
A cleaner workaround is to force Cython to use the builtin classmethod instead of its own version, which is what causes the bug:
try:
    myclassmethod = __builtins__.classmethod
except AttributeError:
    myclassmethod = __builtins__['classmethod']

class paramparse(metaclass=_ABCMeta):
    @myclassmethod
    @_Result.res_decorator
    def parse_flash_params(cls, flash_config_path):
        pass
The try ... except block is needed because __builtins__ behaves slightly differently in Cython and in a Python module, which is fine because it's an implementation detail anyway.

Pickling dynamically created types

I've been trying to get some dynamically created types (i.e. ones created by calling the 3-argument form of type()) to pickle and unpickle nicely. I've been using the module-switching trick to hide the details from users of the module and give clean semantics.
I've learned several things already:
The type must be findable with getattr on the module itself
The type must be consistent with what getattr finds, that is to say if we call pickle.dumps(o) then it must be true that type(o) == getattr(module, 'name of type')
Where I'm stuck though is that there still seems to be something odd going on - it seems to be calling __getstate__ on something unexpected.
Here's the simplest setup I've got that reproduces the issue, testing with Python 3.5, but I'd like to target back to 3.3 if possible:
# module.py
import sys
import functools

def dump(self):
    return b'Some data'  # Dummy for testing

def undump(self, data):
    print('Undump: %r' % data)  # Do nothing for testing

# Cheaty demo way to make this consistent
@functools.lru_cache(maxsize=None)
def make_type(name):
    return type(name, (), {
        '__getstate__': dump,
        '__setstate__': undump,
    })

class Magic(object):
    def __init__(self, path):
        self.path = path

    def __getattr__(self, name):
        print('Getting thing: %s (from: %s)' % (name, self.path))
        # for simple testing all calls to make_type must end in last: x.y.z.last
        if name != 'last':
            if self.path:
                return Magic(self.path + '.' + name)
            else:
                return Magic(name)
        return make_type(self.path + '.' + name)

# Make the switch
sys.modules[__name__] = Magic('')
And then a quick way to exercise that:
import module
import pickle

f = module.foo.bar.woof.last()
print(f.__getstate__())  # See, *this* works
print('Pickle starts here')
print(pickle.dumps(f))
Which then gives:
Getting thing: foo (from: )
Getting thing: bar (from: foo)
Getting thing: woof (from: foo.bar)
Getting thing: last (from: foo.bar.woof)
b'Some data'
Pickle starts here
Getting thing: __spec__ (from: )
Getting thing: _initializing (from: __spec__)
Getting thing: foo (from: )
Getting thing: bar (from: foo)
Getting thing: woof (from: foo.bar)
Getting thing: last (from: foo.bar.woof)
Getting thing: __getstate__ (from: foo.bar.woof)
Traceback (most recent call last):
  File "test.py", line 7, in <module>
    print(pickle.dumps(f))
TypeError: 'Magic' object is not callable
I wasn't expecting to see anything looking up __getstate__ on module.foo.bar.woof, but even if we force that lookup to fail by adding:
if name == '__getstate__': raise AttributeError()
into our __getattr__ it still fails with:
Traceback (most recent call last):
  File "test.py", line 7, in <module>
    print(pickle.dumps(f))
_pickle.PicklingError: Can't pickle <class 'module.Magic'>: it's not the same object as module.Magic
What gives? Am I missing something with __spec__? The docs for __spec__ pretty much just stress setting it appropriately, but don't seem to actually explain much.
More importantly, the bigger question is: how am I supposed to make the types I programmatically generate via a pseudo-module's __getattr__ implementation pickle properly?
(And obviously once I've managed to get pickle.dumps to produce something I expect pickle.loads to call undump with the same thing)
To pickle f, pickle needs to pickle f's class, module.foo.bar.woof.last.
The docs don't claim support for pickling arbitrary classes. They claim the following:
The following types can be pickled:
...
classes that are defined at the top level of a module
module.foo.bar.woof.last isn't defined at the top level of a module, even a pretend module like module. In this not-officially-supported case, the pickle logic ends up trying to pickle module.foo.bar.woof, either here:
elif parent is not module:
    self.save_reduce(getattr, (parent, lastname))
or here
else if (parent != module) {
    PickleState *st = _Pickle_GetGlobalState();
    PyObject *reduce_value = Py_BuildValue("(O(OO))",
                                           st->getattr, parent, lastname);
    status = save_reduce(self, reduce_value, NULL);
module.foo.bar.woof can't be pickled for multiple reasons. It returns a non-callable Magic instance for all unsupported method lookups, like __getstate__, which is where your first error comes from. The module-switching thing prevents finding the Magic class to pickle it, which is where your second error comes from. There are probably more incompatibilities.
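To illustrate the first point, here is a minimal sketch (reusing make_type from the question's module.py) of a hardened __getattr__ that raises AttributeError for special-method probes instead of returning a bogus proxy; this removes only the first failure mode, not the "not the same object" class-lookup problem:

class Magic(object):
    def __init__(self, path):
        self.path = path

    def __getattr__(self, name):
        # Refuse to fabricate special methods such as __getstate__; pickle
        # (and other protocols) probe for them and must see AttributeError.
        if name.startswith('__') and name.endswith('__'):
            raise AttributeError(name)
        if name != 'last':
            return Magic(self.path + '.' + name if self.path else name)
        return make_type(self.path + '.' + name)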
Making the class callable, as has already been shown, just drifts further in the wrong direction. Following the context of the error, <class 'module.Magic'>: it's not the same object as module.Magic: the pickler's name lookup resolves to a different object than the instance's actual class, which is a common problem when pickling instances of classes that replace their own module. The workaround is to patch the class with its type, @mock.patch('module1.Magic', type(module1.Magic)), for the duration of the dump. This is a short answer, but it works.
Main.py

import module1
import pickle
import mock

f = module1.foo.bar.woof.last
print(f().__getstate__())  # See, *this* works
print('Pickle starts here')

@mock.patch('module1.Magic', type(module1.Magic))
def pickleit():
    return pickle.dumps(f())

print(pickleit())
Magic class

class Magic(object):
    def __init__(self, value):
        self.path = value
    __class__: lambda x: x
    def __getstate__(self):
        print("Shoot me! i'm at " + self.path)
        return dump(self)
    def __setstate__(self, value):
        print('something will never occur')
        return undump(self, value)
    def __spec__(self):
        print("Wrong side of the planet ")
    def _initializing(self):
        print("Even farther lost ")
    def __getattr__(self, name):
        print('Getting thing: %s (from: %s)' % (name, self.path))
        # for simple testing all calls to make_type must end in last: x.y.z.last
        if name != 'last':
            if self.path:
                return Magic(self.path + '.' + name)
            else:
                return Magic(name)
        print('terminal stage')
        return make_type(self.path + '.' + name)
Even if this is more a glancing blow than a clean hit, I could see the content dumped to my console.

What are "Bootstrap issues" of functionls wraps decorator?

Python 3.2 introduces a new function, recursive_repr, in the reprlib module.
When I looked into the source code I found this code:
from _thread import get_ident  # imported at the top of reprlib

def recursive_repr(fillvalue='...'):
    'Decorator to make a repr function return fillvalue for a recursive call'

    def decorating_function(user_function):
        repr_running = set()

        def wrapper(self):
            key = id(self), get_ident()
            if key in repr_running:
                return fillvalue
            repr_running.add(key)
            try:
                result = user_function(self)
            finally:
                repr_running.discard(key)
            return result

        # Can't use functools.wraps() here because of bootstrap issues
        wrapper.__module__ = getattr(user_function, '__module__')
        wrapper.__doc__ = getattr(user_function, '__doc__')
        wrapper.__name__ = getattr(user_function, '__name__')
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper

    return decorating_function
What I didn't understand is: what are bootstrap issues, and why can't @wraps(user_function) be applied to wrapper?
It looks to me like the "bootstrap issues" come from a circular dependency. In this case, functools imports collections, which in turn imports reprlib. If reprlib tried to import functools.wraps, it would implicitly try to import itself, which would not work. The Python programming FAQ (2.7, but I don't think this has changed since) says that circular imports will fail when modules use the from module import function form, which these modules do.
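A toy reproduction of that failure, with two hypothetical modules a.py and b.py standing in for the functools/reprlib cycle:

# a.py
from b import g          # importing a starts loading b...

def f():
    return g()

# b.py
from a import f          # ...which tries to import from a again; a is only
                         # partially initialized and f doesn't exist yet, so
                         # this raises ImportError

def g():
    return 1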
"Bootstrapping" refers to the phrase "picking yourself up by your own bootstraps", which is clearly impossible. In this context it means that you can't use wraps() here, because this function is itself part of the definition of wraps().

How can I get a list of all classes within current module in Python?

I've seen plenty of examples of people extracting all of the classes from a module, usually something like:
# foo.py
class Foo:
    pass

# test.py
import inspect
import foo

for name, obj in inspect.getmembers(foo):
    if inspect.isclass(obj):
        print obj
Awesome.
But I can't find out how to get all of the classes from the current module.
# foo.py
import inspect

class Foo:
    pass

def print_classes():
    for name, obj in inspect.getmembers(???):  # what do I do here?
        if inspect.isclass(obj):
            print obj

# test.py
import foo
foo.print_classes()
This is probably something really obvious, but I haven't been able to find anything. Can anyone help me out?
Try this:
import sys
current_module = sys.modules[__name__]
In your context:
import sys, inspect

def print_classes():
    for name, obj in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(obj):
            print(obj)
And even better:
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
Because inspect.getmembers() takes a predicate.
I don't know if there's a 'proper' way to do it, but your snippet is on the right track: just add import foo to foo.py, do inspect.getmembers(foo), and it should work fine.
What about
g = globals().copy()
for name, obj in g.iteritems():
?
I was able to get all I needed from the dir() built-in plus getattr().
# Works on pretty much everything, but be mindful that
# you get lists of strings back
print dir(myproject)
print dir(myproject.mymodule)
print dir(myproject.mymodule.myfile)
print dir(myproject.mymodule.myfile.myclass)
# But, the string names can be resolved with getattr, (as seen below)
Though, it does come out looking like a hairball:
def list_supported_platforms():
    """
    List supported platforms (to match sys.platform)

    Returns:
        list str: platform names
    """
    return list(itertools.chain(
        *list(
            # Get the class's constant
            getattr(
                # Get the module's first class, which we wrote
                getattr(
                    # Get the module
                    getattr(platforms, item),
                    dir(
                        getattr(platforms, item)
                    )[0]
                ),
                'SYS_PLATFORMS'
            )
            # For each include in platforms/__init__.py
            for item in dir(platforms)
            # Ignore magic, ourselves (index.py) and a base class.
            if not item.startswith('__') and item not in ['index', 'base']
        )
    ))
import pyclbr
print(pyclbr.readmodule(__name__).keys())
Note that the stdlib's Python class browser module uses static source analysis, so it only works for modules that are backed by a real .py file.
If you want all the classes that belong to the current module, you could use this:
import sys, inspect

def print_classes():
    is_class_member = lambda member: inspect.isclass(member) and member.__module__ == __name__
    clsmembers = inspect.getmembers(sys.modules[__name__], is_class_member)
If you use Nadia's answer and you import other classes into your module, those classes will be included too.
That's why member.__module__ == __name__ is added to the predicate used in is_class_member: this check ensures that the class really belongs to the module.
A predicate is a function (callable), that returns a boolean value.
This is the line that I use to get all of the classes that have been defined in the current module (i.e. not imported). It's a little long according to PEP 8, but you can change it as you see fit.
import sys
import inspect

classes = [name for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass)
           if obj.__module__ == __name__]
This gives you a list of the class names. If you want the class objects themselves just keep obj instead.
classes = [obj for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass)
           if obj.__module__ == __name__]
This has been more useful in my experience.
Another solution which works in Python 2 and 3:
# foo.py
import sys

class Foo(object):
    pass

def print_classes():
    current_module = sys.modules[__name__]
    for key in dir(current_module):
        if isinstance(getattr(current_module, key), type):
            print(key)

# test.py
import foo
foo.print_classes()
I think that you can do something like this, if you only need your own classes:

class custom(object):
    __custom__ = True

class Alpha(custom):
    something = 3

def GetClasses():
    return [x for x in globals() if hasattr(globals()[str(x)], '__custom__')]

print(GetClasses())
I frequently find myself writing command line utilities wherein the first argument is meant to refer to one of many different classes. For example ./something.py feature command --arguments, where Feature is a class and command is a method on that class. Here's a base class that makes this easy.
The assumption is that this base class resides in a directory alongside all of its subclasses. You can then call ArgBaseClass(foo = bar).load_subclasses() which will return a dictionary. For example, if the directory looks like this:
arg_base_class.py
feature.py
Assuming feature.py implements class Feature(ArgBaseClass), then the above invocation of load_subclasses will return { 'feature' : <Feature object> }. The same kwargs (foo = bar) will be passed into the Feature class.
#!/usr/bin/env python3
import os, pkgutil, importlib, inspect

class ArgBaseClass():
    # Assign all keyword arguments as properties on self, and keep the kwargs for later.
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        for (k, v) in kwargs.items():
            setattr(self, k, v)
        ms = inspect.getmembers(self, predicate=inspect.ismethod)
        self.methods = dict([(n, m) for (n, m) in ms if not n.startswith('_')])

    # Add the names of the methods to a parser object.
    def _parse_arguments(self, parser):
        parser.add_argument('method', choices=list(self.methods))
        return parser

    # Instantiate one of each of the subclasses of this class.
    def load_subclasses(self):
        module_dir = os.path.dirname(__file__)
        module_name = os.path.basename(os.path.normpath(module_dir))
        parent_class = self.__class__
        modules = {}
        # Load all the modules in the package:
        for (module_loader, name, ispkg) in pkgutil.iter_modules([module_dir]):
            modules[name] = importlib.import_module('.' + name, module_name)
        # Instantiate one of each class, passing the keyword arguments.
        ret = {}
        for cls in parent_class.__subclasses__():
            path = cls.__module__.split('.')
            ret[path[-1]] = cls(**self._kwargs)
        return ret
import Foo
dir(Foo)
import collections
dir(collections)
The following can be placed at the top of the file:
def get_classes():
    import inspect, sys
    return dict(inspect.getmembers(
        sys.modules[__name__],
        lambda member: inspect.isclass(member) and member.__module__ == __name__
    ))
Note, this can be placed at the top of the module because we've wrapped the logic in a function definition. If you want the dictionary to exist as a top-level object you will need to place the definition at the bottom of the file to ensure all classes are included.
Go to the Python interpreter and type help('module_name'), then press Enter.
E.g. help('os').
Here, I've pasted one part of the output below:
class statvfs_result(__builtin__.object)
| statvfs_result: Result from statvfs or fstatvfs.
|
| This object may be accessed either as a tuple of
| (bsize, frsize, blocks, bfree, bavail, files, ffree, favail, flag, namemax),
| or via the attributes f_bsize, f_frsize, f_blocks, f_bfree, and so on.
|
| See os.statvfs for more information.
|
| Methods defined here:
|
| __add__(...)
| x.__add__(y) <==> x+y
|
| __contains__(...)
| x.__contains__(y) <==> y in x

Extension methods in Python

Does Python have extension methods like C#? Is it possible to call a method like:
MyRandomMethod()
on existing types like int?
myInt.MyRandomMethod()
You can add whatever methods you like to class objects defined in Python code (AKA monkey patching):

>>> class A(object):
...     pass
...
>>> def stuff(self):
...     print self
...
>>> A.test = stuff
>>> A().test()
This does not work on builtin types, because their __dict__ is not writable (it's a dictproxy).
So no, there is no "real" extension method mechanism in Python.
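A quick demonstration of that limitation (the exact error message varies across Python versions):

# trying to monkey patch a builtin type fails at the setattr step
try:
    int.test = lambda self: self + 1
except TypeError as e:
    print(e)  # e.g. can't set attributes of built-in/extension type 'int'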
It can be done with Forbidden Fruit (https://pypi.python.org/pypi/forbiddenfruit)
Install forbiddenfruit:
pip install forbiddenfruit
Then you can extend built-in types:
>>> from forbiddenfruit import curse
>>> def percent(self, delta):
... return self * (1 + delta / 100)
>>> curse(float, 'percent', percent)
>>> 1.0.percent(5)
1.05
Forbidden Fruit is fundamentally dependent on the C API; it works only on CPython and won't work on other Python implementations, such as Jython or PyPy.
Not sure if that's what you're asking, but you can extend existing types and then call whatever you like on the new thing:

class int(int):
    def random_method(self):
        return 4                # guaranteed to be random

v = int(5)                      # you'll have to instantiate all your variables like this
v.random_method()

class int(int):
    def xkcd(self):
        import antigravity
        print(42)

>>> v.xkcd()                    # v is still an instance of the first int subclass
Traceback (most recent call last):
  File "<pyshell#81>", line 1, in <module>
    v.xkcd()
AttributeError: 'int' object has no attribute 'xkcd'
>>> c = int(1)
>>> c.random_method()
4
>>> c.xkcd()
42
hope that clarifies your question
The following context manager adds the method like Forbidden Fruit would, without its limitations. It has the additional benefit of removing the extension method afterwards:
class extension_method:
    def __init__(self, obj, method):
        method_name = method.__name__
        setattr(obj, method_name, method)
        self.obj = obj
        self.method_name = method_name

    def __enter__(self):
        return self.obj

    def __exit__(self, type, value, traceback):
        # remove this if you want to keep the extension method after context exit
        delattr(self.obj, self.method_name)
Usage is as follows:
class C:
    pass

def get_class_name(self):
    return self.__class__.__name__

with extension_method(C, get_class_name):
    assert hasattr(C, 'get_class_name')  # the method is added to C
    c = C()
    print(c.get_class_name())            # prints 'C'

assert not hasattr(C, 'get_class_name')  # the method is gone from C
I've had great luck with the method described here:
http://mail.python.org/pipermail/python-dev/2008-January/076194.html
I have no idea if it works on builtins though.
Another option is to override the metaclass. This allows you to, among other things, specify functions that should exist in all classes.
This article starts to discuss it:
http://www.onlamp.com/pub/a/python/2003/04/17/metaclasses.html
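For a flavour of the idea, here is a minimal sketch of a metaclass that injects a method into every class created with it; all names here are invented for the example:

class ExtensionMeta(type):
    def __new__(mcls, name, bases, ns):
        cls = super().__new__(mcls, name, bases, ns)
        # Inject a shared helper unless the class (or a base) already has one.
        if not hasattr(cls, 'describe'):
            cls.describe = lambda self: '%s(%r)' % (type(self).__name__, vars(self))
        return cls

class Base(metaclass=ExtensionMeta):
    pass

class Point(Base):
    def __init__(self, x, y):
        self.x, self.y = x, y

print(Point(1, 2).describe())  # Point({'x': 1, 'y': 2})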
