If I am importing a module from a 3rd party, but the syntax they use does not line up with mine, is there a good way to pep8 it?
I need to use a 3rd-party module that I cannot edit, and its naming convention isn't great. For example:
thisIsABase_function(self,a,b)
I have some code that PEP8-ifies the name, but I was wondering how I can make the functions accessible by that new PEP8 name.
def _pep8ify(name):
    """PEP8ify name"""
    import re
    if '.' in name:
        name = name[name.rfind('.') + 1:]
    if name[0].isdigit():
        name = "level_" + name
    name = name.replace(".", "_")
    if '_' in name:
        return name.lower()
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
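For reference, a quick check of this helper (note that a name which already contains an underscore is only lowercased by this version, so the example name above is not fully converted):
print(_pep8ify("someCamelCaseName"))     # some_camel_case_name
print(_pep8ify("thisIsABase_function"))  # thisisabase_function (short-circuits on the existing underscore)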
Is there a way I can PEP8 these names on import?
You can use a context manager to automatically pep8ify the symbols from an imported module like:
Example:
with Pep8Importer():
    import funky
Code:
import sys


class Pep8Importer(object):

    @staticmethod
    def _pep8ify(name):
        """PEP8ify name"""
        import re
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    def __enter__(self):
        # get list of current modules in namespace
        self.orig_names = set(dir(sys.modules[__name__]))

    def __exit__(self, exc_type, exc_val, exc_tb):
        """ Pep8ify names in any new modules

        Diff list of current module names in namespace.
        pep8ify names at the first level in those modules.
        Ignore any other new names under the assumption that they
        were imported/created with the name as desired.
        """
        if exc_type is not None:
            return
        new_names = set(dir(sys.modules[__name__])) - self.orig_names
        for module_name in (n for n in new_names if not n.startswith('_')):
            module = sys.modules[module_name]
            for name in dir(module):
                pep8ified = self._pep8ify(name)
                if pep8ified != name and not name.startswith('_'):
                    setattr(module, pep8ified, getattr(module, name))
                    print("In module: {}, added '{}' from '{}'".format(
                        module_name, pep8ified, name))
Test Code:
with Pep8Importer():
    import funky
print(funky.thisIsABase_function)
print(funky.this_is_a_base_function)
funky.py
thisIsABase_function = 1
Results:
In module: funky, added 'this_is_a_base_function' from 'thisIsABase_function'
1
1
I think something like this does what you want:
# somemodule.py
def func_a():
    print('hello a')

def func_b():
    print('hello b')
# yourcode.py
import inspect
import importlib

def pepimports(the_module_name):
    mymodule = importlib.import_module(the_module_name)
    myfuncs = inspect.getmembers(mymodule, inspect.isfunction)
    for f in myfuncs:
        setattr(mymodule, _pep8ify(f[1].__name__), f[1])
    return mymodule
mymodule = pepimports('some_module_name')
# you can now call the functions from mymodule
# (the original names still exist, so watch out for clashes)
mymodule.pepified_function()
It's a bit hackish, but I've tried it (python 3.5) and it seems to work (at least on a trivial example).
Python Code:
class Importer:
    from importlib import __import__, reload
    from sys import modules

    libname = ""
    import_count = 0
    module = None

    def __init__(self, name):
        self.libname = name
        self.import_count = 0

    def importm(self):
        if self.libname not in self.modules:
            self.module = __import__(self.libname)
        else:
            print("must reload")
            self.module = self.reload(self.module)
        self.import_count += 1


# test out Importer
importer = Importer("module")

importer.importm()  # prints Hello
importer.importm()  # prints Hello
importer.importm()  # prints Hello (again)

print(importer.import_count)
The above Python (3.8.1) code is at OnlineGDB; if you run it, it gives this error:
TypeError: reload() takes 1 positional argument but 2 were given
When I open up the importlib library in Python, I see this:
# ... (previous code; unnecessary)

_RELOADING = {}


def reload(module):  ## this is where reload is defined (one argument)
    """Reload the module and return it.

    The module must have been successfully imported before.

    """
    if not module or not isinstance(module, types.ModuleType):  ## check for type of module
        raise TypeError("reload() argument must be a module")
    try:
        name = module.__spec__.name
    except AttributeError:
        name = module.__name__

    if sys.modules.get(name) is not module:  ## other code you (probably) don't have to care about
        msg = "module {} not in sys.modules"
        raise ImportError(msg.format(name), name=name)
    if name in _RELOADING:
        return _RELOADING[name]
    _RELOADING[name] = module
    try:
        parent_name = name.rpartition('.')[0]
        if parent_name:
            try:
                parent = sys.modules[parent_name]
            except KeyError:
                msg = "parent {!r} not in sys.modules"
                raise ImportError(msg.format(parent_name),
                                  name=parent_name) from None
            else:
                pkgpath = parent.__path__
        else:
            pkgpath = None
        target = module
        spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target)
        if spec is None:
            raise ModuleNotFoundError(f"spec not found for the module {name!r}", name=name)
        _bootstrap._exec(spec, module)
        # The module may have replaced itself in sys.modules!
        return sys.modules[name]
    finally:
        try:
            del _RELOADING[name]
        except KeyError:
            pass

# ... (After code; unnecessary)
All double hashtag (##) comments are mine
It is clearly visible that reload DOES take one argument, and it checks that the argument is a module. In the OGDB (OnlineGDB) code, I am only passing one argument (pretty sure) and it is of type module (most likely). If I remove that argument (you can edit the OGDB), it gives:
TypeError: reload() argument must be module
So for some reason, Python keeps thinking I have one more argument than I actually have. The only way I made it work was editing the importlib file so that reload takes two arguments (not a good idea).
I tried running pdb, but it wasn't helpful.
Can anyone spot anything obviously wrong, like actually having two arguments?
What I needed to do was put the imports outside the class for it to work. Here is the new OGDB. Credits to @L3viathan. Code below:
from importlib import __import__, reload
from sys import modules


class Importer:
    libname = ""
    import_count = 0
    module = None

    def __init__(self, name):
        self.libname = name
        self.import_count = 0

    def importm(self):
        if self.libname not in modules:
            self.module = __import__(self.libname)
        else:
            print("must reload")
            self.module = reload(self.module)
        self.import_count += 1


# test out Importer
importer = Importer("module")

importer.importm()  # prints Hello
importer.importm()  # prints Hello
importer.importm()  # prints Hello (again)

print(importer.import_count)
You're having an issue because you're calling self.reload(self.module), which is actually equivalent to calling reload(self, self.module). To see this, try running the following:
class Example:
    def some_method(*args):
        print(args)

    def some_other_method(self):
        self.some_method(1)


an_example = Example()
an_example.some_other_method()
You should see that this prints out 2 arguments, not 1 (the first of which is a reference to self), despite us only passing one argument to some_method, and despite some_method having no self parameter.
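For reference, running the snippet above prints a single tuple with two entries, something like (the exact object address will vary):
(<__main__.Example object at 0x...>, 1)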
It would be better to import the reload function within your importm method (or outside the class altogether!), like so:
def importm(self):
    from importlib import __import__, reload
    if self.libname not in self.modules:
        self.module = __import__(self.libname)
    else:
        print("must reload")
        self.module = reload(self.module)
    self.import_count += 1
import mymodule
reload(mymodule)
is how it would work ... I'm not sure what your question is from that big wall of text above. This is typically used to reset a module's state to its initial state:
mymodule.py
x = 5
main.py
from importlib import reload # in py2 you did not need to import it
import mymodule
print(mymodule.x) # 5
mymodule.x = 8
print(mymodule.x) # 8
reload(mymodule)
print(mymodule.x) # 5 again
When using numpy there's a lot of np.* in the code, e.g.
import numpy as np
y = np.sin(np.abs(np.linspace(0, 2*np.pi)))
This clutters formulas and makes them less readable. One could get around this using a wildcard import:
from numpy import *
y = sin(abs(linspace(0, 2*pi)))
However, wildcard import is almost always a bad idea.
I'm wondering whether there is a way to make a wildcard import within a context limited to the formula (or math code block). This would maintain readability and limit the namespace pollution to a small code area that can be controlled more easily. I would like something like this:
with import_wildcard(numpy):
    y2 = sin(abs(linspace(0, 2*pi)))
Questions:
Is there some language construct to allow this?
Is the request itself reasonable or am I overlooking a potential problem?
Solution 1: Temporary wildcard import:
class import_wildcard(object):
    """Context manager to temporarily import a package's content into the global namespace."""

    def __init__(self, packagename):
        self.packagename = packagename
        self.package = __import__(self.packagename, globals(), locals())
        self.globals_backup = {}

    def __enter__(self):
        _globals = globals()
        for name in self.package.__dict__:
            if name in _globals:
                self.globals_backup[name] = _globals[name]
        _globals.update(self.package.__dict__)

    def __exit__(self, exc_type, exc_value, exc_tb):
        _globals = globals()
        for name in self.package.__dict__:
            if name not in self.globals_backup:
                del _globals[name]
        _globals.update(self.globals_backup)
        self.globals_backup.clear()


with import_wildcard('numpy'):
    y = sin(abs(linspace(0, 2*pi)))
So far, I've not come across significant drawbacks, except of course that variables defined outside the context with the same name as some function in numpy will not be accessible inside the context.
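A small sketch of that caveat (hypothetical module-level names, just to illustrate the temporary shadowing and restoration):
sin = "my important string"      # module-level name that clashes with numpy.sin

with import_wildcard('numpy'):
    y = sin(0.5)                 # here sin is numpy.sin; the string is shadowed

print(sin)                       # the original binding is restored afterwards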
Solution 2: Temporarily promote specified objects
Based on the feedback, here's another approach which is more explicit. Instead of making a temporary wildcard import, we only temporarily promote specified objects to the global namespace.
class global_context(object):
    def __init__(self, *objects):
        """Return a context manager that has the given objects available in the global namespace.

        You can directly pass in an object if it has a __name__, otherwise use the string name.
        """
        def parse_object(obj):
            if isinstance(obj, str):
                ns, name = obj.split('.')
                return name, getattr(globals()[ns], name)
            else:
                return obj.__name__, obj

        self.identifiers = dict(parse_object(o) for o in objects)
        self.globals_backup = {}

    def __enter__(self):
        _globals = globals()
        for name, fn in self.identifiers.items():
            if name in _globals:
                self.globals_backup[name] = _globals[name]
        _globals.update(self.identifiers)

    def __exit__(self, exc_type, exc_value, exc_tb):
        _globals = globals()
        for name in self.identifiers:
            if name not in self.globals_backup:
                del _globals[name]
        _globals.update(self.globals_backup)
        self.globals_backup.clear()
Usage:
import numpy as np
with global_context(np.sin, np.abs, np.linspace, 'np.pi'):
    y = sin(abs(linspace(0, 2*pi)))
I'll leave the first solution as well so that advantages and disadvantages of each approach can be discussed more easily, and people can vote for each solution.
I'm trying to get the list of classes from a Python file using Python. After some searching, I found the code below, which I thought would work:
def get_class_from_file(class_obj, file, path='app', exclude=[]):
    class_list = []
    module = importlib.import_module(path + '.' + file)
    for x in dir(module):
        app_cls = getattr(importlib.import_module(path + '.' + file), x)
        try:
            if app_cls and issubclass(app_cls, class_obj) and app_cls != class_obj and app_cls not in exclude:
                class_list.append((file, x))
        except TypeError:
            pass
    return class_list
However, I found that the code doesn't return only the classes defined in the file; it also keeps returning the superclasses of those classes. Here is an example:
file_1.py

class A:
    pass

class B(A):
    pass

file_2.py

class C(B):
    pass

class D:
    pass
When I call the function as
class_list = get_class_from_file(A, 'file_2')
I expect the result to be [C], but it returns [C, B], as B is a superclass of C.
Please help me fix this; I just want the classes inside the given file, not any of their superclasses. By the way, I used exclude to work around it at first, but that isn't a long-term solution.
The problem is that imported classes are also found. You can check a class's
__module__ attribute to see whether it originates from the current module or was imported into it.
You also call importlib.import_module(path + '.' + file) twice; I removed one of the calls. I also renamed x to name.
def get_class_from_file(class_obj, file, path='app', exclude=[]):
    class_list = []
    module_path = path + '.' + file
    module = importlib.import_module(module_path)
    for name in dir(module):
        app_cls = getattr(module, name)
        try:
            if (issubclass(app_cls, class_obj) and
                    app_cls != class_obj and
                    app_cls not in exclude and
                    app_cls.__module__ == module_path):
                class_list.append((file, name))
        except TypeError:
            # Not a class
            pass
    return class_list
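A quick check, assuming the package layout from the question (an app package containing file_1.py and file_2.py) and that importlib is already imported:
from app.file_1 import A

print(get_class_from_file(A, 'file_2'))   # [('file_2', 'C')] -- B is no longer reported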
Something along the lines of:
In one file:
FOO_CONST = 'bar'
def some_function(baz):
    return do_something_with(baz)
from somewhere import something
blah = something()
In another file:
my_stuff = import_as_dict("module")
And my_stuff would essentially be the equivalent of:
{
    'FOO_CONST': 'bar',
    'some_function': <function my_function at ...>,
    'blah': 'I am the result of calling the function "somewhere.something".',
}
Any libraries that could do that out of the box?
UPDATE:
Since vars(module) == module.__dict__, I have upvoted both answers but accepted the one with a bit more detail. This is the code that returns pretty much exactly what I had in mind:
my_stuff = {key: var for key, var in vars(module).items() if not key.startswith('__')}
How about using vars?
import module
my_stuff = vars(module)
import module
my_stuff = module.__dict__
Note that this dict is the actual dict the module uses to hold its attributes. If you do my_stuff['foo'] = 3, module has a new foo attribute equal to 3.
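For example (a minimal sketch, using the standard math module as a stand-in):
import math

my_stuff = vars(math)      # the same dict object as math.__dict__
my_stuff['foo'] = 3
print(math.foo)            # 3 -- the module gained a new attribute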
If you just want to get attributes by names determined at runtime, you don't really need the dict. You could do
thing = getattr(module, thingname)
If you only care about the way you access module members, you can use:
my_sys = import_as_dict("sys")
print my_sys["path"]
With the following code:
import imp


class DictProxy(object):
    def __init__(self, target):
        super(DictProxy, self).__init__()
        self.target = target

    def __getitem__(self, key):
        return getattr(self.target, key)

    def __setitem__(self, key, value):
        setattr(self.target, key, value)


def import_as_dict(module_name):
    file_, pathname, description = imp.find_module(module_name)
    module = imp.load_module(module_name, file_, pathname, description)
    dict_module = DictProxy(module)
    return dict_module
Using imp for the imports will keep your global context clean.
Hey all, Linux has a lot of great features in procfs and sysfs, and tools like vmstat extend that quite a bit, but I have a need to collect data from a variety of these systems and was hoping to leverage a unified Python utility instead of hacking together a bunch of disparate scripts.
In order to do that I first need to identify whether or not Python has the bits and pieces I need to adequately parse/process the different data collection points. So, the essence of my question:
Is there a Python module that already handles/parses sysfs objects?
I've looked for such a beast via Google, usenet, and various forums, but I haven't yet found anything intelligent or functional. So, before I carve one out, I figured I'd check here first.
Try this one:
from os import listdir
from os.path import isdir, isfile, islink, join, realpath, normpath
from keyword import iskeyword

_norm = lambda name: name + ('_' if iskeyword(name) else '')


def _denorm(name):
    if name.endswith('_') and iskeyword(name[:-1]):
        return name[:-1]
    else:
        return name


def _norm_path(path):
    return normpath(realpath(path))


class SysFsObject(object):
    __slots__ = ['_path', '__dict__']

    @staticmethod
    def __id_args__(path='/sys'):
        return _norm_path(path)

    def __init__(self, path='/sys'):
        self._path = _norm_path(path)
        if not self._path.startswith('/sys'):
            raise RuntimeError("Using this on non-sysfs files is dangerous!")
        self.__dict__.update(dict.fromkeys(_norm(i) for i in listdir(self._path)))

    def __repr__(self):
        return "<SysFsObject %s>" % self._path

    def __setattr__(self, name, val):
        if name.startswith('_'):
            return object.__setattr__(self, name, val)
        name = _denorm(name)
        p = realpath(join(self._path, name))
        if isfile(p):
            file(p, 'w').write(str(val))
        else:
            raise RuntimeError

    def __getattribute__(self, name):
        if name.startswith('_'):
            return object.__getattribute__(self, name)
        name = _denorm(name)
        p = realpath(join(self._path, name))
        if isfile(p):
            data = open(p, 'r').read()[:-1]
            try:
                return int(data)
            except ValueError:
                return data
        elif isdir(p):
            return SysFsObject(p)
It's not polished in any way, but IIRC it works :)
From filmor's answer, but with the int() casting removed:
from os import listdir
from os.path import isdir, isfile, islink, join, realpath, normpath
from keyword import iskeyword

_norm = lambda name: name + ('_' if iskeyword(name) else '')


def _denorm(name):
    if name.endswith('_') and iskeyword(name[:-1]):
        return name[:-1]
    else:
        return name


def _norm_path(path):
    return normpath(realpath(path))


class SysFsObject(object):
    __slots__ = ['_path', '__dict__']

    @staticmethod
    def __id_args__(path='/sys'):
        return _norm_path(path)

    def __init__(self, path='/sys'):
        self._path = _norm_path(path)
        if not self._path.startswith('/sys'):
            raise RuntimeError("Using this on non-sysfs files is dangerous!")
        self.__dict__.update(dict.fromkeys(_norm(i) for i in listdir(self._path)))

    def __repr__(self):
        return "<SysFsObject %s>" % self._path

    def __setattr__(self, name, val):
        if name.startswith('_'):
            return object.__setattr__(self, name, val)
        name = _denorm(name)
        p = realpath(join(self._path, name))
        if isfile(p):
            file(p, 'w').write(val)
        else:
            raise RuntimeError

    def __getattribute__(self, name):
        if name.startswith('_'):
            return object.__getattribute__(self, name)
        name = _denorm(name)
        p = realpath(join(self._path, name))
        if isfile(p):
            return open(p, 'r').read()[:-1]
        elif isdir(p):
            return SysFsObject(p)
Arbitrarily casting to int is unexpected and even dangerous. For example, if you were to use that code on any of the cpulist files prevalent in sysfs, a string such as "0-7" would always be returned on multi-processor systems. Then someday, someone uses your code on a single-core system and reading the exact same sysfs file that now contains "0" returns an int.
In other words, any function that calls that code and expects to receive the native data type of sysfs (strings) must explicitly cast to str().
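A small sketch of that caveat, using the first (int-casting) version of the class above; /sys/devices/system/cpu/present is one such cpulist file:
cpus = SysFsObject('/sys/devices/system/cpu').present
# multi-core machine: "0-7" (str); single-core machine: 0 (int)
cpus = str(cpus)   # callers have to normalize back to str themselves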
I'm not really sure why you need something specific; they are all text files for the most part, so you can just mess with them directly.
There aren't any Python modules that do that, as far as I know.
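For example, a minimal sketch of reading one sysfs entry directly with ordinary file I/O (assuming a Linux system where this path exists):
with open('/sys/devices/system/cpu/present') as f:
    print(f.read().strip())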