I have a Python program with multiple modules. One module contains code that initializes a UDP interface for sending messages, and I would like this interface to be used across all the modules. The only way I've thought of is passing the object to the other modules through a predefined function.
Main Module:
from mod1 import *
from mod2 import *
from Interfaces import *

class MainClass():
    def __init__(self):
        # Initialize modules and the shared UDP interface
        self.m1 = Module1()
        self.m2 = Module2()
        self.intf = UdpInterface()
        self._init_module_interfaces()

    def _init_module_interfaces(self):
        # Hand the same interface object to every module
        self.m1.InitIntf(self.intf)
        self.m2.InitIntf(self.intf)

if __name__ == '__main__':
    main = MainClass()
Submodule:
class Module1():
    def __init__(self):
        ...

    def InitIntf(self, intf):
        # Store the interface passed in from the main module
        self.intf = intf
Is there a better way to do this?
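One common alternative is to create the interface once at module level in Interfaces and let every module import that shared instance, the same idea as the module-level threading.local and singleton patterns shown further down this page. A minimal sketch (the send() method is a hypothetical stand-in for the real UDP logic):

# Interfaces.py
class UdpInterface(object):
    def send(self, msg):
        ...  # real UDP send logic goes here

intf = UdpInterface()  # created once, on first import

# mod1.py
from Interfaces import intf

class Module1(object):
    def do_work(self):
        intf.send('hello')  # same shared instance, no InitIntf() plumbing needed

Python caches modules in sys.modules, so every 'from Interfaces import intf' hands back the same object.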
I am trying to dynamically import modules and bind each one to a global variable.
I am using the Maya 2020 Python interpreter (Python 2.7).
I have a test module called "trigger_test_script.py" under the "/home/arda.kutlu/Downloads/" folder.
When I don't import any custom class and run this:
###########################################################################[START]
import sys
import imp

class TestClass(object):
    def __init__(self):
        self.filePath = None
        self.asName = None

    def action(self):
        # Declare the target name global, then exec an assignment that
        # loads the module and binds it to that name in globals().
        exec("global %s" % self.asName, globals())
        foo = "imp.load_source('%s', '/home/arda.kutlu/Downloads/trigger_test_script.py')" % self.asName
        cmd = "{0}={1}".format(self.asName, foo)
        exec(cmd, globals())
###########################################################################[END]

test = TestClass()
test.filePath = "/home/arda.kutlu/Downloads/trigger_test_script.py"
test.asName = "supposed_to_be_global"
test.action()
print(supposed_to_be_global)
I get the exact result that I want:
<module 'trigger_test_script' from '/home/arda.kutlu/Downloads/trigger_test_script.pyc'>
However, when I save TestClass (the part between hashes) into a file and import it like this:
import testClass
test = testClass.TestClass()
test.filePath = "/home/arda.kutlu/Downloads/trigger_test_script.py"
test.asName = "supposed_to_be_global"
test.action()
print(supposed_to_be_global)
the variable 'supposed_to_be_global' does not become global and I get a NameError.
I always assumed that these two usages would give the same result, but clearly I am missing something.
I appreciate any help, thanks.
I don't quite understand your last comment about why having several modules with different action() methods is a problem, so ignoring that, here's how to make what's in your question work. (The reason your version fails when imported is that globals() inside action() refers to the globals of the module the class is defined in, so the name ends up in testClass's namespace rather than your script's.) The part between the hashes will work both inline and when put in a separate module and imported.
###########################################################################[START]
import imp

class TestClass(object):
    def __init__(self):
        self.filePath = None
        self.asName = None

    def action(self):
        # Load the module and return it; let the caller bind the name.
        foo = imp.load_source(self.asName, self.filePath)
        return foo
###########################################################################[END]
#from testclass import TestClass
test = TestClass()
test.filePath = "/home/arda.kutlu/Downloads/trigger_test_script.py"
test.asName = "supposed_to_be_global"
supposed_to_be_global = test.action()
print(supposed_to_be_global)
I'm trying to access an instance initialized in the main application from other modules, but I don't know how to do it.
Background: I want to update a dataframe with data throughout the whole execution of the main application.
I have the following application structure (this is a simplified version of the code in my application):
constraints
- test_function.py (separate module which should be able to update the initialized class in the main app)
functions
- helper.py (the class which contains the dataframe logic)
main.py (my main application code)
main.py:
import functions.helper
gotstats = functions.helper.GotStats()
gotstats.add(solver_stat='This is a test')
gotstats.add(type='This is a test Type!')
print(gotstats.result())
import constraints.test_function
constraints.test_function.test_function()
helper.py:
class GotStats(object):
    def __init__(self):
        print('init() called')
        import pandas as pd
        self.df_got_statistieken = pd.DataFrame(columns=['SOLVER_STAT','TYPE','WAARDE','WAARDE_TEKST','LOWER_BOUND','UPPER_BOUND','OPTIMALISATIE_ID','GUROBI_ID'])

    def add(self, solver_stat=None, type=None, waarde=None, waarde_tekst=None, lower_bound=None, upper_bound=None, optimalisatie_id=None, gurobi_id=None):
        print('add() called')
        self.df_got_statistieken = self.df_got_statistieken.append({'SOLVER_STAT': solver_stat, 'TYPE': type, 'WAARDE': waarde, 'OPTIMALISATIE_ID': optimalisatie_id, 'GUROBI_ID': gurobi_id}, ignore_index=True)

    def result(self):
        print('result() called')
        return self.df_got_statistieken
test_function.py:
import sys, os
sys.path.append(os.getcwd())

def test_function():
    import functions.helper
    gotstats = functions.helper.GotStats()
    gotstats.add(solver_stat='This is a test from the seperate module')
    gotstats.add(type='This is a test type from the seperate module!')
    print(gotstats.result())

if __name__ == "__main__":
    test_function()
In main.py I initialize the instance with "gotstats = functions.helper.GotStats()". After that I can correctly use its functions and add dataframe rows with the add function.
I would like test_function() to be able to add dataframe rows to that same object, but I don't know how to do this (in the current code, test_function.py just creates a new instance in its local namespace, which I don't want). Do I need to extend the class with a function that returns the active instance (like logging.getLogger(name))?
Any help in the right direction would be appreciated.
Make your test_function accept the instance as a parameter and pass it to the function when you call it:
main.py:
import functions.helper
from constraints.test_function import test_function
gotstats = functions.helper.GotStats()
gotstats.add(solver_stat='This is a test')
gotstats.add(type='This is a test Type!')
print(gotstats.result())
test_function(gotstats)
test_function.py:
import sys, os
sys.path.append(os.getcwd())  # make sure the project root is importable
import functions.helper

def test_function(gotstats=None):
    if gotstats is None:
        # No instance was passed in: fall back to a fresh one
        gotstats = functions.helper.GotStats()
    gotstats.add(solver_stat='This is a test from the seperate module')
    gotstats.add(type='This is a test type from the seperate module!')
    print(gotstats.result())
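If you would rather have the logging.getLogger(name)-style accessor the question mentions, a minimal sketch (the get_stats helper is hypothetical, not part of the original code) is to keep a module-level registry in helper.py:

# functions/helper.py (addition)
_instances = {}

def get_stats(name='default'):
    # Return the GotStats instance registered under 'name',
    # creating it on first use -- mirrors logging.getLogger(name).
    if name not in _instances:
        _instances[name] = GotStats()
    return _instances[name]

With this, both main.py and test_function.py can call functions.helper.get_stats() and receive the same instance without passing it around.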
The docs for multiprocessing.set_start_method note that:
Note that this should be called at most once, and it should be protected inside the if __name__ == '__main__' clause of the main module.
However, if I put multiprocessing.set_start_method('spawn') in a pytest module-scoped fixture, I do not know whether it will work reliably.
Indeed, as stated in the documentation, you will get in trouble if you try to call multiprocessing.set_start_method() from multiple unit test functions. Moreover, this affects your whole program and may interact badly with the rest of the test suite.
However, there exists a workaround which is described in the documentation too:
Alternatively, you can use get_context() to obtain a context object. Context objects have the same API as the multiprocessing module, and allow one to use multiple start methods in the same program.
import multiprocessing as mp

def foo(q):
    q.put('hello')

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=foo, args=(q,))
    p.start()
    print(q.get())
    p.join()
This method can be used per-test to avoid the compatibility issues discussed above. It can be combined with monkeypatching or mocking to test your class with different start methods:
# my_class.py
import multiprocessing

class MyClass:
    def __init__(self):
        self._queue = multiprocessing.Queue()

    def process(self, x):
        # Very simplified example of a method using a multiprocessing Queue
        self._queue.put(x)
        return self._queue.get()

# tests/test_my_class.py
import multiprocessing
import my_class

def test_spawn(monkeypatch):
    # Replace the Queue used by my_class with one from a 'spawn' context
    ctx = multiprocessing.get_context('spawn')
    monkeypatch.setattr(my_class.multiprocessing, "Queue", ctx.Queue)
    obj = my_class.MyClass()
    assert obj.process(6) == 6

def test_fork(monkeypatch):
    # Same test, but with a 'fork' context
    ctx = multiprocessing.get_context('fork')
    monkeypatch.setattr(my_class.multiprocessing, "Queue", ctx.Queue)
    obj = my_class.MyClass()
    assert obj.process(6) == 6
If you really do always want to use the same start method, you can set it in a session-scoped fixture in the file conftest.py in the root of your source tree. E.g.
# conftest.py
import multiprocessing
import pytest

@pytest.fixture(scope="session", autouse=True)
def always_spawn():
    multiprocessing.set_start_method("spawn")
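Note that set_start_method() raises a RuntimeError if the start method has already been set. If other code may have set it first, a slightly more defensive variant of the fixture (a sketch, which simply keeps whatever method was set first) is:

# conftest.py
import multiprocessing
import pytest

@pytest.fixture(scope="session", autouse=True)
def always_spawn():
    try:
        multiprocessing.set_start_method("spawn")
    except RuntimeError:
        pass  # start method was already set elsewhere; keep it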
I know this is possible using thread locals in Python, but for some reason I am unable to find the exact syntax to achieve it. I have the following sample code to test this, but it is not working:
module1.py
import threading

def print_value():
    # What should I put here? This actually creates a new thread local
    # instead of returning the one created in main() of module2.
    local = threading.local()
    print local.name
module2.py
import threading
import module1

if __name__ == '__main__':
    local = threading.local()
    local.name = 'Shailendra'
    module1.print_value()
Edit 1: The shared data should be available only to the thread that invokes these functions, not to all threads in the system. One example is a request id in a web application.
In module1, define a global variable that is a threading.local:
module1
import threading

shared = threading.local()

def print_value():
    print shared.name
module2
import module1

if __name__ == '__main__':
    module1.shared.name = 'Shailendra'
    module1.print_value()
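To see the per-thread behavior the edit asks about, a quick demonstration (assuming the module1 above) runs print_value() from several threads; each thread binds its own value to the same shared object and only ever sees its own:

# module2.py, extended demo
import threading
import module1

def worker(n):
    module1.shared.name = 'thread-%d' % n  # visible only in this thread
    module1.print_value()                  # prints this thread's own value

if __name__ == '__main__':
    threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()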
If it's within the same process, why not use a singleton?
import functools

def singleton(cls):
    ''' Use class as singleton. '''
    cls.__new_original__ = cls.__new__

    @functools.wraps(cls.__new__)
    def singleton_new(cls, *args, **kw):
        it = cls.__dict__.get('__it__')
        if it is not None:
            return it
        cls.__it__ = it = cls.__new_original__(cls, *args, **kw)
        it.__init_original__(*args, **kw)
        return it

    cls.__new__ = singleton_new
    cls.__init_original__ = cls.__init__
    cls.__init__ = object.__init__
    return cls

@singleton
class Bucket(object):
    pass
Now just import Bucket and bind some data to it
from mymodule import Bucket
b = Bucket()
b.name = 'bob'
b.loves_cats = True
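Because construction always returns the same instance, any other module that imports Bucket sees the data bound above:

# elsewhere.py
from mymodule import Bucket

b2 = Bucket()          # same object as b
print(b2 is Bucket())  # True
print(b2.name)         # 'bob'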
I am working on a GUI that needs to do some heavy computation in the background and then update the GUI when the calculation is complete. The multiprocessing module seems like a good solution, since I can use the apply_async method to specify the target and callback functions. The callback function is used to update the GUI with the result. However, I am having difficulty combining multiprocessing with a dynamically loaded module, as in the following code. The error message is ImportError: No module named calc.
Is the error due to the fact that multiprocessing just doesn't work with dynamically loaded modules? If not, are there any ideas on a better approach?
from PySide.QtCore import *
from PySide.QtGui import *
import multiprocessing
import time
import sys
import os
import logging
import imp

PluginFolder = "plugins"
plugins = {}

def f(x):
    y = x*x
    time.sleep(2)  # Simulate processing time.
    return y

def load_plugin(name):
    '''Load the python module 'name'
    '''
    location = os.path.join('.', PluginFolder)
    info = imp.find_module(name, [location])
    plugin = {"name": name, "info": info}
    plugins[name] = imp.load_module(name, *plugin["info"])

class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.pool = multiprocessing.Pool()
        load_plugin('calc')  # load ./plugins/calc.py
        button1 = QPushButton('Calculate', self)
        button1.clicked.connect(self.calculate)
        button2 = QPushButton('Test', self)
        button2.clicked.connect(self.run_test)
        self.text = QTextEdit()
        vbox1 = QVBoxLayout()
        vbox1.addWidget(button1)
        vbox1.addWidget(button2)
        vbox1.addWidget(self.text)
        myframe = QFrame()
        myframe.setLayout(vbox1)
        self.setCentralWidget(myframe)
        self.show()
        self.raise_()

    def calculate(self):
        #self.pool.apply_async(f, [10], callback=self.update_gui)  # This works
        #result = plugins['calc'].f(10)  # This works
        #self.update_gui(result)
        self.pool.apply_async(plugins['calc'].f, [10], callback=self.update_gui)  # This doesn't

    def update_gui(self, result):
        self.text.append('Calculation complete. Result = %d\n' % result)

    def run_test(self):
        self.text.append('Testing\n')

if __name__ == '__main__':
    app = QApplication(sys.argv)
    gui = MainWindow()
    app.exec_()
In ./plugins/calc.py, the function f is defined as in the above code.
This doesn't work because you're loading your calc module as a top-level module. When apply_async pickles plugins['calc'].f, the worker process has to re-import a module named calc to unpickle it, and since no module calc is present on its sys.path or in the current directory, the import fails. Replacing the load_module call with the following will do the trick:
plugins[name] = imp.load_module('{}.{}'.format(PluginFolder, name),
                                *plugin["info"])
For plugins.calc to be importable, plugins has to be a Python package, i.e. contain an __init__.py file.
Any import <module> statement inside your plugin files, such as in plugins/calc.py, will lead to a warning:
RuntimeWarning: Parent module 'plugins' not found while handling absolute import
The reason is that the import process checks whether the parent module contains <module>, and while inside calc.py it can't find the parent plugins module. You can get rid of the warning by explicitly making the plugins package known, for example with an import plugins statement in the main code.
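Putting the pieces together, a corrected load_plugin would look roughly like this (a sketch based on the fix above; it assumes plugins/__init__.py exists):

import imp
import os

PluginFolder = "plugins"
plugins = {}

def load_plugin(name):
    location = os.path.join('.', PluginFolder)
    info = imp.find_module(name, [location])
    # Load under the package-qualified name so the worker process can
    # re-import the function as plugins.<name> when unpickling.
    plugins[name] = imp.load_module('{}.{}'.format(PluginFolder, name), *info)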