How can I initialize just one of these objects? Is that even possible?
startsimulation = {}
startsimulation['obj'] = StartSimulation(client_socket)
startsimulation['threadSimulation'] = Thread(target=startsimulation['obj'].start_simulation, daemon=True)
startreading = {}
startreading['obj'] = StartReading(client_socket)
startreading['threadReading'] = Thread(target=startreading['obj'].start_reading, daemon=True)
Because with the two separate initializations, my code ends up with repetition like this (it's not wrong, but it's not efficient):
startsimulation['obj'].client = client_socket
startsimulation['obj'].send_handler_connect()
startsimulation['obj'].is_connected = True
startreading['obj'].client = client_socket
startreading['obj'].send_handler_connect()
startreading['obj'].is_connected = True
I would suggest using a wrapper class, like this:
class Wrapper:
    def __init__(self, obj, target):
        self.obj = obj
        # Pass the thread target in explicitly, since the two classes
        # expose different entry points (start_simulation vs start_reading).
        self.thread = Thread(target=target, daemon=True)
    def send_handler_connect(self):
        self.obj.send_handler_connect()

sim = StartSimulation(client_socket)
obj1 = Wrapper(sim, sim.start_simulation)
reader = StartReading(client_socket)
obj2 = Wrapper(reader, reader.start_reading)
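With that in place, connecting and starting both handlers becomes uniform. A minimal sketch (whether you start the threads immediately depends on your flow):
for wrapped in (obj1, obj2):
    wrapped.send_handler_connect()
    wrapped.obj.is_connected = True
    wrapped.thread.start()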
I assume you might expect something like
a = b = 5
But you cannot apply that kind of assignment chaining in your application: startsimulation and startreading clearly refer to different instances, so assigning threads to them must be done with separate Thread calls.
But later on, you can simplify it to:
startreading['obj'].is_connected = startsimulation['obj'].is_connected = True
Or, if your aim is brevity rather than explicitness, you could handle this with a function, as sketched below.
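A minimal sketch of that idea (connect_all is a made-up name, not part of your code):
def connect_all(client, *handlers):
    # Apply the same connect sequence to every handler object.
    for handler in handlers:
        handler.client = client
        handler.send_handler_connect()
        handler.is_connected = True

connect_all(client_socket, startsimulation['obj'], startreading['obj'])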
I have created a class that can be passed into different processes so its values can be read and edited. All of the functions work except the addValue function. This is my class:
import ctypes
import multiprocessing

class data(object):
    def __init__(self):
        Manager = multiprocessing.Manager()
        self.lock = multiprocessing.Lock()
        self.changeAmt = 0
        self.jump = multiprocessing.Value(ctypes.c_float, 0.02)
        self.has_best = multiprocessing.Value(ctypes.c_bool, False)
        self.best_list = Manager.list()
        self.command_original = 'start sequence'
        self.command = multiprocessing.Value(ctypes.c_wchar_p, self.command_original)
        self.get_best = multiprocessing.Value(ctypes.c_bool, False)
        self.done = multiprocessing.Value(ctypes.c_bool, False)
    def get(self, name, model):
        self.model = model
        if model != 'Array':
            with multiprocessing.Lock():
                exec('self.now = self.' + name)
            return self.now.value
        if self.model == 'Array':
            exec('self.now = self.' + name)
            return self.now
    def edit(self, name, model, changeAmt):
        self.changeAmt = changeAmt
        if model != 'Array':
            with multiprocessing.Lock():
                exec('self.' + name + '.value = self.changeAmt')
        if model == 'Array':
            for i in range(len(changeAmt)):
                exec('self.' + name + '.append(changeAmt[i])')
    def addValue(self, name, value):
        self.now = value
        exec('self.' + name + ' = self.now')
Which can then be used correctly with these lines of code in the main function:
ult = data()
p1 = Process(target=try2, args=(ult,))
p1.start()
p2 = Process(target=nextTry, args=(ult,))
p2.start()
p1.join()
p2.join()
When I use the addValue function in my code, the first process works great and can return the value when the get function is called. However, when I call the get function from the second process, it fails. One idea for a solution is to use a multiprocessing Queue and have both processes add the value, although I suspect that even if both processes have added the value, it will not update when one of them changes it. Is what I am trying to accomplish doable, or should I just initialize all of my variables from the start?
I found that the only way to do this is to use a managed multiprocessing dictionary. I have posted a working example of the initializer and the addValue method below.
def __init__(self):
    Manager = multiprocessing.Manager()
    self.allDict = Manager.dict()
    self.allDict['status'] = 'nothing'
    self.allDict['done'] = False
    self.allDict['changeAmt'] = 0
    self.allDict['command'] = 'start sequence'
    self.allDict['get_best'] = False
Then, if you wanted to add a regular value:
def addValue(self, name, value):
    self.allDict[name] = value
This was honestly probably one of the original motivations for the manager dictionary. Anyway, I hope this helps anyone who needs this information and stumbles across this post.
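For anyone who wants to try it end to end, here is a minimal self-contained sketch of the same idea (the worker function is invented for illustration):
import multiprocessing

def worker(shared, key, value):
    # Writes go through the manager proxy, so every process sees them.
    shared[key] = value

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared = manager.dict()
    shared['status'] = 'nothing'
    p1 = multiprocessing.Process(target=worker, args=(shared, 'status', 'running'))
    p2 = multiprocessing.Process(target=worker, args=(shared, 'done', True))
    p1.start(); p2.start()
    p1.join(); p2.join()
    print(dict(shared))  # {'status': 'running', 'done': True}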
Assume I have two classes that use threads:
import threading

class foo(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self, name="foo=>bar")
        self.var1 = {}
    def run(self):
        while True:
            value, name = getvalue()  # name is a string
            self.var1[name] = value
            bar(self)

class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")
    def run(self):
        while True:
            arg = myfunction()  # some function (not shown for simplicity)
            val = myOtherfunction(fooInstance.var1[arg])  # other function
            print(val)

f = foo()
f.start()
The variable var1 in foo will change over time and bar needs to be aware of these changes. It makes sense to me, but I wonder if there is something fundamental here that could fail eventually. Is this correct in Python?
The actual sharing part is the same question as "how do I share a value with another object?" without threads, and all the same solutions will work.
For example, you're already passing the foo instance into the bar initializer, so just get it from there:
class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")
        self.var1 = fooInstance.var1
But is this thread-safe?
Well, yes, but only because you never actually start the background thread. But I assume in your real code, you're going to have two threads running at the same time, both accessing that var1 value. In which case it's not thread-safe without some kind of synchronization. For example:
class foo(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self, name="foo=>bar")
        self.var1 = {}
        self.var1lock = threading.Lock()

class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")
        self.var1 = fooInstance.var1
        self.var1lock = fooInstance.var1lock
And now, instead of this:
self.var1[name] = value
… you do this:
with self.var1lock:
    self.var1[name] = value
And likewise, instead of this:
val = myOtherfunction(fooInstance.var1[arg])  # other function
… you do this:
with self.var1lock:
    var1arg = self.var1[arg]
val = myOtherfunction(var1arg)
Or… as it turns out, in CPython, updating a value for a single key in a dict (only a builtin dict, not a subclass or custom mapping class!) has always been atomic, and probably always will be. If you want to rely on that fact, you can. But I'd only do that if the lock turned out to be a significant performance issue. And I'd comment every use of it to make it clear, too.
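To make that distinction concrete: a plain single-key assignment is the atomic case, but a read-modify-write like d[k] += 1 is not, so it still needs the lock. A small illustration (not from the original code):
import threading

counts = {'hits': 0}
counts_lock = threading.Lock()

counts['hits'] = 1  # a single dict store: atomic in CPython under the GIL

def unsafe_increment():
    counts['hits'] += 1  # read, add, write back: two threads can interleave here

def safe_increment():
    with counts_lock:  # hold the lock across the whole read-modify-write
        counts['hits'] += 1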
If you'd rather pass values instead of share them, the usual answer is queue.Queue or one of its relatives.
But this requires a redesign of your program. For example, maybe you want to pass each new/changed key-value pair over the queue. That would go something like this:
import copy
import queue
import threading

class foo(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self, name="foo=>bar")
        self.var1 = {}
        self.q = queue.Queue()
    def run(self):
        b = bar(self)
        b.start()
        while True:
            value, name = getvalue()  # name is a string
            self.var1[name] = value
            self.q.put((name, value))

class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")
        # Snapshot the current state, then keep it up to date via the queue.
        self.var1 = copy.deepcopy(fooInstance.var1)
        self.q = fooInstance.q
    def _checkq(self):
        # Drain any pending updates without blocking.
        while True:
            try:
                key, val = self.q.get_nowait()
            except queue.Empty:
                break
            else:
                self.var1[key] = val
    def run(self):
        while True:
            self._checkq()
            arg = myfunction()  # some function (not shown for simplicity)
            val = myOtherfunction(self.var1[arg])  # use the local, queue-updated copy
            print(val)
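Wiring it up looks the same as before; foo now starts bar itself (getvalue, myfunction and myOtherfunction are still the question's stand-ins, not real library calls):
f = foo()
f.start()  # foo's run() creates and starts bar, then both loop forever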
So the situation is that I have multiple methods which may be threaded simultaneously, but each needs its own lock so that it cannot be run again until its current run has finished. They are established by initializing a class with some data-processing options:
import threading

class InfrequentDataDaemon(object): pass
class FrequentDataDaemon(object): pass

def addMethod(target):
    def wrapper(f):
        setattr(target, f.__name__, staticmethod(f))
        return f
    return wrapper

class DataProcessors(object):
    lock = threading.Lock()
    def __init__(self, options):
        self.common_settings = options['common_settings']
        self.data_processing_configurations = options['data_processing_configurations']  # configs for each processing method
        self.data_processing_types = options['data_processing_types']
        self.Data_Processing_Functions = {}
        # I __init__ each processing method as a separate function so that it can be locked
        for type in options['data_processing_types']:
            def bindFunction1(name):
                def func1(self, data=None, lock=None):
                    config = self.data_processing_configurations[data['type']]  # get the right config for the datatype
                    with lock:
                        FetchDataBaseStuff(data['type'])
                        # I don't want this to be run more than once at a time per DataProcessing type,
                        # but it's fine if multiple DoSomethings run at once, as long as each DataType is different!
                        DoSomething(data, config)
                        WriteToDataBase(data['type'])
                func1.__name__ = "Processing_for_{}".format(name)
                self.Data_Processing_Functions[func1.__name__] = func1  # add this function to the dictionary
            bindFunction1(type)
        # Then I add some methods to a daemon that are going to check if our DataProcessors need to be called
        def fast_process_types(data):
            if not example_condition: return
            if data['type'] not in self.data_processing_types: return  # check that we are doing something with this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data, lock)).start()
        def slow_process_types(data):
            if not some_other_condition: return
            if data['type'] not in self.data_processing_types: return  # check that we are doing something with this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data, lock)).start()
        addMethod(InfrequentDataDaemon)(slow_process_types)
        addMethod(FrequentDataDaemon)(fast_process_types)
The idea is to lock each method in DataProcessors.Data_Processing_Functions, so that each method is only executed by one thread at a time (and further threads for the same method are queued). How does locking need to be set up to achieve this effect?
I'm not sure I completely follow what you're trying to do here, but could you just create a separate threading.Lock object for each type?
class DataProcessors(object):
    def __init__(self, options):
        self.common_settings = options['common_settings']
        self.data_processing_configurations = options['data_processing_configurations']  # configs for each processing method
        self.data_processing_types = options['data_processing_types']
        self.Data_Processing_Functions = {}
        self.locks = {}
        # __init__ each processing method as a separate function so that it can be locked
        for type in options['data_processing_types']:
            self.locks[type] = threading.Lock()
            def bindFunction1(name):
                def func1(self, data=None):
                    config = self.data_processing_configurations[data['type']]  # get the right config for the datatype
                    with self.locks[data['type']]:
                        FetchDataBaseStuff(data['type'])
                        DoSomething(data, config)
                        WriteToDataBase(data['type'])
                func1.__name__ = "Processing_for_{}".format(name)
                self.Data_Processing_Functions[func1.__name__] = func1  # add this function to the dictionary
            bindFunction1(type)
        # Then add some methods to a daemon that will check if our DataProcessors need to be called
        def fast_process_types(data):
            if not example_condition: return
            if data['type'] not in self.data_processing_types: return  # check that we are doing something with this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data)).start()
        def slow_process_types(data):
            if not some_other_condition: return
            if data['type'] not in self.data_processing_types: return  # check that we are doing something with this type of data
            threading.Thread(target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                             args=(self, data)).start()
        addMethod(InfrequentDataDaemon)(slow_process_types)
        addMethod(FrequentDataDaemon)(fast_process_types)
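If it helps to see the pattern in isolation, here is a stripped-down sketch of per-type locking (the type names and the sleep are invented for the demo):
import threading
import time

locks = {t: threading.Lock() for t in ('fast', 'slow')}

def process(data_type, n):
    # Only one thread per data_type gets past this line at a time;
    # threads working on different types can run concurrently.
    with locks[data_type]:
        time.sleep(0.1)
        print(data_type, n)

for i in range(3):
    threading.Thread(target=process, args=('fast', i)).start()
    threading.Thread(target=process, args=('slow', i)).start()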
I am maintaining a little library of useful functions for interacting with my company's APIs and I have come across (what I think is) a neat question that I can't find the answer to.
I frequently have to request large amounts of data from an API, so I do something like:
class Client(object):
    def __init__(self):
        self.data = []
    def get_data(self, offset=0):
        done = False
        while not done:
            data = get_more_starting_at(offset)
            self.data.extend(data)
            offset += 1
            if not data:
                done = True
This works fine and allows me to restart the retrieval where I left off if something goes horribly wrong. However, since Python functions are just regular objects, we can do stuff like:
def yo():
    yo.hi = "yo!"
    return None
and then we can interrogate yo about its properties later, like:
yo.hi => "yo!"
My question is: can I rewrite my class-based example to pin the data to the function itself, without referring to the function by name? I know I can do this by:
def get_data(offset=0):
    done = False
    get_data.data = []
    while not done:
        data = get_more_starting_from(offset)
        get_data.data.extend(data)
        offset += 1
        if not data:
            done = True
    return get_data.data
but I would like to do something like:
def get_data(offset=0):
    done = False
    self.data = []  # <===== this is the bit I can't figure out
    while not done:
        data = get_more_starting_from(offset)
        self.data.extend(data)  # <====== also this!
        offset += 1
        if not data:
            done = True
    return self.data  # <======== want to refer to the "current" object
Is it possible to refer to the "current" object by anything other than its name?
Something like "this", "self", or "memememe!" is what I'm looking for.
I don't understand why you want to do this, but it's what a fixed-point combinator allows you to do:
import functools

def Y(f):
    @functools.wraps(f)
    def Yf(*args):
        return inner(*args)
    inner = f(Yf)
    return Yf

@Y
def get_data(f):
    def inner_get_data(*args):
        # This is your real get_data function:
        # define it as normal,
        # but just refer to it as 'f' inside itself.
        print('setting get_data.foo to', args)
        f.foo = args
    return inner_get_data

get_data(1, 2, 3)
print(get_data.foo)
So you call get_data as normal, and it "magically" knows that f means itself.
You could do this, but (a) the data is not per-function-invocation, but per function, and (b) it's much easier to achieve this sort of thing with a class.
If you had to do it, you might do something like this:
def ybother(a, b, c, yrselflambda=lambda: ybother):
    yrself = yrselflambda()
    # other stuff
The lambda is necessary, because you need to delay evaluation of the term ybother until something has been bound to it.
Alternatively, and increasingly pointlessly:
from functools import partial

def ybother(a, b, c, yrself=None):
    # whatever
    yrself.data = []  # this will blow up if the default argument is used
    # more stuff

bothered = partial(ybother, yrself=ybother)
Or:
def unbothered(a, b, c):
    def inbothered(yrself):
        # whatever
        yrself.data = []
    return inbothered, inbothered(inbothered)
This last version gives you a different function object each time, which you might like.
There are almost certainly introspective tricks to do this, but they are even less worthwhile.
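For completeness, one such introspective trick, purely as a curiosity (it looks the running function up by name in its own globals, so renaming, decorating, or shadowing it breaks everything):
import sys

def get_data(offset=0):
    # Find the currently executing function via its own frame.
    self = globals()[sys._getframe().f_code.co_name]
    if not hasattr(self, 'data'):
        self.data = []
    self.data.append(offset)
    return self.data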
Not sure what doing it like this gains you, but what about using a decorator?
import functools

def add_self(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not getattr(f, 'content', None):
            f.content = []
        return f(f, *args, **kwargs)
    return wrapper

@add_self
def example(self, arg1):
    self.content.append(arg1)
    print(self.content)

example(1)
example(2)
example(3)
OUTPUT
[1]
[1, 2]
[1, 2, 3]
I've got a piece of code which contains a for loop to pull data from an XML file:
for evoNode in node.getElementsByTagName('evolution'):
    evoName = getText(evoNode.getElementsByTagName("type")[0].childNodes)
    evoId = getText(evoNode.getElementsByTagName("typeid")[0].childNodes)
    evoLevel = getText(evoNode.getElementsByTagName("level")[0].childNodes)
    evoCost = getText(evoNode.getElementsByTagName("costperlevel")[0].childNodes)
    evolutions.append("%s x %s" % (evoLevel, evoName))
Currently it appends to a list called evolutions, as the last line shows. For this loop, and several other loops with very similar functionality, I need it to build instances of a class instead:
class evolutions:
    def __init__(self, evoName, evoId, evoLevel, evoCost):
        self.evoName = evoName
        self.evoId = evoId
        self.evoLevel = evoLevel
        self.evoCost = evoCost
How do I create a series of instances of this class, one for each pass through that for loop? Or what is a good practical solution? This particular loop doesn't really need the class, but one of the others really does.
A list comprehension might be a little cleaner. I'd also move the parsing logic to the constructor to clean up the implementation:
class Evolution:
    def __init__(self, node):
        self.node = node
        self.type = self.property("type")
        self.typeid = self.property("typeid")
        self.level = self.property("level")
        self.costperlevel = self.property("costperlevel")
    def property(self, prop):
        # Note: this method shadows the built-in property() inside the class,
        # so it must be called through self.
        return getText(self.node.getElementsByTagName(prop)[0].childNodes)
evolutionList = [Evolution(evoNode) for evoNode in node.getElementsByTagName('evolution')]
Alternatively, you could use map (wrapped in list() so it works on Python 3 as well):
evolutionList = list(map(Evolution, node.getElementsByTagName('evolution')))
evolutionList = []
for evoNode in node.getElementsByTagName('evolution'):
    evoName = getText(evoNode.getElementsByTagName("type")[0].childNodes)
    evoId = getText(evoNode.getElementsByTagName("typeid")[0].childNodes)
    evoLevel = getText(evoNode.getElementsByTagName("level")[0].childNodes)
    evoCost = getText(evoNode.getElementsByTagName("costperlevel")[0].childNodes)
    temporaryEvo = Evolutions(evoName, evoId, evoLevel, evoCost)
    evolutionList.append(temporaryEvo)
    # Or you can go with the one-liner:
    evolutionList.append(Evolutions(evoName, evoId, evoLevel, evoCost))
I renamed your list because it shared the same name as your class, which was confusing.