Python: Running Multiple Locks across Multiple Threads

The situation is that I have multiple methods which might be run in separate threads simultaneously, but each one needs its own lock
so that it cannot be run again in another thread until it has finished. They are set up by initialising a class with some data-processing options:
import threading

class InfrequentDataDaemon(object): pass
class FrequentDataDaemon(object): pass

def addMethod(processor):
    def wrapper(f):
        setattr(processor, f.__name__, staticmethod(f))
        return f
    return wrapper

class DataProcessors(object):
    lock = threading.Lock()

    def __init__(self, options):
        self.common_settings = options['common_settings']
        self.data_processing_configurations = options['data_processing_configurations']  # Configs for each processing method
        self.data_processing_types = options['data_processing_types']
        self.Data_Processing_Functions = {}

        # I __init__ each processing method as a separate function so that it can be locked
        for type in options['data_processing_types']:
            def bindFunction1(name):
                def func1(self, data=None, lock=None):
                    config = self.data_processing_configurations[data['type']]  # The right config for the data type
                    with lock:
                        FetchDataBaseStuff(data['type'])
                        # I don't want this block to run more than once at a time per data-processing type,
                        # but it's fine if multiple DoSomethings run at once, as long as each data type is different!
                        DoSomething(data, config)
                        WriteToDataBase(data['type'])
                func1.__name__ = "Processing_for_{}".format(name)
                self.Data_Processing_Functions[func1.__name__] = func1  # Add this function to the dictionary
            bindFunction1(type)

        # Then I add some methods to a daemon that will check whether our DataProcessors need to be called
        def fast_process_types(data):
            if not example_condition: return
            if data['type'] not in self.data_processing_types: return  # Check that we handle this type of data
            # note: this currently passes the single class-level lock to every thread
            threading.Thread(
                target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                args=(self, data, self.lock)).start()

        def slow_process_types(data):
            if not some_other_condition: return
            if data['type'] not in self.data_processing_types: return  # Check that we handle this type of data
            threading.Thread(
                target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                args=(self, data, self.lock)).start()

        addMethod(InfrequentDataDaemon)(slow_process_types)
        addMethod(FrequentDataDaemon)(fast_process_types)
The idea is to lock each method in
DataProcessors.Data_Processing_Functions so that each one is only run by one thread at a time (and any other threads calling the same method are queued). How does the locking need to be set up to achieve this?

I'm not sure I completely follow what you're trying to do here, but could you just create a separate threading.Lock object for each type?
class DataProcessors(object):
    def __init__(self, options):
        self.common_settings = options['common_settings']
        self.data_processing_configurations = options['data_processing_configurations']  # Configs for each processing method
        self.data_processing_types = options['data_processing_types']
        self.Data_Processing_Functions = {}
        self.locks = {}

        # __init__ each processing method as a separate function so that it can be locked
        for type in options['data_processing_types']:
            self.locks[type] = threading.Lock()
            def bindFunction1(name):
                def func1(self, data=None):
                    config = self.data_processing_configurations[data['type']]  # The right config for the data type
                    with self.locks[data['type']]:
                        FetchDataBaseStuff(data['type'])
                        DoSomething(data, config)
                        WriteToDataBase(data['type'])
                func1.__name__ = "Processing_for_{}".format(name)
                self.Data_Processing_Functions[func1.__name__] = func1  # Add this function to the dictionary
            bindFunction1(type)

        # Then add some methods to a daemon that will check whether our DataProcessors need to be called
        def fast_process_types(data):
            if not example_condition: return
            if data['type'] not in self.data_processing_types: return  # Check that we handle this type of data
            threading.Thread(
                target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                args=(self, data)).start()

        def slow_process_types(data):
            if not some_other_condition: return
            if data['type'] not in self.data_processing_types: return  # Check that we handle this type of data
            threading.Thread(
                target=self.Data_Processing_Functions["Processing_for_{}".format(data['type'])],
                args=(self, data)).start()

        addMethod(InfrequentDataDaemon)(slow_process_types)
        addMethod(FrequentDataDaemon)(fast_process_types)
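If the set of types is not known when __init__ runs, the same idea works with a lazily populated registry of locks guarded by one extra lock. This is only a minimal sketch under that assumption (PerTypeLocker, lock_for and DoSomething are illustrative names, not part of the code above):

import threading

class PerTypeLocker(object):
    """Hands out one lock per data type, creating each lock on first use."""
    def __init__(self):
        self._locks = {}
        self._registry_lock = threading.Lock()  # guards the dict of locks itself

    def lock_for(self, data_type):
        with self._registry_lock:
            if data_type not in self._locks:
                self._locks[data_type] = threading.Lock()
            return self._locks[data_type]

# usage: threads processing the same type queue up, different types run concurrently
# locker = PerTypeLocker()
# with locker.lock_for(data['type']):
#     DoSomething(data)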

Passing variables between classes in different threads?

Assume I have two classes that use threads
class foo(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self, name="foo=>bar")
        self.var1 = {}

    def run(self):
        while True:
            value, name = getvalue()  # name is a string
            self.var1[name] = value
            bar(self)

class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")

    def run(self):
        while True:
            arg = myfunction()  # some function (not shown for simplicity)
            val = myOtherfunction(fooInstance.var1[arg])  # other function
            print(val)

f = foo()
f.start()
The variable var1 in foo will change over time, and bar needs to be aware of these changes. It makes sense to me, but I wonder if there is something fundamental here that could fail eventually. Is this correct in Python?
The actual sharing part is the same question as "how do I share a value with another object?" without threads, and all the same solutions will work.
For example, you're already passing the foo instance into the bar initializer, so just get it from there:
class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")
        self.var1 = fooInstance.var1
But is this thread-safe?
Well, yes, but only because you never actually start the background thread. But I assume in your real code, you're going to have two threads running at the same time, both accessing that var1 value. In which case it's not thread-safe without some kind of synchronization. For example:
class foo(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self, name="foo=>bar")
        self.var1 = {}
        self.var1lock = threading.Lock()

class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")
        self.var1 = fooInstance.var1
        self.var1lock = fooInstance.var1lock
And now, instead of this:
self.var1[name] = value
… you do this:
with self.var1lock:
    self.var1[name] = value
And likewise, instead of this:
val = myOtherfunction(fooInstance.var1[arg])  # other function
… you do this:
with self.var1lock:
    var1arg = self.var1[arg]
val = myOtherfunction(var1arg)
Or… as it turns out, in CPython, updating a value for a single key in a dict (only a builtin dict, not a subclass or custom mapping class!) has always been atomic, and probably always will be. If you want to rely on that fact, you can. But I'd only do that if the lock turned out to be a significant performance issue. And I'd comment every use of it to make it clear, too.
If you'd rather pass values instead of share them, the usual answer is queue.Queue or one of its relatives.
But this requires a redesign of your program. For example, maybe you want to pass each new/changed key-value pair over the queue. That would go something like this:
import copy
import queue
import threading

class foo(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self, name="foo=>bar")
        self.var1 = {}
        self.q = queue.Queue()

    def run(self):
        b = bar(self)
        b.start()
        while True:
            value, name = getvalue()  # name is a string
            self.var1[name] = value
            self.q.put((name, value))

class bar(threading.Thread):
    def __init__(self, fooInstance):
        threading.Thread.__init__(self, name="bar")
        self.var1 = copy.deepcopy(fooInstance.var1)
        self.q = fooInstance.q

    def _checkq(self):
        while True:
            try:
                key, val = self.q.get_nowait()
            except queue.Empty:
                break
            else:
                self.var1[key] = val

    def run(self):
        while True:
            self._checkq()
            arg = myfunction()  # some function (not shown for simplicity)
            val = myOtherfunction(self.var1[arg])  # other function
            print(val)
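Note that in this version bar is created and started inside foo.run, so the caller only needs to start foo. A usage sketch (getvalue, myfunction and myOtherfunction are still the question's placeholders):

if __name__ == '__main__':
    f = foo()   # foo.run() creates and starts the bar thread itself
    f.start()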

Python: Iterate a list which is shared by several methods in a class

I'm using tornado.websocket, where the class methods are overrides of the WebSocketHandler methods. Anyway, my code looks like this:
class SocketHandler(tornado.websocket.WebSocketHandler):
    current_ninja_pool = enumerate(return_dependency_lvl_0())
    current_ninja = next(current_ninja_pool)
    file_to_upload = []

    def check_origin(self, origin):
        return True

    def open(self):
        logging.info("A client connected.")
        self.run()

    def run(self):
        if condition:
            do_this()
        else:
            do_that()
            self.current_ninja = next(self.current_ninja_pool)
            self.run()

    def on_message(self, message):
        do_a_lot_of_stuff()
        if message == 'next one':
            self.current_ninja = next(self.current_ninja_pool)

    def on_close(self):
        logging.info("A client disconnected")
So, what I want is to be able to iterate my enumerate, so that every element can be processed in the methods run or on_message depending on how my client-websocket will answer. The problem is that I want to iterate under particular conditions, and I don't have a clue on how to do this. Since I'm not very familiar with the way you manipulate class- and instance-variables, I'm probably missing a point here.
Thank you
You need an iterator. Luckily, enumerate already returns an iterator; you just need to access that, rather than storing the current item.
I also suspect that current_ninja_pool should be an instance variable, not a class one (which would be shared across all instances of the class).
class SocketHandler(tornado.websocket.WebSocketHandler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.current_ninja_pool = enumerate(return_dependency_lvl_0())
        self.file_to_upload = []

    def run(self):
        item = next(self.current_ninja_pool)
        do_something_with(item)
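One thing to keep in mind: next() raises StopIteration once the enumerate is exhausted, so if the pool can run out you may want to guard the call. A minimal sketch of run with that guard (do_something_with is still a placeholder):

    def run(self):
        try:
            item = next(self.current_ninja_pool)
        except StopIteration:
            return  # nothing left to iterate over
        do_something_with(item)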

Threading, multiprocessing shared memory and asyncio

I am having trouble implementing the following scheme:
class A:
    def __init__(self):
        self.content = []
        self.current_len = 0

    def __len__(self):
        return self.current_len

    def update(self, new_content):
        self.content.append(new_content)
        self.current_len += 1

class B:
    def __init__(self, id):
        self.id = id
And I also have these two functions that will be called later in the main:
async def do_stuff(first_var, second_var):
    """This function is ideally called from the main in another
    process. Also, first_var and second_var are not modified, so it
    would be nice if they could be given by reference without
    having to copy them."""
    ### used for first call
    yield None
    while len(first_var) < CERTAIN_NUMBER:
        time.sleep(10)
    while True:
        ## do stuff
        if condition_met:
            yield new_second_var  ## which is a new instance of B
        ## continue doing stuff

def do_other_stuff(first_var, second_var):
    while True:
        queue = multiprocessing.JoinableQueue()
        results = multiprocessing.Queue()
        ### do stuff
        first_var.update(results)
The main looks like this at the moment:
first_var = A()
second_var = B()

while True:
    async for new_second_var in do_stuff(first_var, second_var):
        if new_second_var:
            ## stop the do_other_stuff that is currently running
            ## to re-launch it with the updated new_var
            do_other_stuff(first_var, new_second_var)
        else:  ## used for the first call
            do_other_stuff(first_var, second_var)
Here are my questions:
Is there a better solution to make this scheme work?
How can I implement the "stopping" part since there is a while True loop that fills first_var by reference?
Will the instance of A (first_var) be passed by reference to do_stuff if first_var doesn't get modified inside it?
Is it even possible to have an asynchronous generator in another process?
Is it even possible at all?
This is using Python 3.6 for the async generators.
I hope this is somewhat clear! Thanks a lot!

How can I split a long function into separate steps while maintaining the relationship between said steps?

I have a very long function func which takes a browser handle and performs a bunch of requests and reads a bunch of responses in a specific order:
def func(browser):
    # make sure we are logged in, otherwise log in
    # make request to /search and check that the page has loaded
    # fill form in /search and submit it
    # read table of response and return the result as list of objects
Each operation requires a large amount of code due to the complexity of the DOM, and they tend to grow really fast.
What would be the best way to refactor this function into smaller components so that the following properties still hold?
the execution flow of the operations and/or their preconditions is guaranteed, just like in the current version
the preconditions are not checked with asserts against the state, as this is a very costly operation
func can be called multiple times on the browser
Just wrap the three helper methods in a class, and track which methods are allowed to run in an instance.
class Helper(object):
    def __init__(self):
        self.a = True
        self.b = False
        self.c = False

    def funcA(self):
        if not self.a:
            raise Error("Cannot run funcA now")
        # do stuff here
        self.a = False
        self.b = True
        return whatever

    def funcB(self):
        if not self.b:
            raise Error("Cannot run funcB now")
        # do stuff here
        self.b = False
        self.c = True
        return whatever

    def funcC(self):
        if not self.c:
            raise Error("Cannot run funcC now")
        # do stuff here
        self.c = False
        self.a = True
        return whatever

def func(...):
    h = Helper()
    h.funcA()
    h.funcB()
    h.funcC()
    # etc
The only way to call a method is if its flag is true, and each method clears its own flag and sets the next method's flag before exiting. As long as you don't touch h.a et al. directly, this ensures that each method can only be called in the proper order.
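As a quick illustration of how the flags gate the calls (Error stands in for whatever exception class you actually use):

h = Helper()
h.funcA()      # allowed: clears a, sets b
try:
    h.funcA()  # a is now False, so this is rejected
except Error:
    print("funcA cannot run twice in a row")
h.funcB()      # allowed, because the first funcA call set b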
Alternately, you can use a single flag that is a reference to the function currently allowed to run.
class Helper(object):
    def __init__(self):
        self.allowed = self.funcA

    def funcA(self):
        if self.allowed != self.funcA:  # compare bound methods with ==/!=, not "is"
            raise Error("Cannot run funcA now")
        # do stuff
        self.allowed = self.funcB
        return whatever
    # etc
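For completeness, funcB would follow the same pattern; a sketch of what the "# etc" elides:

    def funcB(self):
        if self.allowed != self.funcB:
            raise Error("Cannot run funcB now")
        # do stuff
        self.allowed = self.funcC
        return whatever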
Here's the solution I came up with. I used a decorator (closely related to the one in this blog post) which only allows for a function to be called once.
def call_only_once(func):
    def new_func(*args, **kwargs):
        if not new_func._called:
            try:
                return func(*args, **kwargs)
            finally:
                new_func._called = True
        else:
            raise Exception("Already called this once.")
    new_func._called = False
    return new_func

@call_only_once
def stateA():
    print 'Calling stateA only this time'

@call_only_once
def stateB():
    print 'Calling stateB only this time'

@call_only_once
def stateC():
    print 'Calling stateC only this time'

def state():
    stateA()
    stateB()
    stateC()

if __name__ == "__main__":
    state()
You'll see that if you re-call any of the functions, the function will throw an Exception stating that the functions have already been called.
The problem with this is that if you ever need to call state() again, you're hosed. Unless you implement these functions as private functions, I don't think you can do exactly what you want due to the nature of Python's scoping rules.
Edit
You can also remove the else in the decorator and your function will always return None.
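That variant would look like this (the same decorator, minus the else branch):

def call_only_once(func):
    def new_func(*args, **kwargs):
        if not new_func._called:
            try:
                return func(*args, **kwargs)
            finally:
                new_func._called = True
        # no else branch: a repeated call now silently returns None
    new_func._called = False
    return new_func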
Here is a snippet I used once for my state machine:
class StateMachine(object):
    def __init__(self):
        self.handlers = {}
        self.start_state = None
        self.end_states = []

    def add_state(self, name, handler, end_state=0):
        name = name.upper()
        self.handlers[name] = handler
        if end_state:
            self.end_states.append(name)

    def set_start(self, name):
        # startup state
        self.start_state = name

    def run(self, **kw):
        """
        Run the machine.
        :param kw: keywords passed to the first handler
        :return:
        """
        # The first .run call invokes the start handler with the kw keywords.
        # Each registered handler should return the next handler and the kw it needs.
        try:
            handler = self.handlers[self.start_state]
        except KeyError:
            raise InitializationError("must call .set_start() before .run()")
        while True:
            (new_state, kw) = handler(**kw)
            if isinstance(new_state, str):
                if new_state in self.end_states:
                    print("reached ", new_state)
                    break
                else:
                    handler = self.handlers[new_state]
            elif hasattr(new_state, "__call__"):
                handler = new_state
            else:
                return
Usage:
class MyParser(StateMachine):
    def __init__(self):
        super().__init__()
        # define handlers
        # we can define as many handlers as we want
        self.handlers["begin_parse"] = self.begin_parse
        # define the startup handler
        self.set_start("begin_parse")

    def end(self, **kw):
        logging.info("End of parsing")
        # no callable handler => end
        return None, None

    def second(self, **kw):
        logging.info("second")
        # do something
        # when the condition is reached, call the `self.end` handler
        if ...:
            return self.end, {}

    def begin_parse(self, **kw):
        logging.info("start of parsing")
        # long process until the condition is reached, then call the `self.second` handler with new kw keywords
        while True:
            kw = {}
            if ...:
                return self.second, kw
            # elif other cond:
            #     return self.other_handler, kw
            # elif other cond 2:
            #     return self.other_handler_2, kw
            else:
                return self.end, kw

# start the state machine
MyParser().run()
will print
INFO:root:start of parsing
INFO:root:second
INFO:root:End of parsing
You could use local functions in your func function. Ok, they are still declared inside one single global function, but Python is nice enough to still give you access to them for tests.
Here is an example of one function declaring and executing three (supposedly heavy) sub-functions. It takes one optional parameter test which, when set to 'TEST', prevents actual execution and instead gives external access to the individual sub-functions and to a local variable:
def func(test=None):
    glob = []

    def partA():
        glob.append('A')

    def partB():
        glob.append('B')

    def partC():
        glob.append('C')

    if test == 'TEST':
        global testA, testB, testC, testCR
        testA, testB, testC, testCR = partA, partB, partC, glob
        return None

    partA()
    partB()
    partC()
    return glob
When you call func, the 3 parts are executed in sequence. But if you first call func('TEST'), you can then access the local glob variable as testCR, and the 3 subfunctions as testA, testB and testC. This way you can still test individually the 3 parts with well defined input and control their output.
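For example, a test could drive the parts individually using the names that func('TEST') exposes:

func('TEST')                  # expose the internals instead of running them
testA()
testB()
assert testCR == ['A', 'B']   # partC was never executed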
I would insist on the suggestion given by @user3159253 in his comment on the original question:
If the sole purpose is readability I would split the func into three "private" or "protected" ones (i.e. _func1 or __func1) and a private or protected property which keeps the state shared between the functions.
This makes a lot of sense to me and seems more usual amongst object oriented programming than the other options. Consider this example as an alternative:
Your class (teste.py):
class Test:
    def __init__(self):
        self.__environment = {}  # Protected information to be shared
        self.public_stuff = 'public info'  # Accessible to outside callers

    def func(self):
        print "Main function"
        self.__func_a()
        self.__func_b()
        self.__func_c()
        print self.__environment

    def __func_a(self):
        self.__environment['function a says'] = 'hi'

    def __func_b(self):
        self.__environment['function b says'] = 'hello'

    def __func_c(self):
        self.__environment['function c says'] = 'hey'
Other file:
from teste import Test
t = Test()
t.func()
This will output:
Main function
{'function a says': 'hi', 'function b says': 'hello', 'function c says': 'hey'}
If you try to call one of the protected functions, an error occurs:
Traceback (most recent call last):
  File "C:/Users/Lucas/PycharmProjects/testes/other.py", line 6, in <module>
    t.__func_a()
AttributeError: Test instance has no attribute '__func_a'
Same thing if you try to access the protected environment variable:
Traceback (most recent call last):
  File "C:/Users/Lucas/PycharmProjects/testes/other.py", line 5, in <module>
    print t.__environment
AttributeError: Test instance has no attribute '__environment'
In my view this is the most elegant, simple and readable way to solve your problem, let me know if it fits your needs :)
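One caveat worth knowing: the double-underscore attributes are hidden by Python's name mangling rather than being truly private, so they remain reachable under their mangled names if a test really needs them:

t = Test()
t._Test__func_a()           # the mangled name of __func_a
print t._Test__environment  # {'function a says': 'hi'}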

Google App Engine NDB post_create hook

I wanted to create proper post_create (and also post_get and post_put) hooks, similar to the ones I had in the DB version of my app.
Unfortunately I can't use has_complete_key.
The problem is well known: the lack of is_saved on a model.
Right now I have implemented it like this:
class NdbStuff(HooksInterface):
    def __init__(self, *args, **kwds):
        super(NdbStuff, self).__init__(*args, **kwds)
        self._is_saved = False

    def _put_async(self, post_hooks=True, **ctx_options):
        """Implementation of pre/post create hooks."""
        if not self._is_saved:
            self._pre_create_hook()
        fut = super(NdbStuff, self)._put_async(**ctx_options)
        if not self._is_saved:
            fut._immediate_callbacks.insert(
                0,
                (
                    self._post_create_hook,
                    [fut],
                    {},
                )
            )
            self._is_saved = True
        if post_hooks is False:
            fut._immediate_callbacks = []
        return fut

    put_async = _put_async

    @classmethod
    def _post_get_hook(cls, key, future):
        obj = future.get_result()
        if obj is not None:
            obj._is_saved = True
        cls._post_get(key, future)

    def _post_put_hook(self, future):
        if future.state == future.FINISHING:
            self._is_saved = True
        else:
            self._is_saved = False
        self._post_put(future)
Everything except the post_create hook seems to work.
The post_create hook is triggered every time I use put_async without retrieving the object first.
I would really appreciate a clue on how to trigger the post_create hook only once, after the object has been created.
I am not sure why you are creating the NdbStuff class.
Anyway, if you are creating an instance of a class and you want to track _is_saved or something similar, use a factory to control creation and the setting of the property; in this case it makes more sense to track _is_new, for example.
class MyModel(ndb.Model):
    some_prop = ndb.StringProperty()

    def _pre_put_hook(self):
        if getattr(self, '_is_new', None):
            self._pre_create_hook()
        # do something

    def _pre_create_hook(self):
        # do something on first save
        log.info("First put for this object")

    def _post_create_hook(self, future):
        # do something
        pass

    def _post_put_hook(self, future):
        if getattr(self, '_is_new', None):
            self._post_create_hook(future)
            # Get rid of the flag on a successful put,
            # in case you make some changes and save again.
            delattr(self, '_is_new')

    @classmethod
    def factory(cls, *args, **kwargs):
        new_obj = cls(*args, **kwargs)
        setattr(new_obj, '_is_new', True)
        return new_obj
Then
myobj = MyModel.factory(someargs)
myobj.put()
myobj.some_prop = 'test'
myobj.put()
Will call the _pre_create_hook on the first put, and not on the second.
As long as you always create the entities through the factory, the _pre_create_hook call will always be executed.
Does that make sense?
