Python, re-entering the `with` block

I am currently working on an import script that imports listings from a database which regularly shuts down every 15 minutes for a re-snap.
I have created the `with` block below to look after the retry mechanism when creating connections:
class DBRetryController(object):
    def __init__(self, conn_obj, retry_count_down=5, sleep_time=10):
        self.conn_obj = conn_obj
        self.connection = None
        self.cursor = None
        self.retry_count_down = retry_count_down
        self.sleep_time = sleep_time

    def __enter__(self):
        last_exc = None
        while self.retry_count_down > 0:
            try:
                if callable(self.conn_obj):
                    self.connection = self.conn_obj()
                else:
                    self.connection = self.conn_obj
                self.cursor = self.connection.cursor()
                last_exc = None
                break
            except OperationalError as e:
                last_exc = e
                log.warning('Caught db error, possibly due to sql server gone away, retrying in a few moments')
                self.retry_count_down -= 1
                time.sleep(self.sleep_time)
        if last_exc:
            raise last_exc
        return self.connection, self.cursor
    def __exit__(self, exc_type, exc_value, exc_traceback):
        try:
            self.cursor.close()
            self.connection.close()
        except Exception:
            pass
        # Returning a falsy value lets any exception from the with block propagate.
        return False
And use it as below:
with DBRetryController(self.connection) as (_, cursor):
    cursor.execute(self.LISTING_QUERY)
But the problem is that the server can shut down during execution of the query. Is it possible to modify the DBRetryController so that the nested block of code is re-entered?

If I understand your question correctly, I think you can use a scheme like this:
notCompleted = 1

class TestClass():
    def run(self):
        global notCompleted
        notCompleted = 1
        # do_something here
        notCompleted = 0

test = TestClass()
test.run()
while notCompleted:
    test.run()
Let's assume I want to be sure that, even if an error occurs during execution of the run() method, my program will retry until it runs to completion. notCompleted is 1 by default; I assign 1 to it at the beginning of run() and 0 to it at the end. If I have any problem anywhere inside run(), the while loop will call the function again.
I think you need to add a try...except around the call too.
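For instance, here is a minimal, self-contained sketch of that scheme with the try...except added; do_something is a stand-in that fails randomly to exercise the retry path:
import random
import time

def do_something():
    # Stand-in for the real work; fails half the time to exercise the retry.
    if random.random() < 0.5:
        raise RuntimeError('simulated failure')

notCompleted = 1
while notCompleted:
    try:
        do_something()
        notCompleted = 0      # only reached when the work completed
    except RuntimeError:
        time.sleep(1)         # wait a moment, then re-run the whole block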


Python - threading assert group is None when creating a custom Thread Class

I wanted to create a custom Thread class that is able to propagate an exception it comes across to the main thread. My implementation is as follows:
class VerseThread(threading.Thread):
    def __init__(self, args):
        super().__init__(self, args=args)
        # self.scraper = scraper

    def run(self):
        self.exc = None
        try:
            book, abbrev, template, chapter = self.args
            self.parser.parse(book, abbrev, template, chapter)
        except ChapterNotFoundError as e:
            self.exc = e

    def join(self):
        threading.Thread.join(self)
        if self.exc:
            raise self.exc
This is supposed to run in the following method, inside a Scraper class (it's all inside a `while True` loop):
for book, abbrev, testament in self.books[init:end]:
    base_chapter = 1
    while True:
        threads = []
        if testament == 'ot':
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, OT_TEMPLATE, base_chapter+i)))
        else:
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, NT_TEMPLATE, base_chapter+i)))
        try:
            for thread in threads:
                if not thread.is_alive():
                    thread.start()
            for thread in threads:
                thread.join()
            base_chapter += 3
        except ChapterNotFoundError as e:
            LOGGER.info(f"{{PROCESS {multiprocessing.current_process().pid}}} - Chapter {e.chapter} not found in {book}, exiting book...")
            break
The issue is, if I run it like presented here, I get the error assert group is None, "group argument must be None for now". However, when I run it using Thread(target=self.parse, args=(book, abbrev, OT_TEMPLATE, base_chapter+1)) instead of VerseThread(args=(book, abbrev, OT_TEMPLATE, base_chapter+i)), it works just fine, but the exception is of course still there. What's wrong with my code? How can I get rid of this error?
EDIT: Upon further testing, it seems that what I'm trying to do works fine when I use thread.run() instead of thread.start(), but then only one thread is being used, which is a problem. This, however, means that the error must be in the start() method, but I've no idea what to do.
You have several errors. First, if you are using super() as in super().__init__(self, target=target, args=args), you do not pass self explicitly as an argument. Second, to handle any possible thread-initializer arguments, your signature for this method should just be as follows:
class VerseThread(threading.Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    ...  # rest of the code omitted
But since your __init__ method does not do anything but call the parent's __init__ method with any passed arguments, there is now no need to even override this method.
Finally, the attributes that you are interested in are not args but rather _args and _kwargs (in case keyword arguments are specified). Also, you have specified self.parser, but I do not see where that attribute has been set.
import threading

class ChapterNotFoundError(Exception):
    pass

class VerseThread(threading.Thread):
    def run(self):
        self.exc = None
        try:
            book, abbrev, template, chapter = self._args
            self.parser.parse(book, abbrev, template, chapter)
        except ChapterNotFoundError as e:
            self.exc = e

    def join(self):
        threading.Thread.join(self)  # Or: super().join()
        if self.exc:
            raise self.exc
for book, abbrev, testament in self.books[init:end]:
    base_chapter = 1
    while True:
        threads = []
        if testament == 'ot':
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, OT_TEMPLATE, base_chapter+i)))
        else:
            for i in range(3):
                threads.append(VerseThread(args=(book, abbrev, NT_TEMPLATE, base_chapter+i)))
        try:
            for thread in threads:
                if not thread.is_alive():
                    thread.start()
            for thread in threads:
                thread.join()
            base_chapter += 3
        except ChapterNotFoundError as e:
            LOGGER.info(f"{{PROCESS {multiprocessing.current_process().pid}}} - Chapter {e.chapter} not found in {book}, exiting book...")
            break
Improvement
Accessing quasi-private attributes such as self._args is potentially dangerous and should be avoided.
I can see the value of creating a subclass of Thread that catches exceptions in the "worker" function it is to execute and then "propagates" them back to the main thread when the thread is joined. But I believe such a class should be general purpose and work with any type of worker function. In general, I don't like to have application-specific code (business logic) in a threading.Thread or multiprocessing.Pool subclass. I instead prefer having my business logic coded within a function or class method(s) that can then be used in multithreading, multiprocessing or serial processing as you see fit. The following is how I would code the Thread subclass (I have named it PropagateExceptionThread, but choose whatever name you wish) and how I might use it:
import threading

class PropagateExceptionThread(threading.Thread):
    def run(self):
        self.exc = None
        try:
            super().run()
        except Exception as e:
            self.exc = e

    def join(self):
        super().join()
        if self.exc:
            raise self.exc

def worker(x):
    if x < 10 or x > 20:
        raise ValueError(f'Bad value for argument x = {x}')

t = PropagateExceptionThread(target=worker, args=(1,))
t.start()
try:
    t.join()
except Exception as e:
    print('The thread raised an exception:', e)
Prints:
The thread raised an exception: Bad value for argument x = 1

Exec alternative

The problem
I've written a function that receives a function name as a string and places it in a priority queue for execution. This works if I use exec, but I don't want to leave it like this because of the inherent security issues.
All of the other alternatives I've found require the received function name to come without the brackets at the end. I need to keep them in place until I'm able to rewrite the sending code.
The question
Is there an alternative to exec that will work with the function's brackets in place?
Sample code:
Listening / queuing function:
def queue_listener():
    listener = multiprocessing.connection.Listener(('localhost', 6000), authkey=b'secret password')
    global task_queue
    task_queue = PriorityQueue()
    try:
        while True:
            conn = listener.accept()
            msg = None
            try:
                msg = conn.recv()
                print("Recv: " + msg)
                task_queue.put(ast.literal_eval(msg))
            except EOFError:
                listener.close()  # Ignore end of file errors.
    except Exception as err:
        '''Do some logging '''
        return 1
    finally:
        listener.close()
    return 0
Processing function:
def task_queue_processor():
    global task_queue
    try:
        while True:
            task_queue_item = task_queue.get()
            print("Proc: " + str(task_queue_item))
            exec(task_queue_item[1])
    except Exception as err:
        '''Do some logging'''
        return 1
    finally:
        task_queue_item = None
    return 0
Here's an example of the function name as received by the listener:
"read_device()"
or possibly
"read_device([3],[8])"
Thanks in advance
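For illustration only (this is not from the original thread): one exec-free approach that keeps the brackets in place is to parse the call string with ast and dispatch through an explicit whitelist, so only known functions can run. read_device is the name from the question; the DISPATCH registry and run_call_string helper are hypothetical:
import ast

def read_device(*args):                     # stand-in for the real function
    print("read_device called with", args)

DISPATCH = {'read_device': read_device}     # whitelist of callable names

def run_call_string(call_string):
    # Parse a string like "read_device([3],[8])" into a call on a known function.
    node = ast.parse(call_string, mode='eval').body
    if not isinstance(node, ast.Call) or not isinstance(node.func, ast.Name):
        raise ValueError('not a plain function call: %r' % call_string)
    func = DISPATCH[node.func.id]           # raises KeyError for unknown names
    args = [ast.literal_eval(arg) for arg in node.args]
    return func(*args)

run_call_string("read_device([3],[8])")     # would replace exec(task_queue_item[1])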

Is there a way to 'pause' pyserial's ReaderThread to allow a direct read of a serial port

I've got a gui that I'm playing with that uses pyserial. In it I'm using pyserial's ReaderThread to monitor the serial output of my serial device and print it out on a console window.
I also am using pyserial's Serial() implementation for sending commands to the serial device.
Usually I don't need to grab the response to a ser.write() and just let the ReaderThread handle it.
However, there are now occasions where I'd like to pause the ReaderThread, do a ser.read() into a variable, act on the variable, and then unpause the ReaderThread to let it continue its thing.
I tried ReaderThread.stop(), but it seems to drop the connection.
I also tried creating my own readerThread.run() function that has mutex locking and replacing the run method with it, but that's turning out to be a bit squirrelly.
Am I missing an easy way to do this?
I figured out a way by monkey-patching the ReaderThread class:
def localinit(self, serial_instance, protocol_factory):
    """\
    Initialize thread.

    Note that the serial_instance's timeout is set to one second!
    Other settings are not changed.
    """
    super(ReaderThread, self).__init__()
    self.daemon = True
    self.serial = serial_instance
    self.protocol_factory = protocol_factory
    self.alive = True
    self._lock = threading.Lock()
    self._connection_made = threading.Event()
    self.protocol = None
    self._stop_event = threading.Event()
    print("****************************************************")
    print("                    localinit                       ")
    print("****************************************************")

def localrun(self):
    """Reader loop"""
    print("****************************************************")
    print("                    localrun                        ")
    print("****************************************************")
    if not hasattr(self.serial, 'cancel_read'):
        self.serial.timeout = 1
    self.protocol = self.protocol_factory()
    try:
        self.protocol.connection_made(self)
    except Exception as e:
        self.alive = False
        self.protocol.connection_lost(e)
        self._connection_made.set()
        return
    error = None
    self._connection_made.set()
    while self.alive and self.serial.is_open:
        while self._stop_event.is_set():
            # print("local run while")
            time.sleep(1)
        try:
            data = self.serial.read(self.serial.in_waiting or 1)
        except serial.SerialException as e:
            # probably some I/O problem such as disconnected USB serial
            # adapters -> exit
            error = e
            break
        else:
            if data:
                # make a separated try-except for called user code
                try:
                    self.protocol.data_received(data)
                except Exception as e:
                    error = e
                    break
    self.alive = False
    self.protocol.connection_lost(error)
    self.protocol = None

def localpause(self):
    self._stop_event.set()

def localresume(self):
    self._stop_event.clear()
Then in my main code:
ReaderThread.run = localrun
ReaderThread.__init__ = localinit
ReaderThread.pause = localpause
ReaderThread.resume = localresume
self.reader = ReaderThread(serialPort, SerialReaderProtocolLine)
self.reader.start()
def write_read_cmd(self, cmd_str):
    if not serialPort.isOpen():
        print("Serial port not yet open")
        return
    app.serialcom.reader.pause()
    serialPort.reset_input_buffer()  # flush the buffer
    serialPort.write(bytes(cmd_str, encoding='utf-8'))
    line = serialPort.readline()
    app.serialcom.reader.resume()
    line = line.decode("utf-8")
    return line

Start python Process with output and timeout

I'm trying to find a way to start a new Process and get its output if it takes less than X seconds. If the process takes more time, I would like to ignore the Process result, kill the Process and carry on.
I basically need to add a timer to the code below. I'm not sure if there's a better way to do it; I'm open to a different and better solution.
from multiprocessing import Process, Queue

def f(q):
    # Ugly work
    q.put(['hello', 'world'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()
    p.join()
Thanks!
You may find the following module useful in your case:
Module
#! /usr/bin/env python3
"""Allow functions to be wrapped in a timeout API.

Since code can take a long time to run and may need to terminate before
finishing, this module provides a set_timeout decorator to wrap functions."""

__author__ = 'Stephen "Zero" Chappell ' \
             '<stephen.paul.chappell@atlantis-zero.net>'
__date__ = '18 December 2017'
__version__ = 1, 0, 1
__all__ = [
    'set_timeout',
    'run_with_timeout'
]

import multiprocessing
import sys
import time

DEFAULT_TIMEOUT = 60


def set_timeout(limit=None):
    """Return a wrapper that provides a timeout API for callers."""
    if limit is None:
        limit = DEFAULT_TIMEOUT
    _Timeout.validate_limit(limit)

    def wrapper(entry_point):
        return _Timeout(entry_point, limit)

    return wrapper


def run_with_timeout(limit, polling_interval, entry_point, *args, **kwargs):
    """Execute a callable object and automatically poll for results."""
    engine = set_timeout(limit)(entry_point)
    engine(*args, **kwargs)
    while engine.ready is False:
        time.sleep(polling_interval)
    return engine.value


def _target(queue, entry_point, *args, **kwargs):
    """Help with multiprocessing calls by being a top-level module function."""
    # noinspection PyPep8,PyBroadException
    try:
        queue.put((True, entry_point(*args, **kwargs)))
    except:
        queue.put((False, sys.exc_info()[1]))


class _Timeout:
    """_Timeout(entry_point, limit) -> _Timeout instance"""

    def __init__(self, entry_point, limit):
        """Initialize the _Timeout instance with all needed attributes."""
        self.__entry_point = entry_point
        self.__limit = limit
        self.__queue = multiprocessing.Queue()
        self.__process = multiprocessing.Process()
        self.__timeout = time.monotonic()

    def __call__(self, *args, **kwargs):
        """Begin execution of the entry point in a separate process."""
        self.cancel()
        self.__queue = multiprocessing.Queue(1)
        self.__process = multiprocessing.Process(
            target=_target,
            args=(self.__queue, self.__entry_point) + args,
            kwargs=kwargs
        )
        self.__process.daemon = True
        self.__process.start()
        self.__timeout = time.monotonic() + self.__limit

    def cancel(self):
        """Terminate execution if possible."""
        if self.__process.is_alive():
            self.__process.terminate()

    @property
    def ready(self):
        """Property letting callers know if a returned value is available."""
        if self.__queue.full():
            return True
        elif not self.__queue.empty():
            return True
        elif self.__timeout < time.monotonic():
            self.cancel()
        else:
            return False

    @property
    def value(self):
        """Property that retrieves a returned value if available."""
        if self.ready is True:
            valid, value = self.__queue.get()
            if valid:
                return value
            raise value
        raise TimeoutError('execution timed out before terminating')

    @property
    def limit(self):
        """Property controlling what the timeout period is in seconds."""
        return self.__limit

    @limit.setter
    def limit(self, value):
        self.validate_limit(value)
        self.__limit = value

    @staticmethod
    def validate_limit(value):
        """Verify that the limit's value is not too low."""
        if value <= 0:
            raise ValueError('limit must be greater than zero')
The following example demonstrates its usage:
Example
from time import sleep

# assumes the module above is importable, e.g.:
# from timeout_module import set_timeout


def main():
    timeout_after_four_seconds = set_timeout(4)
    # create copies of a function that have a timeout
    a = timeout_after_four_seconds(do_something)
    b = timeout_after_four_seconds(do_something)
    c = timeout_after_four_seconds(do_something)
    # execute the functions in separate processes
    a('Hello', 1)
    b('World', 5)
    c('Jacob', 3)
    # poll the functions to find out what they returned
    results = [a, b, c]
    polling = set(results)
    while polling:
        for process, name in zip(results, 'abc'):
            if process in polling:
                ready = process.ready
                if ready is True:  # if the function returned
                    print(name, 'returned', process.value)
                    polling.remove(process)
                elif ready is None:  # if the function took too long
                    print(name, 'reached timeout')
                    polling.remove(process)
                else:  # if the function is running
                    assert ready is False, 'ready must be True, False, or None'
        sleep(0.1)
    print('Done.')


def do_something(data, work):
    sleep(work)
    print(data)
    return work


if __name__ == '__main__':
    main()
Does the process you are running involve a loop?
If so, you can record a timestamp prior to starting the loop and include an if statement within the loop with a sys.exit() call that terminates the script if the current timestamp differs from the recorded start timestamp by more than x seconds.
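A minimal sketch of that idea (the sleep-based work chunk and the 10-second limit are illustrative):
import sys
import time

LIMIT_SECONDS = 10                 # illustrative limit
start = time.time()
while True:
    time.sleep(1)                  # stand-in for one chunk of work
    if time.time() - start > LIMIT_SECONDS:
        sys.exit('timed out')      # terminate the script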
All you need to adapt the queue example from the docs to your case is to pass the timeout to the q.get() call and terminate the process on timeout:
from Queue import Empty

...
try:
    print q.get(timeout=timeout)
except Empty:  # no value, timeout occurred
    p.terminate()
    q = None  # the queue might be corrupted after the `terminate()` call
p.join()
Using a Pipe might be more lightweight; otherwise the code is the same (you could use .poll(timeout) to find out whether there is data to receive).
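For reference, a minimal sketch of the Pipe variant under the same assumptions as the question's example (the 5-second timeout is illustrative):
from multiprocessing import Process, Pipe

def f(conn):
    # Ugly work
    conn.send(['hello', 'world'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe(duplex=False)
    p = Process(target=f, args=(child_conn,))
    p.start()
    if parent_conn.poll(5):        # wait up to 5 seconds for data
        print(parent_conn.recv())
    else:                          # no value arrived in time
        p.terminate()
    p.join()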

Sub Process in its own Thread

I'm wondering if the following class is sound. I'm using it to launch a bunch of simulators for each test in my test environment.
class SubProcessInOwnThread(threading.Thread):
    def __init__(self, arguments, currentWorkingDirectory):
        self.arguments = arguments
        self.currentWorkingDirectory = currentWorkingDirectory
        threading.Thread.__init__(self)
        self.isTerminated = False

    def run(self):
        try:
            self.subProcess = subprocess.Popen(self.arguments, cwd=self.currentWorkingDirectory)
            self.subProcess.wait()
        finally:
            self.isTerminated = True

    def kill(self):
        while not self.isTerminated:
            try:
                self.subProcess.kill()
            except:
                time.sleep(0.1)
Some scenarios:
# Normal
subProcessThreadArguments = ["cmd.exe"]
subProcessThread = SubProcessInOwnThread(subProcessThreadArguments,r"C:\\")
subProcessThread.start()
time.sleep(5)
subProcessThread.kill()
# Process killed very quickly
subProcessThreadArguments = ["cmd.exe"]
subProcessThread = SubProcessInOwnThread(subProcessThreadArguments,r"C:\\")
subProcessThread.start()
subProcessThread.kill()
# Incorrect configuration
subProcessThreadArguments = ["cmdsfgfg.exe"]
subProcessThread = SubProcessInOwnThread(subProcessThreadArguments,r"C:\\")
subProcessThread.start()
time.sleep(5)
subProcessThread.kill()
So I can create simulators like this:
subProcessThreadArguments1 = ["sim1.exe"]
subProcessThread1 = SubProcessInOwnThread(subProcessThreadArguments1,r"C:\\")
subProcessThread1.start()
subProcessThreadArguments2 = ["sim2.exe"]
subProcessThread2 = SubProcessInOwnThread(subProcessThreadArguments2,r"C:\\")
subProcessThread2.start()
# do test...
subProcessThread1.kill()
subProcessThread2.kill()
I'd be interested in any improvements. Should I consider the use of the with keyword? If so, what would the benefits be?
Thanks!
I don't see the point of having a separate thread stuck in wait() here. Working directly on the subprocess would look like this:
class SubProcessWithoutThread(object):
    def __init__(self, arguments, currentWorkingDirectory):
        self.arguments = arguments
        self.currentWorkingDirectory = currentWorkingDirectory
        self.isTerminated = False

    def start(self):
        self.subProcess = subprocess.Popen(self.arguments, cwd=self.currentWorkingDirectory)

    def kill(self):
        while self.subProcess.poll() is None:
            try:
                self.subProcess.kill()
            except:
                time.sleep(0.1)

    def __enter__(self):
        self.start()
        return self  # return the instance so `with ... as sp` binds it

    def __exit__(self, *x):
        self.kill()
(untested)
I have added the methods for a context manager, but I cannot see how that would help you, as it would require quite a bunch of with statements, including the necessary indentation.
But maybe I have got your intention wrong...
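If the pile of with statements is the worry, contextlib.ExitStack (Python 3.3+) can enter any number of these context managers without nested indentation; a sketch, assuming the class above with __enter__ returning self:
from contextlib import ExitStack

with ExitStack() as stack:
    sims = [stack.enter_context(SubProcessWithoutThread([exe], r"C:\\"))
            for exe in ("sim1.exe", "sim2.exe")]
    # do test... every simulator is killed when the block exits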
