Python time and size batcher

I need a little util to batch messages by count or time duration, whichever comes first (application: sending messages to Kinesis, either one at a time if production is slow, or in batches if all of a sudden there are lots of messages to send).
There are many ways to skin a cat, but I came up with the following, which uses a deque and threading.Timer. The questions are:
is it safe (this is used by the main thread)?
is there a simpler or more pythonic way of doing this?
profiling suggests that acquiring _thread.lock and _thread.start_new_thread take a while; is there a different way that would be faster? (Note: if Batcher(..., seconds=None) is used, there is no such cost).
import threading
import time
from collections import deque


class Batcher():
    def __init__(self, size=None, seconds=None, callback=None):
        self.batch = deque()
        self.size = size
        self.seconds = seconds
        self.callback = callback
        self.thread = None

    def flush(self):
        if self.thread:
            self.thread.cancel()
            self.thread = None
        if self.batch:
            a = list(self.batch)
            self.batch.clear()
            if self.callback:
                self.callback(a)

    def add(self, e):
        self.batch.append(e)
        if self.size is not None and len(self.batch) >= self.size:
            self.flush()
        elif self.seconds is not None and self.thread is None:
            self.thread = threading.Timer(self.seconds, self.flush)
            self.thread.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.flush()
Simple test:
origin = time.time()

def walltime(origin):
    dt = time.time() - origin
    return f'{dt:6.3f} s'

def foo(batch):
    print(f'now={walltime(origin)}, batch={batch}')

with Batcher(size=3, seconds=0.5, callback=foo) as b:
    for k in range(7):
        b.add(f'at {walltime(origin)}: {k}')
        time.sleep(0.3)
Out[ ]:
now= 0.501 s, batch=['at 0.000 s: 0', 'at 0.301 s: 1']
now= 1.101 s, batch=['at 0.601 s: 2', 'at 0.902 s: 3']
now= 1.702 s, batch=['at 1.202 s: 4', 'at 1.503 s: 5']
now= 2.103 s, batch=['at 1.803 s: 6']
Speed test:
In[ ]:
%%time
batch_stats = []

def proc(batch):
    batch_stats.append(len(batch))

with Batcher(size=100, seconds=5, callback=proc) as b:
    for k in range(120164):
        b.add(k)
Out[ ]:
CPU times: user 166 ms, sys: 74.7 ms, total: 240 ms
Wall time: 178 ms
In[ ]:
Counter(batch_stats)
Out[ ]:
Counter({100: 1201, 64: 1})

The reason the code spends so much time acquiring the lock and in _thread.start_new_thread is that you start a new thread every time you need a timer for a delayed send.
Here's a solution that keeps a single worker thread running constantly in the background. Two condition variables are used to trigger the timer and to wait for the requested delay. It was twice as fast in my timing test, but I could only test on one machine:
import threading
from threading import Condition


class Batcher:
    def __init__(self, size=None, seconds=None, callback=None):
        self._batch = []
        self._seconds = seconds
        self._size = size
        self._callback = callback
        self._cyclic_requested = False
        self._wait_for_start = False
        self._cancelled = False
        self._timer_started = False
        self._lock = threading.RLock()
        self._timer_condition = Condition(self._lock)
        self._finished = Condition(self._lock)
        self._finished_flag = False
        self._thread = threading.Thread(target=self._cycle_send)
        self._thread.start()

    def _cycle_send(self):
        while True:
            with self._lock:
                # Wait for the timer_condition to be notified to start the timer
                self._wait_for_start = True
                # If a cyclic send was requested while the thread was not
                # waiting, go directly to the wait time
                if not self._cyclic_requested:
                    self._timer_condition.wait()
                # If finished is set, end the thread
                if self._finished_flag:
                    return
                # Reset the flags
                self._cyclic_requested = False
                self._wait_for_start = False
                self._cancelled = False
                self._timer_started = True
                # Wait for the finished condition to be notified or the timeout
                self._finished.wait(self._seconds)
                # If finished is set, end the thread
                if self._finished_flag:
                    return
                self._timer_started = False
                # If the timer has been cancelled no sending
                # is needed anymore, go back to waiting
                if self._cancelled:
                    continue
                batch = self._batch
                self._batch = []
            self._send_batch(batch)

    def _send_batch(self, batch):
        if self._callback:
            self._callback(batch)

    def add(self, e):
        batch = None
        with self._lock:
            # Unconditionally append to the batch
            self._batch.append(e)
            if self._size is not None and len(self._batch) >= self._size:
                # If an immediate send is required, copy the batch and reset
                # the shared variable; also cancel the cyclic send by marking
                # it cancelled and notifying the finished condition
                batch = self._batch
                self._batch = []
                self._cancelled = True
                self._cyclic_requested = False
                self._finished.notify_all()
            # If the batch is not full, arm the timer send
            elif not self._timer_started:
                if self._wait_for_start:
                    self._timer_condition.notify_all()
                else:
                    self._cyclic_requested = True
        # the sending is done outside the lock to avoid keeping the lock for too long
        if batch is not None:
            self._send_batch(batch)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        with self._lock:
            # Set the finished flag and notify both conditions to let the thread terminate
            self._finished_flag = True
            self._timer_condition.notify_all()
            self._finished.notify_all()
        self._thread.join()
        # Send what is left
        self._send_batch(self._batch)
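For completeness, here is a minimal usage sketch (it mirrors the question's simple test and assumes the class above is in scope; names are illustrative):
import time

def show(batch):
    print(f'batch={batch}')

# Same shape as the question's test: flush after 3 items or 0.5 s,
# whichever comes first.
with Batcher(size=3, seconds=0.5, callback=show) as b:
    for k in range(7):
        b.add(k)
        time.sleep(0.3)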

Related

Concurrency Leetcode problem raises a Time Limit Exceeded without any understandable reason

While learning about concurrency and threading locks, I tried this problem: https://leetcode.com/problems/building-h2o (the problem is explained at the link).
My code aims to release, in order, Hydrogen two times before releasing Oxygen. So with the test case "HOH" (which in Leetcode means: a "hydrogen" thread arrives first, then an "oxygen" thread, and finally another "hydrogen" thread), my code should let the first thread run, block the "oxygen" thread until the last "hydrogen" thread has run, and then release the previously blocked "oxygen" thread.
I got a Time Limit Exceeded on the test case "HOH", so I suspect there is a deadlock in my solution, but I am not sure. Do you have any idea?
import threading

class H2O:
    def __init__(self):
        print("init")
        self.lock1 = threading.Lock()
        self.lock2 = threading.Lock()
        self.H = 0
        self.lock1.acquire()
        self.lock2.acquire()

    def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:
        if self.H < 2:
            # releaseHydrogen() outputs "H". Do not change or remove this line.
            print("release H")
            releaseHydrogen()
            self.H += 1
        else:
            self.lock2.release()
            self.lock1.acquire()

    def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:
        self.lock2.acquire()
        # releaseOxygen() outputs "O". Do not change or remove this line.
        print("release O")
        releaseOxygen()
        self.H = 0
        self.lock1.release()
EDIT:
I wrote a minimal example here (cooked up by me), but I suppose it is better to run the code on the Leetcode platform (their test cases and interactive functions are much better implemented):
from threading import Thread
import threading
import time

def releaseOxygen():
    print("O")

def releaseHydrogen():
    print("H")

class H2O:
    def __init__(self):
        print("init")
        self.lock1 = threading.Lock()
        self.lock2 = threading.Lock()
        self.H = 0
        self.lock1.acquire()
        self.lock2.acquire()

    def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:
        if self.H < 2:
            # releaseHydrogen() outputs "H". Do not change or remove this line.
            print("release H")
            releaseHydrogen()
            self.H += 1
        else:
            self.lock2.release()
            self.lock1.acquire()

    def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:
        self.lock2.acquire()
        # releaseOxygen() outputs "O". Do not change or remove this line.
        print("release O")
        releaseOxygen()
        self.H = 0
        self.lock1.release()

if __name__ == "__main__":
    H2o = H2O()
    thread = threading.Thread(target=H2o.hydrogen, args=(releaseHydrogen,))
    thread2 = threading.Thread(target=H2o.oxygen, args=(releaseOxygen,))
    thread3 = threading.Thread(target=H2o.hydrogen, args=(releaseHydrogen,))
    print("thread 1")
    thread.start()
    time.sleep(0.2)
    print("thread 2")
    thread2.start()
    time.sleep(0.2)
    print("thread 3")
    thread3.start()
    thread.join()
    thread2.join()
    thread3.join()
    print('Finished')
Most likely the initializer is called from a different thread than the calls to hydrogen and oxygen. As a result, the locks that are acquired in the initializer are never released again, and all subsequent acquisitions will block.
One possible solution takes advantage of threading.Condition.
import threading

class H2O:
    def __init__(self):
        self.cond = threading.Condition()
        self.H = 0
        self.O = 0

    def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:
        with self.cond:
            while self.H == 2:
                self.cond.wait()
            self.H += 1
            releaseHydrogen()
            self.check_H2O()

    def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:
        with self.cond:
            while self.O == 1:
                self.cond.wait()
            self.O += 1
            releaseOxygen()
            self.check_H2O()

    def check_H2O(self) -> None:
        if self.H == 2 and self.O == 1:
            # Reset
            self.H = 0
            self.O = 0
            # Notify other threads
            self.cond.notify_all()
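A quick way to exercise this outside Leetcode is a small harness along the lines of the question's minimal example (just a sketch; Leetcode's own driver is more thorough):
import threading

def releaseHydrogen():
    print("H", end="")

def releaseOxygen():
    print("O", end="")

if __name__ == "__main__":
    h2o = H2O()
    # "HOH": two hydrogen threads and one oxygen thread
    threads = [
        threading.Thread(target=h2o.hydrogen, args=(releaseHydrogen,)),
        threading.Thread(target=h2o.oxygen, args=(releaseOxygen,)),
        threading.Thread(target=h2o.hydrogen, args=(releaseHydrogen,)),
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print()  # expected: some ordering of H, H, O forming one molecule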

Threaded Buffer crashes

I tried to implement a 5 element buffer through threading and a list.
import threading

class Buffer1:
    def __init__(self, size):
        self.empty = True
        self.content = None
        self.lock = threading.Condition()
        self.list = []

    def take(self):
        with self.lock:
            while not self.list:
                self.lock.wait()
            help = self.list[len(self.list) - 1]
            del self.list[len(self.list) - 1]
            self.lock.notify_all()
            return help

    def put(self, v):
        with self.lock:
            while len(self.list) > 4:
                self.lock.wait()
            # self.content = v
            self.list.append(v)
            self.empty = False
            self.lock.notify_all()

    def show_list(self):
        return self.list
a = Buffer1(5)
a.put(7)
Theoretically it works, but when you exceed the limitations of the buffer, either by buffering 6 values or by trying to take() when there is no value buffered, the IDE becomes unresponsive. How could I go about fixing this?
You are using only one thread to add elements to the buffer, so once the list contains 5 items your main thread waits indefinitely on self.lock.wait(). You can use another thread that takes some elements in parallel; it will then notify the producer thread.
For example, creation of a consumer thread that takes 5 items:
def consume(buffer):
    import time
    for i in range(5):
        print(threading.current_thread(), "consume", buffer.take())
        time.sleep(2)
    print(threading.current_thread(), "Bye")

buffer = Buffer1(5)
t = threading.Thread(target=consume, args=(buffer,), name="consumer")
t.start()

buffer.put(1)
buffer.put(2)
buffer.put(3)
buffer.put(4)
buffer.put(5)
buffer.put(6)
print(buffer.show_list())
... the IDE becomes unresponsive. How could I go about fixing this?
You only showed adding to the buffer from the main thread and nothing ever takes anything out.
If the buffer gets filled up or becomes empty the next put/take will cause its Condition (lock) to wait until something notifies it to continue. I didn't see any of that signaling in your example.
The buffer is a shared resource. The buffer and the threads that use it need to have good control so that everyone can stay out of everyone else's way and enough logic to keep from getting stuck somewhere.
Presumably you need a thread that puts stuff into the buffer and a thread that takes stuff out of the buffer - both having ample signaling to notify everyone when they are done messing with the buffer.
Set up logging so that the program execution could be traced with log messages.
Buffer1 changes:
Changed the list to a collections.deque to simplify things a bit.
Added properties for empty and full
Added an Event attribute to stop putting/taking when the process gets shut down.
Added a timeout while waiting to put/take to forestall any timing issues when the threads are shut down
Added notifications for empty and full conditions.
Made two threads: one to add to the buffer and one to take from the buffer.
Each will add/take while its Event is not set.
In each iteration a random number of items are taken or added.
When taking/putting, the buffer's Condition is acquired and all waiters are notified when complete.
In the main thread:
An Event is created - for signaling the thread to quit
A Timer is created to limit thread execution time - when it times out its callback function sets the Event and uses the buffer's Condition (lock) to notify anyone that is waiting and free them up.
The threads are created, started, and joined.
import threading
import collections
import random
import string
import time
import logging
import sys

# logging setup
root = logging.getLogger()
root.setLevel(logging.INFO)

formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d %(message)s',
                              datefmt='%S')

class WarningFilter(logging.Filter):
    def filter(self, record):
        return record.levelno == logging.WARNING

class InfoFilter(logging.Filter):
    def filter(self, record):
        return record.levelno == logging.INFO

handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
handler.addFilter(InfoFilter())
root.addHandler(handler)

handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.WARNING)
handler.setFormatter(formatter)
handler.addFilter(WarningFilter())
root.addHandler(handler)
# logging setup end

class Buffer1:
    '''FILO buffer.
    '''
    def __init__(self, size, evt):
        self.content = None
        self.size = size
        self.evt = evt
        self.lock = threading.Condition()
        self.list = collections.deque()

    @property
    def full(self):
        return len(self.list) >= self.size

    @property
    def empty(self):
        return bool(self.list)

    def take(self):
        with self.lock:
            while not self.empty:
                root.warning('buffer empty waiting to take')
                self.lock.wait(timeout=5)
                if self.evt.is_set():
                    help = None
                    break
            else:
                help = self.list.pop()
            self.lock.notify_all()
        return help

    def put(self, v):
        success = False
        with self.lock:
            while self.full:
                root.warning('buffer full waiting to put')
                self.lock.wait(timeout=5)
                if self.evt.is_set():
                    break
            else:
                self.list.append(v)
                success = True
            self.lock.notify_all()
        return success

    def show_list(self):
        return self.list

class Prod(threading.Thread):
    '''Puts stuff onto buffer, quits on Event.
    Contrived toy - periodically puts random n items in buffer.
    '''
    def __init__(self, buffer, evt):
        super().__init__(name='producer')
        self.buffer = buffer
        self.evt = evt

    def run(self):
        n = 0
        while not self.evt.is_set():
            howmany = random.randint(1, 9)
            payload = random.sample(string.ascii_letters, howmany)
            payload = collections.deque(payload)
            root.info(f'{self.name} putting {howmany}')
            with self.buffer.lock:
                while payload and (not self.evt.is_set()):
                    c = payload.popleft()
                    root.info(f'{self.name} -----> {c}')
                    if not self.buffer.put(c):
                        root.warning(f'{self.name} last put failed')
                self.buffer.lock.notify_all()
            time.sleep(.04)
            n += 1
        root.info(f'{self.name} dying n={n}')
        with self.buffer.lock:
            self.buffer.lock.notify_all()
        root.info(f'{self.name} is done')

class Cons(threading.Thread):
    '''Takes stuff off of buffer, quits on Event set.
    Contrived toy - periodically takes random n items from buffer.
    '''
    def __init__(self, buffer, evt):
        super().__init__(name='consumer')
        self.buffer = buffer
        self.evt = evt

    def run(self):
        n = 0
        while not self.evt.is_set():
            howmany = random.randint(1, 9)
            root.info(f'{self.name} taking {howmany}')
            with self.buffer.lock:
                while (howmany > 0) and (not self.evt.is_set()):
                    c = self.buffer.take()
                    root.info(f'{self.name} <----- {c}')
                    howmany -= 1
                self.buffer.lock.notify_all()
            time.sleep(.04)
            n += 1
        root.info(f'{self.name} dying n={n}')
        with self.buffer.lock:
            self.buffer.lock.notify_all()
        root.info(f'{self.name} is done')

if __name__ == '__main__':
    # use an Event to shut down the whole process
    evt = threading.Event()
    buffer = Buffer1(5, evt)

    def kill(evt=evt, buffer=buffer):
        root.warning('killing everything')
        evt.set()
        with buffer.lock:
            buffer.lock.notify_all()

    # don't let this toy example run forever
    t = threading.Timer(5, kill)
    t.start()
    p1 = Prod(buffer, evt)
    c1 = Cons(buffer, evt)
    c1.start()
    p1.start()
    p1.join()
    c1.join()
    print('exit')
Here is another take using asyncio instead of threads to exercise your buffer.
import asyncio
import collections
import random
import string
import time
import logging
import sys

# logging setup
root = logging.getLogger()
root.setLevel(logging.INFO)

formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d %(message)s',
                              datefmt='%S')

class WarningFilter(logging.Filter):
    def filter(self, record):
        return record.levelno == logging.WARNING

class InfoFilter(logging.Filter):
    def filter(self, record):
        return record.levelno == logging.INFO

handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
handler.addFilter(InfoFilter())
root.addHandler(handler)

handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.WARNING)
handler.setFormatter(formatter)
handler.addFilter(WarningFilter())
root.addHandler(handler)

class Buffer:
    '''FILO buffer.
    '''
    def __init__(self, size, evt):
        self.content = None
        self.size = size
        self.stop_evt = evt
        self.lock = asyncio.Condition()
        self.list = collections.deque()

    def full(self):
        return len(self.list) >= self.size

    def not_full(self):
        return len(self.list) < self.size

    def empty(self):
        return not bool(self.list)

    def not_empty(self):
        return bool(self.list)

    async def take(self):
        async with self.lock:
            #root.info(f'take:lock acquired - wait for not empty')
            while self.empty():
                waiters = [thing for thing in self.lock._waiters]
                #root.warning(f'take:{waiters} waiting')
                await self.lock.wait()
            if self.stop_evt.is_set():  # shutting down
                val = None
            else:
                #root.info('take: not empty')
                val = self.list.pop()
            self.lock.notify_all()
            return val

    async def put(self, v):
        success = False
        async with self.lock:
            #root.info(f'put:lock acquired - wait for not full')
            while self.full():
                waiters = [thing for thing in self.lock._waiters]
                #root.warning(f'put:{waiters} waiting')
                await self.lock.wait()
                if self.stop_evt.is_set():  # shutting down
                    break
            else:
                #root.info('put: not full')
                self.list.append(v)
                success = True
            self.lock.notify_all()
            return success

    def show_list(self):
        return self.list

async def random_stuff():
    howmany = random.randint(1, 9)
    payload = random.sample(string.ascii_letters, howmany)
    return collections.deque(payload)

async def produce(buffer, stop_evt, name):
    puts = []
    try:
        while True:
            payload = await random_stuff()
            root.warning(f'producer{name} putting {len(payload)}')
            while payload:
                c = payload.popleft()
                root.info(f'producer{name} -----> {c}')
                success = await buffer.put(c)
                if not success:
                    root.warning(f'producer{name} failed to put {c}')
                else:
                    puts.append(c)
                await asyncio.sleep(.03)
    except asyncio.CancelledError as e:
        root.warning('producer canceled')
    root.info(f'producer{name} dying n={len(puts)}')
    root.info(f'producer{name} is done')
    return puts

async def consume(buffer, stop_evt, name):
    '''Takes stuff off of buffer, quits on Event set.
    Contrived toy - periodically takes random n items from buffer.
    '''
    takes = []
    try:
        while True:
            howmany = random.randint(1, 9)
            msg = f'consumer{name} taking {howmany}'
            root.warning(f'{msg:>38}')
            while howmany > 0:
                c = await buffer.take()
                takes.append(c)
                msg = f'consumer{name} <----- {c}'
                root.info(f'{msg:>38}')
                howmany -= 1
                await asyncio.sleep(.02)
    except asyncio.CancelledError as e:
        root.warning('consumer canceled')
    root.info(f'consumer{name} dying n={len(takes)}')
    root.info(f'consumer{name} is done')
    return takes

async def timer(n, buffer, evt, tasks):
    root.warning('timer started')
    await asyncio.sleep(n)
    evt.set()
    root.warning('timed out - event set')
    root.warning('canceling tasks')
    for task in tasks:
        task.cancel()

async def main():
    loop = asyncio.get_running_loop()
    loop.set_debug(True)
    # use an Event to shut down the whole process
    evt = asyncio.Event()
    buffer = Buffer(5, evt)
    put_task = asyncio.create_task(produce(buffer, evt, 1))
    take_task = asyncio.create_task(consume(buffer, evt, 1))
    timer_task = asyncio.create_task(timer(5, buffer, evt, [put_task, take_task]))
    root.info('tasks created')
    await timer_task
    puts = await put_task
    takes = await take_task
    print('exit')
    return puts, takes, buffer.list

if __name__ == '__main__':
    puts, takes, remains = asyncio.run(main())
    puts = collections.Counter(puts)
    takes = collections.Counter(takes)
    remains = collections.Counter(remains)
    #print(remains == (puts-takes))
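As an aside, if you don't need to build the buffer yourself, the standard library's queue.Queue already provides a thread-safe bounded buffer with blocking put()/get(); a minimal sketch (the names are illustrative):
import queue
import threading

buf = queue.Queue(maxsize=5)  # bounded: put() blocks once 5 items are queued

def consumer():
    while True:
        item = buf.get()      # blocks until an item is available
        if item is None:      # sentinel telling the consumer to stop
            break
        print('consumed', item)
        buf.task_done()

t = threading.Thread(target=consumer, daemon=True)
t.start()

for i in range(6):            # the 6th put() simply waits until the consumer catches up
    buf.put(i)
buf.put(None)                 # tell the consumer to stop
t.join()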

Start python Process with output and timeout

I'm trying to find a way to start a new Process and get its output if it takes less than X seconds. If the process takes longer, I would like to ignore the Process result, kill the Process and carry on.
I basically need to add a timer to the code below. Not sure if there's a better way to do it; I'm open to a different and better solution.
from multiprocessing import Process, Queue

def f(q):
    # Ugly work
    q.put(['hello', 'world'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()
    p.join()
Thanks!
You may find the following module useful in your case:
Module
#! /usr/bin/env python3
"""Allow functions to be wrapped in a timeout API.

Since code can take a long time to run and may need to terminate before
finishing, this module provides a set_timeout decorator to wrap functions."""

__author__ = 'Stephen "Zero" Chappell ' \
             '<stephen.paul.chappell@atlantis-zero.net>'
__date__ = '18 December 2017'
__version__ = 1, 0, 1
__all__ = [
    'set_timeout',
    'run_with_timeout'
]

import multiprocessing
import sys
import time

DEFAULT_TIMEOUT = 60


def set_timeout(limit=None):
    """Return a wrapper that provides a timeout API for callers."""
    if limit is None:
        limit = DEFAULT_TIMEOUT
    _Timeout.validate_limit(limit)

    def wrapper(entry_point):
        return _Timeout(entry_point, limit)

    return wrapper


def run_with_timeout(limit, polling_interval, entry_point, *args, **kwargs):
    """Execute a callable object and automatically poll for results."""
    engine = set_timeout(limit)(entry_point)
    engine(*args, **kwargs)
    while engine.ready is False:
        time.sleep(polling_interval)
    return engine.value


def _target(queue, entry_point, *args, **kwargs):
    """Help with multiprocessing calls by being a top-level module function."""
    # noinspection PyPep8,PyBroadException
    try:
        queue.put((True, entry_point(*args, **kwargs)))
    except:
        queue.put((False, sys.exc_info()[1]))


class _Timeout:
    """_Timeout(entry_point, limit) -> _Timeout instance"""

    def __init__(self, entry_point, limit):
        """Initialize the _Timeout instance with all needed attributes."""
        self.__entry_point = entry_point
        self.__limit = limit
        self.__queue = multiprocessing.Queue()
        self.__process = multiprocessing.Process()
        self.__timeout = time.monotonic()

    def __call__(self, *args, **kwargs):
        """Begin execution of the entry point in a separate process."""
        self.cancel()
        self.__queue = multiprocessing.Queue(1)
        self.__process = multiprocessing.Process(
            target=_target,
            args=(self.__queue, self.__entry_point) + args,
            kwargs=kwargs
        )
        self.__process.daemon = True
        self.__process.start()
        self.__timeout = time.monotonic() + self.__limit

    def cancel(self):
        """Terminate execution if possible."""
        if self.__process.is_alive():
            self.__process.terminate()

    @property
    def ready(self):
        """Property letting callers know if a returned value is available."""
        if self.__queue.full():
            return True
        elif not self.__queue.empty():
            return True
        elif self.__timeout < time.monotonic():
            self.cancel()
        else:
            return False

    @property
    def value(self):
        """Property that retrieves a returned value if available."""
        if self.ready is True:
            valid, value = self.__queue.get()
            if valid:
                return value
            raise value
        raise TimeoutError('execution timed out before terminating')

    @property
    def limit(self):
        """Property controlling what the timeout period is in seconds."""
        return self.__limit

    @limit.setter
    def limit(self, value):
        self.validate_limit(value)
        self.__limit = value

    @staticmethod
    def validate_limit(value):
        """Verify that the limit's value is not too low."""
        if value <= 0:
            raise ValueError('limit must be greater than zero')
To use it, see the following example:
Example
from time import sleep


def main():
    timeout_after_four_seconds = set_timeout(4)
    # create copies of a function that have a timeout
    a = timeout_after_four_seconds(do_something)
    b = timeout_after_four_seconds(do_something)
    c = timeout_after_four_seconds(do_something)
    # execute the functions in separate processes
    a('Hello', 1)
    b('World', 5)
    c('Jacob', 3)
    # poll the functions to find out what they returned
    results = [a, b, c]
    polling = set(results)
    while polling:
        for process, name in zip(results, 'abc'):
            if process in polling:
                ready = process.ready
                if ready is True:  # if the function returned
                    print(name, 'returned', process.value)
                    polling.remove(process)
                elif ready is None:  # if the function took too long
                    print(name, 'reached timeout')
                    polling.remove(process)
                else:  # if the function is running
                    assert ready is False, 'ready must be True, False, or None'
        sleep(0.1)
    print('Done.')


def do_something(data, work):
    sleep(work)
    print(data)
    return work


if __name__ == '__main__':
    main()
Does the process you are running involve a loop?
If so, you can record a timestamp prior to starting the loop and include an if statement within the loop, with a sys.exit() call terminating the script if the current timestamp differs from the recorded start timestamp by more than x seconds.
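A rough sketch of that idea (the names and the limit are illustrative; this goes inside the worker's own code):
import sys
import time

TIME_LIMIT = 10  # seconds; illustrative value
start = time.time()

while True:
    # ... do one chunk of the ugly work ...
    if time.time() - start > TIME_LIMIT:
        sys.exit("time limit exceeded")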
All you need to adapt the queue example from the docs to your case is to pass the timeout to the q.get() call and terminate the process on timeout:
from Queue import Empty
...
try:
    print q.get(timeout=timeout)
except Empty:  # no value, timeout occurred
    p.terminate()
    q = None  # the queue might be corrupted after the `terminate()` call
p.join()
Using a Pipe might be more lightweight; otherwise the code is the same (you could use .poll(timeout) to find out whether there is data to receive).
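For illustration, a minimal sketch of the Pipe-based variant (Python 3 syntax; the worker is assumed to send its result over the connection):
from multiprocessing import Process, Pipe

def f(conn):
    # Ugly work
    conn.send(['hello', 'world'])
    conn.close()

if __name__ == '__main__':
    timeout = 5  # seconds; illustrative value
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    if parent_conn.poll(timeout):   # wait up to `timeout` seconds for data
        print(parent_conn.recv())
    else:                           # no data in time: give up on the result
        p.terminate()
    p.join()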

Why doesn't SIGVTALRM trigger inside time.sleep()?

I'm trying to use SIGVTALRM to snapshot profile my Python code, but it doesn't seem to be firing inside blocking operations like time.sleep() and socket operations.
Why is that? And is there any way to address that, so I can collect samples while I'm inside blocking operations?
I've also tried using ITIMER_PROF/SIGPROF and ITIMER_REAL/SIGALRM and both seem to produce similar results.
The code I'm testing with follows, and the output is something like:
$ python profiler-test.py
<module>(__main__:1);test_sampling_profiler(__main__:53): 1
<module>(__main__:1);test_sampling_profiler(__main__:53);busyloop(__main__:48): 1509
Note that the timesleep function isn't shown at all.
Test code:
import time
import signal
import collections


class SamplingProfiler(object):
    def __init__(self, interval=0.001, logger=None):
        self.interval = interval
        self.running = False
        self.counter = collections.Counter()

    def _sample(self, signum, frame):
        if not self.running:
            return
        stack = []
        while frame is not None:
            formatted_frame = "%s(%s:%s)" % (
                frame.f_code.co_name,
                frame.f_globals.get('__name__'),
                frame.f_code.co_firstlineno,
            )
            stack.append(formatted_frame)
            frame = frame.f_back
        formatted_stack = ';'.join(reversed(stack))
        self.counter[formatted_stack] += 1
        signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)

    def start(self):
        if self.running:
            return
        signal.signal(signal.SIGVTALRM, self._sample)
        signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)
        self.running = True

    def stop(self):
        if not self.running:
            return
        self.running = False
        signal.signal(signal.SIGVTALRM, signal.SIG_IGN)

    def flush(self):
        res = self.counter
        self.counter = collections.Counter()
        return res


def busyloop():
    start = time.time()
    while time.time() - start < 5:
        pass


def timesleep():
    time.sleep(5)


def test_sampling_profiler():
    p = SamplingProfiler()
    p.start()
    busyloop()
    timesleep()
    p.stop()
    print "\n".join("%s: %s" % x for x in sorted(p.flush().items()))


if __name__ == "__main__":
    test_sampling_profiler()
I'm not sure why time.sleep works that way (could it be using SIGALRM for itself to know when to resume?), but Popen.wait does not block signals, so in the worst case you can call out to the OS sleep command.
Another approach is to use a separate thread to trigger the sampling:
import sys
import threading
import time
import collections


class SamplingProfiler(object):
    def __init__(self, interval=0.001):
        self.interval = interval
        self.running = False
        self.counter = collections.Counter()
        self.thread = threading.Thread(target=self._sample)

    def _sample(self):
        while self.running:
            next_wakeup_time = time.time() + self.interval
            for thread_id, frame in sys._current_frames().items():
                if thread_id == self.thread.ident:
                    continue
                stack = []
                while frame is not None:
                    formatted_frame = "%s(%s:%s)" % (
                        frame.f_code.co_name,
                        frame.f_globals.get('__name__'),
                        frame.f_code.co_firstlineno,
                    )
                    stack.append(formatted_frame)
                    frame = frame.f_back
                formatted_stack = ';'.join(reversed(stack))
                self.counter[formatted_stack] += 1
            sleep_time = next_wakeup_time - time.time()
            if sleep_time > 0:
                time.sleep(sleep_time)

    def start(self):
        if self.running:
            return
        self.running = True
        self.thread.start()

    def stop(self):
        if not self.running:
            return
        self.running = False

    def flush(self):
        res = self.counter
        self.counter = collections.Counter()
        return res


def busyloop():
    start = time.time()
    while time.time() - start < 5:
        pass


def timesleep():
    time.sleep(5)


def test_sampling_profiler():
    p = SamplingProfiler()
    p.start()
    busyloop()
    timesleep()
    p.stop()
    print "\n".join("%s: %s" % x for x in sorted(p.flush().items()))


if __name__ == "__main__":
    test_sampling_profiler()
When doing it this way the result is:
$ python profiler-test.py
<module>(__main__:1);test_sampling_profiler(__main__:62);busyloop(__main__:54): 2875
<module>(__main__:1);test_sampling_profiler(__main__:62);start(__main__:37);start(threading:717);wait(threading:597);wait(threading:309): 1
<module>(__main__:1);test_sampling_profiler(__main__:62);timesleep(__main__:59): 4280
Still not totally fair, but better than no samples at all during sleep.
The absence of SIGVTALRM during a sleep() doesn't surprise me, since ITIMER_VIRTUAL "runs only when the process is executing."
(As an aside, CPython on non-Windows platforms implements time.sleep() in terms of select().)
With a plain SIGALRM, however, I expect a signal interruption and indeed I observe one:
<module>(__main__:1);test_sampling_profiler(__main__:62);busyloop(__main__:54): 4914
<module>(__main__:1);test_sampling_profiler(__main__:62);timesleep(__main__:59): 1
I changed the code somewhat, but you get the idea:
class SamplingProfiler(object):
    TimerSigs = {
        signal.ITIMER_PROF: signal.SIGPROF,
        signal.ITIMER_REAL: signal.SIGALRM,
        signal.ITIMER_VIRTUAL: signal.SIGVTALRM,
    }

    def __init__(self, interval=0.001, timer=signal.ITIMER_REAL):  # CHANGE
        self.interval = interval
        self.running = False
        self.counter = collections.Counter()
        self.timer = timer                    # CHANGE
        self.signal = self.TimerSigs[timer]   # CHANGE
    ....
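The rest of the changes are elided in the answer; presumably _sample(), start() and stop() just use the stored timer and signal instead of the hard-coded ITIMER_VIRTUAL/SIGVTALRM, roughly like this (a sketch, not the answerer's exact code):
    def _sample(self, signum, frame):
        # ... collect the stack exactly as before ...
        signal.setitimer(self.timer, self.interval, 0)   # re-arm the chosen timer

    def start(self):
        if self.running:
            return
        signal.signal(self.signal, self._sample)         # install handler for the chosen signal
        signal.setitimer(self.timer, self.interval, 0)
        self.running = True

    def stop(self):
        if not self.running:
            return
        self.running = False
        signal.signal(self.signal, signal.SIG_IGN)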

How to start and stop a thread

How can I start and stop a thread with my poor thread class?
It runs in a loop, and I want to restart it again at the beginning of the code. How can I do start-stop-restart-stop-restart?
My class:
import threading
import time


class Concur(threading.Thread):
    def __init__(self):
        self.stopped = False
        threading.Thread.__init__(self)

    def run(self):
        i = 0
        while not self.stopped:
            time.sleep(1)
            i = i + 1
In the main code, I want:
inst = Concur()

while condition:
    inst.start()
    # After some operation
    inst.stop()
    # Some other operation
You can't actually stop and then restart a thread since you can't call its start() method again after its run() method has terminated. However you can make one pause and then later resume its execution by using a threading.Condition variable to avoid concurrency problems when checking or changing its running state.
threading.Condition objects have an associated threading.Lock object and methods to wait for it to be released and will notify any waiting threads when that occurs. Here's an example derived from the code in your question which shows this being done. In the example code I've made the Condition variable a part of Thread subclass instances to better encapsulate the implementation and avoid needing to introduce additional global variables:
from __future__ import print_function
import threading
import time


class Concur(threading.Thread):
    def __init__(self):
        super(Concur, self).__init__()
        self.iterations = 0
        self.daemon = True  # Allow main to exit even if still running.
        self.paused = True  # Start out paused.
        self.state = threading.Condition()

    def run(self):
        self.resume()
        while True:
            with self.state:
                if self.paused:
                    self.state.wait()  # Block execution until notified.
            # Do stuff...
            time.sleep(.1)
            self.iterations += 1

    def pause(self):
        with self.state:
            self.paused = True  # Block self.

    def resume(self):
        with self.state:
            self.paused = False
            self.state.notify()  # Unblock self if waiting.


class Stopwatch(object):
    """ Simple class to measure elapsed times. """
    def start(self):
        """ Establish reference point for elapsed time measurements. """
        self.start_time = time.time()
        return self

    @property
    def elapsed_time(self):
        """ Seconds since started. """
        try:
            return time.time() - self.start_time
        except AttributeError:  # Wasn't explicitly started.
            self.start_time = time.time()
            return 0


MAX_RUN_TIME = 5  # Seconds.

concur = Concur()
stopwatch = Stopwatch()

print('Running for {} seconds...'.format(MAX_RUN_TIME))
concur.start()
while stopwatch.elapsed_time < MAX_RUN_TIME:
    concur.resume()
    # Can also do other concurrent operations here...
    concur.pause()
    # Do some other stuff...

# Show Concur thread executed.
print('concur.iterations: {}'.format(concur.iterations))
This is David Heffernan's idea fleshed-out. The example below runs for 1 second, then stops for 1 second, then runs for 1 second, and so on.
import time
import threading
import datetime as DT
import logging

logger = logging.getLogger(__name__)


def worker(cond):
    i = 0
    while True:
        with cond:
            cond.wait()
            logger.info(i)
            time.sleep(0.01)
            i += 1


logging.basicConfig(level=logging.DEBUG,
                    format='[%(asctime)s %(threadName)s] %(message)s',
                    datefmt='%H:%M:%S')

cond = threading.Condition()
t = threading.Thread(target=worker, args=(cond, ))
t.daemon = True
t.start()

start = DT.datetime.now()
while True:
    now = DT.datetime.now()
    if (now-start).total_seconds() > 60: break
    if now.second % 2:
        with cond:
            cond.notify()
The implementation of stop() would look like this:
def stop(self):
    self.stopped = True
If you want to restart, then you can just create a new instance and start that.
while condition:
    inst = Concur()
    inst.start()
    # after some operation
    inst.stop()
    # some other operation
The documentation for Thread makes it clear that the start() method can only be called once for each instance of the class.
If you want to pause and resume a thread, then you'll need to use a condition variable.
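As a side note, a common variant of the stop flag shown above uses threading.Event instead of a plain boolean, which also lets the loop sleep interruptibly; a minimal sketch (not from the answers above):
import threading

class Concur(threading.Thread):
    def __init__(self):
        super(Concur, self).__init__()
        self._stop_event = threading.Event()

    def run(self):
        i = 0
        # wait() returns True as soon as the event is set, so the loop
        # exits promptly instead of finishing a full one-second sleep
        while not self._stop_event.wait(1):
            i = i + 1

    def stop(self):
        self._stop_event.set()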
