Using multiple worker threads in Python

I have sample code using multiple worker threads below.
import threading
import time
from threading import Thread
from threading import Event
import signal, os
import sys

class CameraStreamingWidget():
    def __init__(self, ID):
        self.id = ID
        self.ready_lock = threading.Lock()
        self.ready = False
        self.stop_event = Event()
        self.get_frame_thread = Thread(target=self.get_frame, args=())
        self.get_frame_thread.daemon = True
        self.get_frame_thread.start()
        self.count = 0
        self.before_read = time.time()
        print("Camera "+str(ID)+" launched ");

    def reset_ready_flag(self):
        with self.ready_lock:
            self.ready = False
        return

    def read_ready_flag(self):
        ret = False
        with self.ready_lock:
            ret = self.ready
        return ret

    def get_frame(self):
        while not self.stop_event.is_set():
            try:
                time.sleep(1)
                self.count = self.count + 1
                if(self.count >= 4):
                    cur_time = time.time()
                    print(f'Camera thread {self.id}: {1000*(cur_time - self.before_read):.0f} ms')
                    with self.ready_lock:
                        self.ready = True
                    self.before_read = time.time()
                    self.count = 0
            except AttributeError:
                pass

class SIGINT_handler():
    def __init__(self):
        self.SIGINT_WORKER = False

    def signal_handler(self, signal, frame):
        print('\nYou pressed Ctrl+C!')
        self.SIGINT_WORKER = True

handler = SIGINT_handler()
signal.signal(signal.SIGINT, handler.signal_handler)

class InferenceWidget():
    def __init__(self,):
        # source params
        self.Cameras = []
        for id_ in range(4):
            cam = CameraStreamingWidget(id_)
            self.Cameras.append(cam)
        self.stop_event = Event()
        self.runinference_thread = Thread(target=self.runinference, args=())
        self.runinference_thread.daemon = True
        self.runinference_thread.start()

    def runinference(self):
        # Run
        while not self.stop_event.is_set():
            # This is for detection
            for id_, cam in enumerate(self.Cameras):
                if cam.read_ready_flag() == True:
                    inference_start = time.time()
                    time.sleep(0.08)
                    cam.reset_ready_flag()
                    print(f'Inference time {1000*(time.time() - inference_start):.0f} ms for cam_id {id_}')
            if handler.SIGINT_WORKER:
                self.stopexecution()
                return

    def stopexecution(self):
        print("Cameras are closed")
        self.stop_event.set()

# python spatiotemporal_det_multicam.py
def main():
    inferenceObj = InferenceWidget()
    inferenceObj.runinference()
    inferenceObj.runinference_thread.join()
    print("Thread exit")
    sys.exit()

if __name__ == '__main__':
    main()
The printed output is
Camera 0 launched
Camera 1 launched
Camera 2 launched
Camera 3 launched
cam_id 0: 4003 ms
cam_id 1: 4003 ms
cam_id 2: 4003 ms
cam_id 3: 4004 ms
Inference time 80 ms for cam_id 0
Inference time 80 ms for cam_id 0
Inference time 80 ms for cam_id 1
Inference time 80 ms for cam_id 1
Inference time 80 ms for cam_id 2
Inference time 80 ms for cam_id 2
Inference time 80 ms for cam_id 3
Inference time 85 ms for cam_id 3
^C
You pressed Ctrl+C!
Cameras are closed
Cameras are closed
Thread exit
What should happen is that cam_id 0 runs for 4 seconds, and after those 4 seconds, "Inference time 80 ms for cam_id 0" should print only once.
"Cameras are closed" should also print only once.
The expected output is
Camera 0 launched
Camera 1 launched
Camera 2 launched
Camera 3 launched
cam_id 0: 4003 ms
cam_id 1: 4003 ms
cam_id 2: 4003 ms
cam_id 3: 4004 ms
Inference time 80 ms for cam_id 0
Inference time 80 ms for cam_id 1
Inference time 80 ms for cam_id 2
Inference time 80 ms for cam_id 3
^C
You pressed Ctrl+C!
Cameras are closed
Thread exit
What is wrong with the code?

Related

Python time and size batcher

I need a little util to batch messages by count or time duration, whichever comes first (application: sending messages to Kinesis, either one at a time if production is slow, or in batches if all of a sudden there are lots of messages to send).
There are many ways to skin a cat, but I came up with the following, which uses a deque and threading.Timer. The questions are:
is it safe (this is used by the main thread)?
is there a simpler or more pythonic way of doing this?
profiling suggests that acquiring _thread.lock and _thread.start_new_thread take a while; is there a different way that would be faster? (Note: if Batcher(..., seconds=None) is used, there is no such cost).
import threading
import time
from collections import deque

class Batcher():
    def __init__(self, size=None, seconds=None, callback=None):
        self.batch = deque()
        self.size = size
        self.seconds = seconds
        self.callback = callback
        self.thread = None

    def flush(self):
        if self.thread:
            self.thread.cancel()
            self.thread = None
        if self.batch:
            a = list(self.batch)
            self.batch.clear()
            if self.callback:
                self.callback(a)

    def add(self, e):
        self.batch.append(e)
        if self.size is not None and len(self.batch) >= self.size:
            self.flush()
        elif self.seconds is not None and self.thread is None:
            self.thread = threading.Timer(self.seconds, self.flush)
            self.thread.start()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.flush()
Simple test:
origin = time.time()

def walltime(origin):
    dt = time.time() - origin
    return f'{dt:6.3f} s'

def foo(batch):
    print(f'now={walltime(origin)}, batch={batch}')

with Batcher(size=3, seconds=0.5, callback=foo) as b:
    for k in range(7):
        b.add(f'at {walltime(origin)}: {k}')
        time.sleep(0.3)
Out[ ]:
now= 0.501 s, batch=['at 0.000 s: 0', 'at 0.301 s: 1']
now= 1.101 s, batch=['at 0.601 s: 2', 'at 0.902 s: 3']
now= 1.702 s, batch=['at 1.202 s: 4', 'at 1.503 s: 5']
now= 2.103 s, batch=['at 1.803 s: 6']
Speed test:
In[ ]:
%%time
batch_stats = []

def proc(batch):
    batch_stats.append(len(batch))

with Batcher(size=100, seconds=5, callback=proc) as b:
    for k in range(120164):
        b.add(k)
Out[ ]:
CPU times: user 166 ms, sys: 74.7 ms, total: 240 ms
Wall time: 178 ms
In[ ]:
Counter(batch_stats)
Out[ ]:
Counter({100: 1201, 64: 1})
The reason the code spends so much time within acquire.lock and start_thread is that you start a new thread every time you need a timer for a delayed send.
Here's a solution that keeps one thread constantly running in the background. Two conditions are used to trigger and to wait for the requested delay. It was twice as fast in my timing test, but I could only test on one machine:
class Batcher:
    def __init__(self, size=None, seconds=None, callback=None):
        self._batch = []
        self._seconds = seconds
        self._size = size
        self._callback = callback
        self._cyclic_requested = False
        self._wait_for_start = False
        self._cancelled = False
        self._timer_started = False
        self._lock = threading.RLock()
        self._timer_condition = threading.Condition(self._lock)
        self._finished = threading.Condition(self._lock)
        self._finished_flag = False
        self._thread = threading.Thread(target=self._cycle_send)
        self._thread.start()

    def _cycle_send(self):
        while True:
            with self._lock:
                # Wait for the timer_condition to be notified to start the timer
                self._wait_for_start = True
                # If a cyclic send was requested while the thread was not
                # waiting, go directly to the wait time
                if not self._cyclic_requested:
                    self._timer_condition.wait()
                # If finished is set, end the thread
                if self._finished_flag:
                    return
                # Reset the flags
                self._cyclic_requested = False
                self._wait_for_start = False
                self._cancelled = False
                self._timer_started = True
                # Wait for the finished condition to be notified or the timeout
                self._finished.wait(self._seconds)
                # If finished is set, end the thread
                if self._finished_flag:
                    return
                self._timer_started = False
                # If the send was cancelled, no sending is needed anymore;
                # go back to waiting
                if self._cancelled:
                    continue
                batch = self._batch
                self._batch = []
            self._send_batch(batch)

    def _send_batch(self, batch):
        if self._callback:
            self._callback(batch)

    def add(self, e):
        batch = None
        with self._lock:
            # Unconditionally append to the batch
            self._batch.append(e)
            if self._size is not None and len(self._batch) >= self._size:
                # If an immediate send is required, copy the batch, reset the
                # shared variable, and cancel the pending cyclic send by
                # notifying the finished condition
                batch = self._batch
                self._batch = []
                self._cancelled = True
                self._cyclic_requested = False
                self._finished.notify_all()
            # If the batch is not full, arrange for a timed send
            elif not self._timer_started:
                if self._wait_for_start:
                    self._timer_condition.notify_all()
                else:
                    self._cyclic_requested = True
        # The sending is done outside the lock to avoid keeping the lock for too long
        if batch is not None:
            self._send_batch(batch)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        with self._lock:
            # Set the finished flag and notify both conditions to let the thread terminate
            self._finished_flag = True
            self._timer_condition.notify_all()
            self._finished.notify_all()
        self._thread.join()
        # Send what is left
        self._send_batch(self._batch)
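For a quick sanity check, the simple test from earlier should work unchanged with this version. A sketch, assuming the foo callback and walltime helper defined above are still in scope:

# Reuses foo() and walltime() from the simple test above; only the Batcher
# implementation has changed, so the printed batches should look the same.
origin = time.time()
with Batcher(size=3, seconds=0.5, callback=foo) as b:
    for k in range(7):
        b.add(f'at {walltime(origin)}: {k}')
        time.sleep(0.3)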

Problem with cancelling timer within a function (python)

I have a function constantly running in a loop checking if it should start or cancel a timer that's defined in the function's scope. Timer needs to be defined within the function as the callback is also defined in the function. I'm able to start the timer fine, but when it tries to cancel, I get an error 'local variable 'timer' referenced before assignment'.
I've tried defining the timer and its callback in the global scope (which is undesirable), and I get another error 'threads can only be started once'.
import threading
import random

def start():
    trigger = random.randint(0,1)
    def do_something():
        print(trigger)
    if trigger == 0:
        timer = threading.Timer(2, do_something)
        timer.start()
    else:
        timer.cancel()  #: if trigger is 1, I want to cancel the timer
    threading.Timer(1, start).start()  #: start() is in a loop and is constantly checking trigger's value

start()
I want the same timer to be started or cancelled according to trigger's value. timer and its callback should be defined within the function.
This program shows how a random number can be used to start or stop a timer.
If the random number selects 0 enough times in a row, the timer will be started and be allowed to continue timing until time runs out and it calls its target.
If ever the random number selects 1, the timer is cancelled and the target is not called:
import threading
import random
import time

class Timing:
    def __init__(self):
        self.timer = None      # No timer at first
        self.something = None  # Nothing to print at first
        self.restart()

    def restart(self):
        self.run = threading.Timer(1.1, self.start)
        self.run.start()

    def cancel(self):
        if self.run is not None:
            self.run.cancel()
            self.run = None

    def start(self):
        trigger = random.randint(0, 1)
        self.do_start(trigger)

    def do_start(self, trigger):
        print('start', trigger)
        if trigger == 0:
            if self.timer is None:
                self.something = trigger
                self.timer = threading.Timer(2, self.do_something)
                self.timer.start()
        else:
            if self.timer is not None:
                self.timer.cancel()
                self.timer = None
                self.something = None
        self.restart()

    def do_something(self):
        print(self.something)

t = Timing()
print('sleeping...')
time.sleep(20)
t.cancel()
t.do_start(1)
t.cancel()
print('Done')
Sample output (YMMV because it's random)
sleeping...
start 1
start 0
start 1
start 0
start 0
0
start 1
start 0
start 1
start 1
start 1
start 1
start 1
start 0
start 1
start 0
start 0
0
start 1
start 0
start 1
Done
I've learnt from #quamrana and #smci and came up with this
import threading
import random

class Timer():
    pass

t = Timer()

def start():
    trigger = random.randint(0,1)
    def do_something():
        print(trigger)
    if trigger == 0:
        t.timer = threading.Timer(1, do_something)
        t.timer.start()
    else:
        if hasattr(t, 'timer'):
            t.timer.cancel()
    threading.Timer(1, start).start()

start()
This seems to solve the issue while keeping the code compact.

Handle multiprocess in python

My code processes some parallel Perforce tasks while showing a progress bar and letting the user terminate the job whenever they want. The problem is that when the user clicks the close button, the thread function is not killed; the lock is released and the main UI thread is unblocked.
The p4.run_sync() is not terminating when the Cancel button is clicked.
def P4SyncLibrary(args, que):
    syncType = args[0]
    view = args[1]
    p4 = P4CreateConnection(disable_tmp_cleanup=True)
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" %e
    p4.disconnect()
    que.put(None)

class CreateJob(QtGui.QDialog):
    def __init__(self, thread, args):
        QtGui.QDialog.__init__(self)
        self.ui = Ui_ProgressBar()
        self.ui.setupUi(self)
        self.ui.cancel.clicked.connect(self.closeEvent)
        self.ui.cancel.setIcon(QtGui.QIcon(QtGui.QPixmap("%s/delete.xpm" %resources)))
        self.threadControl = ThreadControl(thread=thread, args=args)
        self.connect(self.threadControl, QtCore.SIGNAL("__updateProgressBar(int)"), self.__updateProgressBar)
        self.threadControl.finished.connect(self.closeEvent)
        self.threadControl.start()

    #QtCore.pyqtSlot(int)
    def __updateProgressBar(self, val):
        self.ui.progressBar.setValue(val)
        self.setWindowTitle("Processing: {0}%".format(val))

    def closeEvent(self, QCloseEvent=None):
        if self.threadControl.isRunning():
            self.threadControl.stop()
            self.threadControl.wait()
        if QCloseEvent: QtGui.QDialog.closeEvent(self, QCloseEvent)
        else: self.close()

    def getResults(self):
        return self.threadControl.resultDict

class ThreadControl(QtCore.QThread):
    stopFlag = 0

    def __init__(self, thread=None, args=None):
        super(ThreadControl, self).__init__()
        self.args = args
        self.thread = thread
        self.resultDict = []

    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            process = multiprocessing.Process(target=self.thread, args=(arg, queue))
            process.start()
            threads[process] = 1  ## ACTIVE thread
        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self.stopFlag:
                for t in threads.keys():
                    if threads[t] == 1:
                        t.terminate()
                        t.join()
                        threads[t] = 0
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self.stopFlag: break  ## Process threads termination
                    elif threads[t] == 1 and not t.is_alive():
                        threads[t] = 0
                        completedThreads += 1
                        self.resultDict.append(queue.get())
            self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads*100)/total)
            sleep(0.5)  ## Prevent CPU from overloading

    def stop(self):
        self.stopFlag = 1
A job is created using an instance of CreateJob:
CreateJob(thread=P4SyncLibrary, args=P4Libraries).exec_()
The only solution I could come up with is to pass the p4 object to the worker as an argument, so that the P4 server connection can be disconnected when the user wants to cancel the job.
def P4SyncLibrary(p4, args, que):
    syncType = args[0]
    view = args[1]
    try:
        p4.run_sync(view)
    except P4Exception:
        for e in p4.errors:
            print "SyncError: - %s" %e
    que.put(None)

class ThreadControl(QtCore.QThread):
    ...
    def run(self):
        threads = {}
        queue = multiprocessing.Queue()
        for arg in self.args:
            connection = P4CreateConnection(disable_tmp_cleanup=True)
            if connection.connected():
                process = multiprocessing.Process(target=self.thread, args=(connection, arg, queue))
                process.start()
                threads[process] = {
                    'isAlive': True,
                    'connection': connection
                }
        # WAIT TILL ALL PROCESSES COMPLETE
        completedThreads = 0
        total = len(threads.keys())
        while completedThreads != total:
            if self._stop:
                for t in threads.keys():
                    if threads[t]['isAlive']:
                        threads[t]['connection'].disconnect()
                        t.terminate()
                        t.join()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
            else:
                for t in threads.keys():
                    if self._stop: break  ## Process threads termination
                    elif threads[t]['isAlive'] and not t.is_alive():
                        threads[t]['connection'].disconnect()
                        threads[t]['isAlive'] = False
                        completedThreads += 1
                        self.results.append(queue.get())
            self.emit(QtCore.SIGNAL('__updateProgressBar(int)'), (completedThreads*100)/total)
            sleep(0.5)  ## Prevent CPU from overloading

Threading does not thread in my Python code

First of all, I learned Python on my own from online tutorials and (mostly) learning by doing, so I might have done some strange things in my code. :)
So, I am working on my first bigger project with a Raspberry Pi, and for that I need code running in parallel. I wrote this part of my code for managing a simple D-pad:
Problematic code
import threading
import time
import pigpio

# input from the D-pad goes to these pins
BUTT_UP = 12
BUTT_LEFT = 16
BUTT_CENTER = 25
BUTT_RIGHT = 20
BUTT_DOWN = 21

class dpad_monitoring(threading.Thread):
    '''thread for monitoring the D-Pad'''
    def __init__(self, thread_ID, butt_up, butt_left, butt_center, butt_right, butt_down, res = 10.00):
        threading.Thread.__init__(self)
        self.running = True
        self.thread_ID = thread_ID
        # number of checks per sec
        self.res = res
        # key pins
        self._pins = [butt_up, butt_left, butt_center, butt_right, butt_down]
        # key monitor
        self.last_pressed = 0
        '''key numbers:
        UP LEFT CENTER RIGHT DOWN
        1   2     3      4    5 '''
        # setting up GPIO
        self.pi = pigpio.pi()
        for i in range(0, len(self._pins)):
            self.pi.set_mode(self._pins[i], pigpio.INPUT)
            self.pi.set_pull_up_down(self._pins[i], pigpio.PUD_UP)

    def stop(self):
        '''stopping the thread cleanly'''
        self.pi.stop()
        self.running = False

    def run(self):
        '''checks which button is pressed as many times per sec as specified
        in the res variable in init. If any of them is pressed, it suspends itself
        until self.last_pressed is set to 0 again by main()'''
        while self.running:
            states = []
            for i in range(0, len(self._pins)):
                state = not self.pi.read(self._pins[i])  # pi.read returns 1L if the pin is high,
                states.append(state)                     # which means the button is not pressed, 0L when pressed
            for i in range(0, len(states)):
                if states[i]:
                    self.last_pressed = i+1
            '''UGLY AS SHIT but works now, will change to locks after the code works'''
            if self.last_pressed != 0:
                while self.last_pressed != 0:
                    pass
            else:
                time.sleep(1/self.res)
            print 'im groot'  # for debugging

def main():
    print 'ok'  # debug
    dpad = dpad_monitoring(0, BUTT_UP, BUTT_LEFT, BUTT_CENTER, BUTT_RIGHT, BUTT_DOWN)
    dpad.run()
    print 'okey'  # debug again
    while i != 3:
        i = dpad.last_pressed
        if i == 1:
            print 'UP'
            dpad.last_pressed = 0
        if i == 2:
            print 'LEFT'
            dpad.last_pressed = 0
        if i == 4:
            print 'RIGHT'
            dpad.last_pressed = 0
        if i == 5:
            print 'DOWN'
            dpad.last_pressed = 0
    print 'CENTER, stopping'
    time.sleep(0.5)
    dpad.stop()

if __name__ == '__main__':
    main()
The problem is when I run the code, I get this:
ok
im groot
im groot
im groot
im groot
im groot
im groot
... (endless groot)
So it seems the code gets stuck at dpad.run(). Now AFAIK, the main point of threading is that the code continues after calling the run() function and can interact with the threading object, so I don't know what the hell is going on. Could you experts help me out, please?
(Since the code after dpad.run() never ran, I don't know if it works; it may be all garbage. :P)
The strange thing is that this simple test code works with no problem:
Cool code:
import threading
import time

class thread1(threading.Thread):
    def __init__(self, threadID, start_from):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.i = start_from
        self.running = True

    def run(self):
        while self.running:
            time.sleep(1)
            self.i = self.i+1

    def stop(self):
        self.running = False

class thread2(threading.Thread):
    def __init__(self, threadID, start_from):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.i = start_from
        self.running = True

    def run(self):
        while self.running:
            time.sleep(0.5)
            self.i = self.i+10

    def stop(self):
        self.running = False

thread1 = thread1(1, 10)
thread2 = thread2(2, 1)

thread1.start()
thread2.start()

for j in range(30):
    print thread1.i, thread2.i
    time.sleep(0.3)

thread1.stop()
thread2.stop()
The output is
10 1
10 1
10 11
10 11
11 21
11 31
11 31
12 41
12 41
12 51
13 61
13 61
13 71
13 71
14 81
14 91
14 91
15 101
15 101
15 111
16 121
16 121
16 131
16 131
17 141
17 151
17 151
18 161
18 161
18 171
------------------
(program exited with code: 0)
Press return to continue
So there I got the main thread plus the two others running in parallel, unlike in the previous code. What the hell is going on?
Instead of
dpad.run()
do
dpad.start()
When calling run() directly you are skipping the whole threading functionality and using it as a regular class.
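A minimal sketch of the difference (the Worker class here is just an illustration, not the dpad_monitoring class above):

import threading
import time

class Worker(threading.Thread):
    def run(self):
        for _ in range(3):
            print('working in ' + threading.current_thread().name)
            time.sleep(0.1)

w = Worker()
# w.run()   # would execute run() in the calling thread and block until it finishes
w.start()   # spawns a new thread, calls run() there, and returns immediately
print('main thread is free to do other work')
w.join()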

Why doesn't SIGVTALRM trigger inside time.sleep()?

I'm trying to use SIGVTALRM to snapshot profile my Python code, but it doesn't seem to be firing inside blocking operations like time.sleep() and socket operations.
Why is that? And is there any way to address that, so I can collect samples while I'm inside blocking operations?
I've also tried using ITIMER_PROF/SIGPROF and ITIMER_REAL/SIGALRM and both seem to produce similar results.
The code I'm testing with follows, and the output is something like:
$ python profiler-test.py
<module>(__main__:1);test_sampling_profiler(__main__:53): 1
<module>(__main__:1);test_sampling_profiler(__main__:53);busyloop(__main__:48): 1509
Note that the timesleep function isn't shown at all.
Test code:
import time
import signal
import collections

class SamplingProfiler(object):
    def __init__(self, interval=0.001, logger=None):
        self.interval = interval
        self.running = False
        self.counter = collections.Counter()

    def _sample(self, signum, frame):
        if not self.running:
            return
        stack = []
        while frame is not None:
            formatted_frame = "%s(%s:%s)" %(
                frame.f_code.co_name,
                frame.f_globals.get('__name__'),
                frame.f_code.co_firstlineno,
            )
            stack.append(formatted_frame)
            frame = frame.f_back
        formatted_stack = ';'.join(reversed(stack))
        self.counter[formatted_stack] += 1
        signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)

    def start(self):
        if self.running:
            return
        signal.signal(signal.SIGVTALRM, self._sample)
        signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)
        self.running = True

    def stop(self):
        if not self.running:
            return
        self.running = False
        signal.signal(signal.SIGVTALRM, signal.SIG_IGN)

    def flush(self):
        res = self.counter
        self.counter = collections.Counter()
        return res

def busyloop():
    start = time.time()
    while time.time() - start < 5:
        pass

def timesleep():
    time.sleep(5)

def test_sampling_profiler():
    p = SamplingProfiler()
    p.start()
    busyloop()
    timesleep()
    p.stop()
    print "\n".join("%s: %s" %x for x in sorted(p.flush().items()))

if __name__ == "__main__":
    test_sampling_profiler()
Not sure why time.sleep works that way (could it be using SIGALRM for itself to know when to resume?), but Popen.wait does not block signals, so worst case you can call out to the OS sleep command.
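A sketch of that idea (assuming a POSIX sleep binary is on the PATH) would be to swap the blocking time.sleep() in the test for a wait on an external process:

import subprocess

def timesleep():
    # Block in a child process instead of in time.sleep(); Popen.wait()
    # (which subprocess.call uses internally) does not block signal
    # delivery to the Python process.
    subprocess.call(["sleep", "5"])

Note that with ITIMER_VIRTUAL the timer still only advances while the process itself is executing, so this mainly helps when sampling with ITIMER_REAL/SIGALRM.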
Another approach is to use a separate thread to trigger the sampling:
import sys
import threading
import time
import collections

class SamplingProfiler(object):
    def __init__(self, interval=0.001):
        self.interval = interval
        self.running = False
        self.counter = collections.Counter()
        self.thread = threading.Thread(target=self._sample)

    def _sample(self):
        while self.running:
            next_wakeup_time = time.time() + self.interval
            for thread_id, frame in sys._current_frames().items():
                if thread_id == self.thread.ident:
                    continue
                stack = []
                while frame is not None:
                    formatted_frame = "%s(%s:%s)" % (
                        frame.f_code.co_name,
                        frame.f_globals.get('__name__'),
                        frame.f_code.co_firstlineno,
                    )
                    stack.append(formatted_frame)
                    frame = frame.f_back
                formatted_stack = ';'.join(reversed(stack))
                self.counter[formatted_stack] += 1
            sleep_time = next_wakeup_time - time.time()
            if sleep_time > 0:
                time.sleep(sleep_time)

    def start(self):
        if self.running:
            return
        self.running = True
        self.thread.start()

    def stop(self):
        if not self.running:
            return
        self.running = False

    def flush(self):
        res = self.counter
        self.counter = collections.Counter()
        return res

def busyloop():
    start = time.time()
    while time.time() - start < 5:
        pass

def timesleep():
    time.sleep(5)

def test_sampling_profiler():
    p = SamplingProfiler()
    p.start()
    busyloop()
    timesleep()
    p.stop()
    print "\n".join("%s: %s" %x for x in sorted(p.flush().items()))

if __name__ == "__main__":
    test_sampling_profiler()
When doing it this way the result is:
$ python profiler-test.py
<module>(__main__:1);test_sampling_profiler(__main__:62);busyloop(__main__:54): 2875
<module>(__main__:1);test_sampling_profiler(__main__:62);start(__main__:37);start(threading:717);wait(threading:597);wait(threading:309): 1
<module>(__main__:1);test_sampling_profiler(__main__:62);timesleep(__main__:59): 4280
Still not totally fair, but better than no samples at all during sleep.
The absence of SIGVTALRM during a sleep() doesn't surprise me, since ITIMER_VIRTUAL "runs only when the process is executing."
(As an aside, CPython on non-Windows platforms implements time.sleep() in terms of select().)
With a plain SIGALRM, however, I expect a signal interruption and indeed I observe one:
<module>(__main__:1);test_sampling_profiler(__main__:62);busyloop(__main__:54): 4914
<module>(__main__:1);test_sampling_profiler(__main__:62);timesleep(__main__:59): 1
I changed the code somewhat, but you get the idea:
class SamplingProfiler(object):
    TimerSigs = {
        signal.ITIMER_PROF    : signal.SIGPROF,
        signal.ITIMER_REAL    : signal.SIGALRM,
        signal.ITIMER_VIRTUAL : signal.SIGVTALRM,
    }

    def __init__(self, interval=0.001, timer=signal.ITIMER_REAL):  # CHANGE
        self.interval = interval
        self.running = False
        self.counter = collections.Counter()
        self.timer = timer                   # CHANGE
        self.signal = self.TimerSigs[timer]  # CHANGE
    ....
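To see the difference between the two timers in isolation, here is a small standalone check (a Python 3 sketch, not part of the original answer): arm each timer and then block in time.sleep(). The SIGALRM handler should fire mid-sleep, while SIGVTALRM normally stays silent because the virtual timer only counts time the process spends executing.

import signal
import time

def handler(signum, frame):
    print('caught signal', signum)

for timer, sig in ((signal.ITIMER_REAL, signal.SIGALRM),
                   (signal.ITIMER_VIRTUAL, signal.SIGVTALRM)):
    signal.signal(sig, handler)
    signal.setitimer(timer, 0.5, 0)   # one-shot timer, 0.5 s
    time.sleep(2)                     # expect a signal only for ITIMER_REAL
    signal.setitimer(timer, 0, 0)     # disarm the timer
    signal.signal(sig, signal.SIG_DFL)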
