Python - How can I pause a thread from another thread?

I am confused about how to show a waiting message.
In my program:
Step 1: it keeps looping to show the waiting message.
Step 2: if trigger_file exists, it stops the waiting message and runs main_process().
Step 3: after main_process() finishes, it shows the waiting message again.
I tried to use the variable waiting to stop the waiting message, but it is not working.
I am not sure how to use async/await or multithreading for this case.
Thank you
import os
import time
import threading

waiting = True
trigger_file = r'D:\Desktop\OK'

def main_process():
    print('1')
    time.sleep(5)
    print('2')
    time.sleep(5)
    print('3')
    time.sleep(5)
    print('4')
    time.sleep(5)
    print('5')

def print_waiting():  # animation of waiting
    while waiting:
        for loading_symbol in ['|','/','-','\\','|','/','-','\\','|']:
            print('\r[INFO] Waiting for trigger... '+loading_symbol,end="")
            time.sleep(0.2)

def triggerListener():  # trigger a function if the file exists
    while True:
        if os.path.exists(trigger_file):
            global waiting
            waiting = False
            print('\n[INFO] main process start')
            main_process()
            waiting = True

if __name__ == "__main__":
    # creating threads
    t1 = threading.Thread(target=print_waiting)
    t2 = threading.Thread(target=triggerListener)
    # starting thread 1
    t1.start()
    # starting thread 2
    t2.start()
    # wait until thread 1 is completely executed
    t1.join()
    # wait until thread 2 is completely executed
    t2.join()
    # both threads completely executed
    print("Done!")
Expected Output:
[INFO] Waiting for trigger... -
[INFO] main process start
1
2
3
4
5
[INFO] Waiting for trigger... -

Finally, I used threading.Lock to solve the problem: acquire the lock before printing, and release it after the function finishes.
class Print_waiting(threading.Thread):
    def __init__(self, lock):
        threading.Thread.__init__(self)
        self.running = True
        self.lock = lock

    def run(self):  # animation of waiting
        while self.running:
            for loading_symbol in ['|','/','-','\\','|','/','-','\\','|']:
                self.lock.acquire()
                print('\r[INFO] Waiting for trigger... '+loading_symbol,end="")
                self.lock.release()
                time.sleep(0.2)

    def stop(self):
        self.running = False

class TriggerListener(threading.Thread):
    def __init__(self, lock):
        threading.Thread.__init__(self)
        self.running = True
        self.lock = lock  # for mutex

    def run(self):  # trigger a function if the file exists
        while self.running:
            if os.path.exists(trigger_file):
                self.lock.acquire()
                print('\n[INFO] main process start')
                Program.main()  # the author's main routine (main_process() in the question)
                self.lock.release()

    def stop(self):
        self.running = False

if __name__ == "__main__":
    lock = threading.Lock()
    waiting_anime = Print_waiting(lock)
    Trigger_RPA = TriggerListener(lock)
    Trigger_RPA.start()
    waiting_anime.start()
    Trigger_RPA.join()
    waiting_anime.join()
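For reference, the same pause/resume behaviour can also be had without a lock by driving the animation with a threading.Event: the animation thread blocks on the event while the main process runs. This is just a minimal sketch of that idea (with a stand-in main_process()), not the solution above:

import itertools
import os
import threading
import time

trigger_file = r'D:\Desktop\OK'
can_animate = threading.Event()  # set -> animation runs, cleared -> animation paused
can_animate.set()

def main_process():  # stand-in for the real main_process()
    time.sleep(3)

def print_waiting():
    symbols = itertools.cycle('|/-\\')
    while True:
        can_animate.wait()  # blocks here while the main process is running
        print('\r[INFO] Waiting for trigger... ' + next(symbols), end="")
        time.sleep(0.2)

def trigger_listener():
    while True:
        if os.path.exists(trigger_file):
            can_animate.clear()  # pause the animation
            print('\n[INFO] main process start')
            main_process()
            can_animate.set()    # resume the animation
        time.sleep(0.5)

threading.Thread(target=print_waiting, daemon=True).start()
trigger_listener()  # runs forever in the main thread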

Related

How to implement right threading execution order in python?

I recently started studying threads in Python, and I ran into this problem: I need the two function to finish executing after the function one (run in a thread) has finished, but the join method does not work, apparently because of the while True loop in the third function. I tried using a queue, but it didn't work either.
The code itself:
from threading import Thread, Event
import time

def one():
    event.set()
    thr.join()
    for i in range(3):
        print('some print')
        time.sleep(1)

def two():
    t = Thread(target=one)
    t.start()
    # with t.join() here the program does not work at all, same thing with event.set()
    print('func two finished')

def three(callback, event):
    c = 0
    while True:
        c += 1
        time.sleep(1)
        print('func 3 is working')
        if c == 5:
            two()
        if event.is_set():
            callback(c)
            print('func 3 is stopped')
            break

def callback(t):
    print('callback ', t)

def thread(callback):
    global event, thr
    event = Event()
    thr = Thread(target=three, args=(callback, event,))
    thr.start()
    thr.join()

thread(callback)
current output:
func 3 is working
func 3 is working
func 3 is working
func 3 is working
func 3 is working
func two finished
callback 5
func 3 is stopped
some print
some print
some print
expected:
func 3 is working
func 3 is working
func 3 is working
func 3 is working
func 3 is working
callback 5
func 3 is stopped
some print
some print
some print
func two finished
After running the code, I understand that by "not working" you mean the program finishes before all the prints are printed.
The reason is that you join the thr thread twice, one of those joins from the main thread, and the order in which the join calls return is not guaranteed.
When the main thread finishes, all threads created by the program also finish, so they are terminated no matter what. The same thing happens when setting the event: it makes the main thread exit and kill the remaining threads.
To do what you intend, you should wait for the one thread in the main thread, for example by making t global in two() and joining it after thr:
from threading import Thread, Event
import time

def one():
    event.set()
    thr.join()
    for i in range(3):
        print('some print')
        time.sleep(1)

def two():
    global t
    t = Thread(target=one)
    t.start()
    print('func two finished')

def three(callback, event):
    c = 0
    while True:
        c += 1
        time.sleep(1)
        print('func 3 is working')
        if c == 5:
            two()
        if event.is_set():
            callback(c)
            print('func 3 is stopped')
            break

def callback(t):
    print('callback ', t)

def thread(callback):
    global event, thr
    event = Event()
    thr = Thread(target=three, args=(callback, event,))
    thr.start()
    thr.join()
    t.join()  # wait for the `one` thread in the main thread

thread(callback)
Note that, as others said, this might be nice for learning purposes, but it has to be modified if you want to actually use it in real-life code.
Your program creates a deadlock if you un-comment that t.join() call in function two:
The thr thread cannot finish until after the t thread has finished because the thr thread calls t.join() in function two.
The t thread cannot finish until after the thr thread has finished because the t thread calls thr.join() in function one.
Neither thread can finish until after the other thread finishes. Therefore, neither thread can ever finish.
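The cycle is easy to reproduce in isolation; here is a minimal sketch (not from the answer) of two threads that each join the other and therefore hang forever:

from threading import Thread
import time

def worker_a():
    time.sleep(0.1)  # short delay so both Thread objects exist before the joins
    b.join()  # A waits for B...

def worker_b():
    time.sleep(0.1)
    a.join()  # ...while B waits for A: neither join can ever return

a = Thread(target=worker_a)
b = Thread(target=worker_b)
a.start()
b.start()
# The program never exits; each thread is blocked in join() on the other.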
Why does one join the thr thread?
def one():
    event.set()
    thr.join()  # What were you trying to do here?
    for i in range(3):
        print('some print')
        time.sleep(1)
Your program will give the output you wanted if you comment out that join call, and uncomment the t.join() call in function two.
You can get the sequence you need with a small rearrangement of the calls. But it is not clear why you need threads if everything is done sequentially.
from threading import Thread, Event
import time

def one(event):
    event.set()
    for i in range(3):
        print('some print')
        time.sleep(1)

def two(event):
    t = Thread(target=one, args=(event,))
    t.start()
    t.join()
    print('func two finished')

def three(event):
    c = 0
    while True:
        c += 1
        time.sleep(1)
        print('func 3 is working')
        if c == 5:
            callback(c)
            print('func 3 is stopped')
            two(event)
            break

def callback(t):
    print('callback ', t)

def thread():
    event = Event()
    thr = Thread(target=three, args=(event,))
    thr.start()

thread()
--------------------------------
func 3 is working
func 3 is working
func 3 is working
func 3 is working
func 3 is working
callback 5
func 3 is stopped
some print
some print
some print
func two finished
This is a comment, not an answer.
This makes no sense:
t = Thread(target=one, args=(event,))
t.start()
t.join()
There's no point in starting a new thread if you aren't going to do something concurrently with the thread. Either do this,
t = Thread(target=one, args=(event,))
t.start()
do_something_else_concurrently_with_thread_t(...)
t.join()
Or just call the function instead of creating a new thread to call it:
one(event)
If you don't want concurrency, then you don't need threads.

Stop Gracefully Tornado ioLoop

I have this async worker functionality using Tornado's IOLoop.
I'm trying to shut down the loop gracefully on Ctrl+C but I get the following error:
tornado.ioloop.TimeoutError: Operation timed out after None seconds
I know I can catch it, but I do want to finish the process in a graceful way. How can I achieve that?
#!/usr/bin/env python

import time
import signal
import random

from tornado import gen, ioloop, queues

concurrency = 10

def sig_exit(signum, frame):
    ioloop.IOLoop.current().add_callback_from_signal(shutdown)

def shutdown():
    print('Will shutdown in few seconds ...')
    io_loop = ioloop.IOLoop.current()
    deadline = time.time() + 3

    def stop_loop():
        now = time.time()
        if now < deadline and (io_loop._callbacks or io_loop._timeouts):
            io_loop.add_timeout(now + 1, stop_loop)
        else:
            io_loop.stop()
            print('Shutdown')

    stop_loop()

@gen.coroutine
def main():
    q = queues.Queue()
    q.put(1)

    @gen.coroutine
    def do_stuff():
        print("doing stuff")
        yield gen.Task(ioloop.IOLoop.instance().add_timeout, time.time() + random.randint(1, 5))
        print("done doing stuff")

    @gen.coroutine
    def worker():
        while True:
            yield do_stuff()

    for _ in range(concurrency):
        worker()

    yield q.join()

if __name__ == '__main__':
    signal.signal(signal.SIGTERM, sig_exit)
    signal.signal(signal.SIGINT, sig_exit)
    io_loop = ioloop.IOLoop.instance()
    io_loop.run_sync(main)
If you're using run_sync, you can no longer call IOLoop.stop; run_sync is now responsible for that. So if you want to make this shutdown "graceful" (instead of just raising a KeyboardInterrupt at the point where you now call stop() and exiting with a stack trace), you need to change the coroutine passed to run_sync so that it exits.
One possible solution is a tornado.locks.Event:
# Create a global Event
shutdown_event = tornado.locks.Event()

def shutdown():
    # Same as in the question, but instead of `io_loop.stop()`:
    shutdown_event.set()

@gen.coroutine
def main():
    # Use a WaitIterator to exit when either the queue
    # is done or shutdown is triggered.
    wait_iter = gen.WaitIterator(q.join(), shutdown_event.wait())
    # In this case we just want to wait for the first one; we don't
    # need to actually iterate over the WaitIterator.
    yield wait_iter.next()
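If the caller needs to know which of the two futures completed first, gen.WaitIterator records it; a small sketch extending the main() above (current_index is 0 for q.join() here because it was passed first):

@gen.coroutine
def main():
    wait_iter = gen.WaitIterator(q.join(), shutdown_event.wait())
    yield wait_iter.next()
    if wait_iter.current_index == 0:
        print("queue drained")
    else:
        print("shutdown requested")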
async def main():
    tornado.options.parse_command_line()
    ...
    app = Application(db)
    app.listen(options.port)

    shutdown_event = tornado.locks.Event()

    def shutdown(signum, frame):
        print("shutdown database !!!!")
        db.close()
        shutdown_event.set()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)

    await shutdown_event.wait()
    print("\n\nshutdown -h now")

if __name__ == "__main__":
    tornado.ioloop.IOLoop.current().run_sync(main)

Python threads exit with ctrl-c in Python

I have the following multi-threaded Python program. If I press Ctrl+C within the first ~5 seconds, it goes into the KeyboardInterrupt exception handler.
If the code has been running longer than about 15 seconds, it no longer responds to Ctrl+C: no KeyboardInterrupt exception is thrown. What could be the reason? I tested this on Linux.
#!/usr/bin/python

import os, sys, threading, time

class Worker(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        # A flag to notify the thread that it should finish up and exit
        self.kill_received = False

    def run(self):
        while not self.kill_received:
            self.do_something()

    def do_something(self):
        [i*i for i in range(10000)]
        time.sleep(1)

def main(args):
    threads = []
    for i in range(10):
        t = Worker()
        threads.append(t)
        t.start()

    while len(threads) > 0:
        try:
            # Join all threads using a timeout so it doesn't block
            # Filter out threads which have been joined or are None
            threads = [t.join(1) for t in threads if t is not None and t.isAlive()]
        except KeyboardInterrupt:
            print "Ctrl-c received! Sending kill to threads..."
            for t in threads:
                t.kill_received = True

if __name__ == '__main__':
    main(sys.argv)
After the first execution of
threads = [t.join(1) for t in threads if t is not None and t.isAlive()]
your variable threads contains
[None, None, None, None, None, None, None, None, None, None]
after the second execution, the same variable threads contains:
[]
At this point, len(threads) > 0 is False and you get out of the while loop. Your script is still running, since you still have 10 active threads, but because you're no longer inside your try/except block (the one that catches KeyboardInterrupt), you can't stop it with Ctrl + C anymore.
Add some prints to your script to see what I described:
#!/usr/bin/python

import os, sys, threading, time

class Worker(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        # A flag to notify the thread that it should finish up and exit
        self.kill_received = False

    def run(self):
        while not self.kill_received:
            self.do_something()

    def do_something(self):
        [i*i for i in range(10000)]
        time.sleep(1)

def main(args):
    threads = []
    for i in range(10):
        t = Worker()
        threads.append(t)
        t.start()
        print('thread {} started'.format(i))

    while len(threads) > 0:
        print('Before joining')
        try:
            # Join all threads using a timeout so it doesn't block
            # Filter out threads which have been joined or are None
            threads = [t.join(1) for t in threads if t is not None and t.isAlive()]
            print('After join() on threads: threads={}'.format(threads))
        except KeyboardInterrupt:
            print("Ctrl-c received! Sending kill to threads...")
            for t in threads:
                t.kill_received = True

    print('main() execution is now finished...')

if __name__ == '__main__':
    main(sys.argv)
And the result:
$ python thread_test.py
thread 0 started
thread 1 started
thread 2 started
thread 3 started
thread 4 started
thread 5 started
thread 6 started
thread 7 started
thread 8 started
thread 9 started
Before joining
After join() on threads: threads=[None, None, None, None, None, None, None, None, None, None]
Before joining
After join() on threads: threads=[]
main() execution is now finished...
Actually, Ctrl + C doesn't stop working after 15 seconds, but after 10 or 11 seconds: the time needed to create and start the 10 threads (less than a second) plus the time needed to execute join(1) on each thread (about 10 seconds).
Hint from the doc:
As join() always returns None, you must call isAlive() after join() to decide whether a timeout happened – if the thread is still alive, the join() call timed out.
To follow up on the answer above: isAlive() got renamed to is_alive().
Tried on Python 3.9.6.
full code:
#!/usr/bin/python

import os, sys, threading, time

class Worker(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        # A flag to notify the thread that it should finish up and exit
        self.kill_received = False

    def run(self):
        while not self.kill_received:
            self.do_something()

    def do_something(self):
        [i*i for i in range(10000)]
        time.sleep(1)

def main(args):
    threads = []
    for i in range(10):
        t = Worker()
        threads.append(t)
        t.start()
        print('thread {} started'.format(i))

    while len(threads) > 0:
        print('Before joining')
        try:
            # Join all threads using a timeout so it doesn't block
            # Filter out threads which have been joined or are None
            threads = [t.join(1) for t in threads if t is not None and t.is_alive()]
            print('After join() on threads: threads={}'.format(threads))
        except KeyboardInterrupt:
            print("Ctrl-c received! Sending kill to threads...")
            for t in threads:
                t.kill_received = True

    print('main() execution is now finished...')

if __name__ == '__main__':
    main(sys.argv)
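Note that both listings above still have the original bug: the list comprehension replaces threads with a list of None values, so the loop exits after one pass and Ctrl+C stops working. A minimal corrected loop (a sketch, not from the answers; Python 3, reusing the Worker class above) keeps the thread objects and stays inside the try/except until they are all gone:

def main(args):
    threads = [Worker() for _ in range(10)]
    for t in threads:
        t.start()
    while threads:
        try:
            # join() returns None, so keep the thread objects themselves
            # and only drop the ones that have actually finished
            for t in threads:
                t.join(1)
            threads = [t for t in threads if t.is_alive()]
        except KeyboardInterrupt:
            print("Ctrl-c received! Sending kill to threads...")
            for t in threads:
                t.kill_received = True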

Process vs. Thread with regards to using Queue()/deque() and class variable for communication and "poison pill"

I would like to create either a Thread or a Process which runs forever in a while True loop.
I need to send and receive data to the worker in the form of queues, either a multiprocessing.Queue() or a collections.deque(). I prefer collections.deque() as it is significantly faster.
I also need to be able to kill the worker eventually (as it runs in a while True loop). Here is some test code I've put together to try to understand the differences between Threads, Processes, Queues, and deques:
import time
from multiprocessing import Process, Queue
from threading import Thread
from collections import deque

class ThreadingTest(Thread):
    def __init__(self, q):
        super(ThreadingTest, self).__init__()
        self.q = q
        self.toRun = False

    def run(self):
        print("Started Thread")
        self.toRun = True
        while self.toRun:
            if type(self.q) == type(deque()):
                if self.q:
                    i = self.q.popleft()
                    print("Thread deque: " + str(i))
            elif type(self.q) == type(Queue()):
                if not self.q.empty():
                    i = self.q.get_nowait()
                    print("Thread Queue: " + str(i))

    def stop(self):
        print("Trying to stop Thread")
        self.toRun = False
        while self.isAlive():
            time.sleep(0.1)
        print("Stopped Thread")

class ProcessTest(Process):
    def __init__(self, q):
        super(ProcessTest, self).__init__()
        self.q = q
        self.toRun = False
        self.ctr = 0

    def run(self):
        print("Started Process")
        self.toRun = True
        while self.toRun:
            if type(self.q) == type(deque()):
                if self.q:
                    i = self.q.popleft()
                    print("Process deque: " + str(i))
            elif type(self.q) == type(Queue()):
                if not self.q.empty():
                    i = self.q.get_nowait()
                    print("Process Queue: " + str(i))

    def stop(self):
        print("Trying to stop Process")
        self.toRun = False
        while self.is_alive():
            time.sleep(0.1)
        print("Stopped Process")

if __name__ == '__main__':
    q = Queue()
    t1 = ProcessTest(q)
    t1.start()

    for i in range(10):
        if type(q) == type(deque()):
            q.append(i)
        elif type(q) == type(Queue()):
            q.put_nowait(i)

    time.sleep(1)
    t1.stop()
    t1.join()

    if type(q) == type(deque()):
        print(q)
    elif type(q) == type(Queue()):
        while q.qsize() > 0:
            print(str(q.get_nowait()))
As you can see, t1 can be either ThreadingTest or ProcessTest. Also, the queue passed to it can be either a multiprocessing.Queue or a collections.deque.
ThreadingTest works with either a Queue or a deque(). It also ends run() properly when the stop() method is called:
Started Thread
Thread deque: 0
Thread deque: 1
Thread deque: 2
Thread deque: 3
Thread deque: 4
Thread deque: 5
Thread deque: 6
Thread deque: 7
Thread deque: 8
Thread deque: 9
Trying to stop Thread
Stopped Thread
deque([])
ProcessTest is only able to read from the queue if it is of type multiprocessing.Queue. It doesn't work with collections.deque. Furthermore, I am unable to kill the process using stop().
Process Queue: 0
Process Queue: 1
Process Queue: 2
Process Queue: 3
Process Queue: 4
Process Queue: 5
Process Queue: 6
Process Queue: 7
Process Queue: 8
Process Queue: 9
Trying to stop Process
I'm trying to figure out why. Also, what would be the best way to use a deque with a process? And how would I go about killing the process with some sort of stop() method?
You can't use a collections.deque to pass data between two multiprocessing.Process instances, because collections.deque is not process-aware. multiprocessing.Queue writes its contents to a multiprocessing.Pipe internally, which means that data in it can be enqueued in one process and retrieved in another. collections.deque doesn't have that kind of plumbing, so it won't work. When you write to the deque in one process, the deque instance in the other process won't be affected at all; they're completely separate instances.
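You can see this with a few lines (a minimal sketch, not from the answer): the child process mutates its own copy of the deque, and the parent's copy is untouched:

from collections import deque
from multiprocessing import Process

def child(d):
    d.append('from child')  # mutates the child's copy only

if __name__ == '__main__':
    d = deque()
    p = Process(target=child, args=(d,))
    p.start()
    p.join()
    print(d)  # deque([]) -- the parent's deque never saw the append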
A similar issue is happening to your stop() method. You're changing the value of toRun in the main process, but this won't affect the child at all. They're completely separate instances. The best way to end the child would be to send some sentinel to the Queue. When you get the sentinel in the child, break out of the infinite loop:
def run(self):
    print("Started Process")
    self.toRun = True
    while self.toRun:
        if type(self.q) == type(deque()):
            if self.q:
                i = self.q.popleft()
                print("Process deque: " + str(i))
        elif type(self.q) == type(Queue()):
            if not self.q.empty():
                i = self.q.get_nowait()
                if i is None:
                    break  # Got sentinel, so break
                print("Process Queue: " + str(i))

def stop(self):
    print("Trying to stop Process")
    self.q.put(None)  # Send sentinel
    while self.is_alive():
        time.sleep(0.1)
    print("Stopped Process")
Edit:
If you actually do need deque semantics between two processes, you can use a custom multiprocessing.Manager() to create a shared deque in a Manager process; each of your Process instances will get a Proxy to it:
import time
from multiprocessing import Process
from multiprocessing.managers import SyncManager
from collections import deque

SyncManager.register('deque', deque)

def Manager():
    m = SyncManager()
    m.start()
    return m

class ProcessTest(Process):
    def __init__(self, q):
        super(ProcessTest, self).__init__()
        self.q = q
        self.ctr = 0

    def run(self):
        print("Started Process")
        self.toRun = True
        while self.toRun:
            if self.q._getvalue():
                i = self.q.popleft()
                if i is None:
                    break
                print("Process deque: " + str(i))

    def stop(self):
        print("Trying to stop Process")
        self.q.append(None)
        while self.is_alive():
            time.sleep(0.1)
        print("Stopped Process")

if __name__ == '__main__':
    m = Manager()
    q = m.deque()
    t1 = ProcessTest(q)
    t1.start()

    for i in range(10):
        q.append(i)

    time.sleep(1)
    t1.stop()
    t1.join()
    print(q)
Note that this probably isn't going to be faster than a multiprocessing.Queue, though, since there's an IPC cost for every time you access the deque. It's also a much less natural data structure for passing messages the way you are.

python can't start a new thread

I am building a multi-threaded application.
I have set up a thread pool: a queue of size N and N workers that get data from the queue.
When all tasks are done I use
tasks.join()
where tasks is the queue.
The application seems to run smoothly until suddenly, at some point (after 20 minutes, for example), it terminates with the error
thread.error: can't start new thread
Any ideas?
Edit: the threads are daemon threads and the code looks like this:
while True:
    t0 = time.time()
    keyword_statuses = DBSession.query(KeywordStatus).filter(KeywordStatus.status==0).options(joinedload(KeywordStatus.keyword)).with_lockmode("update").limit(100)
    if keyword_statuses.count() == 0:
        DBSession.commit()
        break
    for kw_status in keyword_statuses:
        kw_status.status = 1
    DBSession.commit()
    t0 = time.time()
    w = SWorker(threads_no=32, network_server='http://192.168.1.242:8180/', keywords=keyword_statuses, cities=cities, saver=MySqlRawSave(DBSession), loglevel='debug')
    w.work()
print 'finished'
When are the daemon threads killed? When the application finishes, or when work() finishes?
Look at the thread pool and the worker (it's from a recipe):
from Queue import Queue
from threading import Thread, Event, current_thread
import time

event = Event()

class Worker(Thread):
    """Thread executing tasks from a given tasks queue"""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()

    def run(self):
        '''Start processing tasks from the queue'''
        while True:
            event.wait()
            #time.sleep(0.1)
            try:
                func, args, callback = self.tasks.get()
            except Exception, e:
                print str(e)
                return
            else:
                if callback is None:
                    func(args)
                else:
                    callback(func(args))
                self.tasks.task_done()

class ThreadPool:
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, args=None, callback=None):
        '''Add a task to the queue'''
        self.tasks.put((func, args, callback))

    def wait_completion(self):
        '''Wait for completion of all the tasks in the queue'''
        self.tasks.join()

    def broadcast_block_event(self):
        '''blocks running threads'''
        event.clear()

    def broadcast_unblock_event(self):
        '''unblocks running threads'''
        event.set()

    def get_event(self):
        '''returns the event object'''
        return event
Also, maybe the problem is that I create SWorker objects in a loop? What happens to the old SWorker (garbage collection?)?
There is still not enough code to localize the problem, but I'm sure this is because you don't reuse your threads and start too many of them. Did you see the canonical example from the Queue Python documentation, http://docs.python.org/library/queue.html (bottom of the page)?
I can reproduce your problem with the following code:
import threading
import Queue

q = Queue.Queue()

def worker():
    item = q.get(block=True)  # sleeps forever for now
    do_work(item)
    q.task_done()

# create an infinite number of worker threads; fails
# after some time with "error: can't start new thread"
while True:
    t = threading.Thread(target=worker)
    t.start()

q.join()  # never reached
Instead you must create a pool of threads with a known number of threads and put your data into the queue, like:
q = Queue()

def worker():
    while True:
        item = q.get()
        do_work(item)
        q.task_done()

for i in range(num_worker_threads):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

for item in source():
    q.put(item)

q.join()  # block until all tasks are done
UPD: In case you need to stop a thread, you can add a flag to it, or send a special marker meaning "stop" to break out of the while loop:
class Worker(Thread):
    break_msg = object()  # just a unique marker object

    def __init__(self):
        Thread.__init__(self)
        self.keep_running = True  # note: 'continue' is a reserved word, so the flag needs another name

    def run(self):
        while self.keep_running:  # can stop and destroy thread (var 1)
            msg = queue.get(block=True)
            if msg is self.break_msg:
                return  # will stop and destroy thread (var 2)
            do_work()
            queue.task_done()

workers = [Worker() for _ in xrange(num_workers)]
for w in workers:
    w.start()

for task in tasks:
    queue.put(task)

for _ in xrange(num_workers):
    queue.put(Worker.break_msg)  # stop threads after all tasks are done; need as many marks as threads

OR

queue.join()  # wait until all tasks are done
for w in workers:
    w.keep_running = False
    queue.put(None)  # wake the thread blocked in queue.get() so it sees the flag
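As an aside (not part of the original answer): on modern Python, concurrent.futures.ThreadPoolExecutor does this pooling and bookkeeping for you, so unbounded thread creation can't happen. A minimal sketch:

from concurrent.futures import ThreadPoolExecutor

def do_work(item):
    print('working on', item)

# The pool owns a fixed number of threads and its own internal queue,
# so you can never hit "can't start new thread" from unbounded spawning.
with ThreadPoolExecutor(max_workers=8) as pool:
    futures = [pool.submit(do_work, item) for item in range(100)]
# leaving the with-block waits for all submitted tasks to finish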
