I don't know why I'm having such a problem with this. Basically, I want a "Worker" that runs constantly for the lifetime of the program, filling a queue. Every 10 seconds or so, another method called "Process" comes in and processes the data. Let's assume data is captured every 10 seconds (0, 1, 2, 3, ..., n); the "Process" function receives it, processes it, ends, and then the "Worker" goes back to its job until the program has ended.
I have the following code:
import multiprocessing as mp
import time
DELAY_SIZE = 10
def Worker(q):
    print "I'm working..."

def Process(q):
    print "I'm processing.."

queue = mp.Queue(maxsize=DELAY_SIZE)
p = mp.Process(target=Worker, args=(queue,))
p.start()

while True:
    d = queue.get()
    time.sleep(10)
    Process(queue)
In this example, it would look like the following:
I'm working...
I'm working...
I'm working...
...
...
...
I'm working...
I'm processing...
I'm processing...
I'm processing...
...
...
I'm working..
I'm working..
Any ideas?
Here is an alternative way using threads:
import threading
import Queue
import time

class Worker(threading.Thread):
    def __init__(self, q):
        threading.Thread.__init__(self)
        self._q = q

    def run(self):
        # here, worker does its job
        # results are pushed to the shared queue
        while True:
            print 'I am working'
            time.sleep(1)
            result = time.time()  # just an example
            self._q.put(result)

def process(q):
    while True:
        if q.empty():
            time.sleep(10)
        print 'I am processing'
        worker_result = q.get()
        # do whatever you want with the result...
        print " ", worker_result

if __name__ == '__main__':
    shared_queue = Queue.Queue()
    worker = Worker(shared_queue)
    worker.start()
    process(shared_queue)
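One thing to note: the process function above pulls a single item per 10-second cycle. If you want it to drain everything the worker queued during that window instead, you can empty the queue in a batch. A minimal sketch of that variant (get_nowait raises Queue.Empty once the queue is drained):

import Queue
import time

def process_batch(q):
    # drop-in replacement for process(): drains the whole queue every cycle
    while True:
        time.sleep(10)
        batch = []
        while True:
            try:
                batch.append(q.get_nowait())  # pull everything queued so far
            except Queue.Empty:
                break
        print 'I am processing %d results' % len(batch)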
This is my current code. The main issue is that I use a Semaphore to coordinate the two processes, but the Semaphore does not seem to change globally, i.e. when the "producer" process changes the Semaphore to 2, the "consumer" process still thinks the Semaphore is zero, which causes it to wait forever.
from multiprocessing import Process, Semaphore, Queue
import time
from random import random

buffer = Queue(10)
empty = Semaphore(2)
full = Semaphore(0)

class Consumer(Process):
    def run(self):
        global buffer, empty, full
        while True:
            time.sleep(4)
            print(full)
            full.acquire()
            buffer.get()
            print('Consumer get')
            time.sleep(1)
            empty.release()

class Producer(Process):
    def run(self):
        global buffer, empty, full
        while True:
            empty.acquire()
            print('Producer put ')
            time.sleep(1)
            full.release()
            buffer.put(1)
            print(full)

if __name__ == '__main__':
    p = Producer()
    c = Consumer()
    p.daemon = c.daemon = True
    p.start()
    c.start()
    p.join()
    c.join()
    print('Ended!')
and the output is
Producer put
<Semaphore(value=1)>
Producer put
<Semaphore(value=2)>
<Semaphore(value=0)>
I don't know what I should do to let the "consumer" process detect the change.
Your two processes each have their own copy of both semaphores, because each process runs the whole top-level code in the script when it is instantiated.
You must move the semaphore and queue definitions inside the if __name__ == '__main__': block and pass the instances of the semaphores and the queue to the Producer and Consumer constructors, so that both processes use the same instance of each of the three objects.
from multiprocessing import Process, Semaphore, Queue
import time

class Consumer(Process):
    def __init__(self, empty, full, buffer):
        super(Consumer, self).__init__()
        self.empty = empty
        self.full = full
        self.buffer = buffer

    def run(self):
        while True:
            time.sleep(4)
            print("Consumer: {}".format(self.full), flush=True)
            print("Consumer: buf {}".format(self.buffer.qsize()), flush=True)
            self.full.acquire()
            self.buffer.get()
            print('Consumer get', flush=True)
            time.sleep(1)
            self.empty.release()

class Producer(Process):
    def __init__(self, empty, full, buffer):
        super(Producer, self).__init__()
        self.empty = empty
        self.full = full
        self.buffer = buffer

    def run(self):
        while True:
            self.empty.acquire()
            print('Producer put ', flush=True)
            self.buffer.put('a')  # <<< you forgot this in your code. If the queue is empty, get() will block in the consumer
            time.sleep(1)
            self.full.release()
            print(self.full, flush=True)

if __name__ == '__main__':
    buffer = Queue(10)
    empty = Semaphore(2)
    full = Semaphore(0)
    p = Producer(empty, full, buffer)
    c = Consumer(empty, full, buffer)
    p.daemon = c.daemon = True
    p.start()
    c.start()
    p.join()
    c.join()
    print('Ended!')
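As a side note, for this bounded-buffer pattern the semaphores are not strictly necessary: a multiprocessing.Queue with a maxsize already blocks the producer when the buffer is full and the consumer when it is empty. A minimal sketch of the same producer/consumer using only the queue, with the same timings as above:

from multiprocessing import Process, Queue
import time

def producer(buffer):
    while True:
        buffer.put('a')  # blocks once the buffer already holds maxsize items
        print('Producer put', flush=True)
        time.sleep(1)

def consumer(buffer):
    while True:
        time.sleep(4)
        print('Consumer get', buffer.get(), flush=True)  # blocks while empty

if __name__ == '__main__':
    buffer = Queue(2)  # maxsize=2 plays the role of empty = Semaphore(2)
    p = Process(target=producer, args=(buffer,))
    c = Process(target=consumer, args=(buffer,))
    p.daemon = c.daemon = True
    p.start()
    c.start()
    p.join()
    c.join()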
My multi-threading script raises this error:
thread.error: can't start new thread
when it reaches 460 threads:
threading.active_count() = 460
I assume the old threads keep stacking up, since the script didn't kill them. This is my code:
import threading
import Queue
import time
import os
import csv

def main(worker):
    # Do Work
    print worker
    return

def threader():
    while True:
        worker = q.get()
        main(worker)
        q.task_done()

def main_threader(workers):
    global q
    global city
    q = Queue.Queue()
    for x in range(20):
        t = threading.Thread(target=threader)
        t.daemon = True
        print "\n\nthreading.active_count() = " + str(threading.active_count()) + "\n\n"
        t.start()
    for worker in workers:
        q.put(worker)
    q.join()
How do I kill the old threads when their job is done? (Is the function returning not enough?)
The Python threading API doesn't have any function to kill a thread (nothing like threading.kill(PID)).
That said, you have to code a thread-stopping mechanism yourself. For example, your thread should somehow decide that it should terminate (e.g. check some global variable, or check whether some signal has been sent) and simply return.
For example:
import threading

nthreads = 7
you_should_stop = [0 for _ in range(nthreads)]

def Athread(number):
    while True:
        if you_should_stop[number]:
            print "Thread {} stopping...".format(number)
            return
        print "Running..."

for x in range(nthreads):
    threading.Thread(target=Athread, args=(x,)).start()

for x in range(nthreads):
    you_should_stop[x] = 1

print "\nStopped all threads!"
I want to write an infinite loop function.
Here is my code:
def do_request():
    # my code here
    print(result)

while True:
    do_request()
When I use while True to do this, it's a little slow, so I want to use a thread pool to execute the function do_request() concurrently. How do I do this?
Just like using ab (Apache Bench) to test an HTTP server.
Finally, I've solved this problem. I use a variable to limit the thread count.
Here is my final code, which solved my problem.
import threading
import time

thread_num = 0
lock = threading.Lock()

def do_request():
    global thread_num
    # -------------
    # my code here
    # -------------
    with lock:
        thread_num -= 1

while True:
    if thread_num <= 50:
        with lock:
            thread_num += 1
        t = threading.Thread(target=do_request)
        t.start()
    else:
        time.sleep(0.01)
Thanks for all replies.
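For what it's worth, a threading.BoundedSemaphore expresses the same 50-in-flight limit without the manual counter, lock, and polling loop: acquire() blocks while 50 requests are running, and each finished request releases a slot. A minimal sketch, assuming the same do_request body as above:

import threading

limit = threading.BoundedSemaphore(50)  # at most 50 requests in flight

def do_request():
    try:
        pass  # my code here
    finally:
        limit.release()  # free the slot even if the request raises

while True:
    limit.acquire()  # blocks instead of polling with sleep()
    threading.Thread(target=do_request).start()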
You can use threading in Python to implement this.
It can be something similar to this (when using two extra threads only):
import threading

# define threads
task1 = threading.Thread(target=do_request)
task2 = threading.Thread(target=do_request)

# start both threads
task1.start()
task2.start()

# wait for both threads to complete
task1.join()
task2.join()
Basically, you start as many threads as you need (make sure you don't start more than your system can handle), then you .join() them to wait for the tasks to complete.
Or you can get fancier with the multiprocessing module.
Try the following code:
import multiprocessing as mp
import time

def do_request():
    while True:
        print("I'm making requests")
        time.sleep(0.5)

p = mp.Process(target=do_request)
p.start()

for ii in range(10):
    print("I'm also doing other things though")
    time.sleep(0.7)

print("Now it is time to kill the service process")
p.terminate()
The main process starts a service process that makes the requests, carries on with its own work for as long as it has to, and then terminates the service process.
Maybe you can use the concurrent.futures.ThreadPoolExecutor
from concurrent.futures import ThreadPoolExecutor
import time

def wait_on_b(hello):
    time.sleep(1)
    print(hello)  # prints 3, the argument passed via submit()
    return 5

def wait_on_a():
    time.sleep(1)
    print(a.result())  # prints 5 once future a has completed
    return 6

executor = ThreadPoolExecutor(max_workers=2)
a = executor.submit(wait_on_b, 3)
b = executor.submit(wait_on_a)
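Applied to the question, a pool keeps a fixed number of do_request calls running at once, and you submit a replacement as each future completes. A minimal sketch (the do_request body here is just a stand-in for the one from the question):

from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED
import time

def do_request():
    time.sleep(0.1)  # stand-in for the real request code

def run_forever(workers=50):
    with ThreadPoolExecutor(max_workers=workers) as executor:
        futures = {executor.submit(do_request) for _ in range(workers)}
        while True:
            done, futures = wait(futures, return_when=FIRST_COMPLETED)
            for _ in done:
                futures.add(executor.submit(do_request))  # keep the pool full

run_forever()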
How about this?
from threading import Thread, Event

class WorkerThread(Thread):
    def __init__(self, logger, func):
        Thread.__init__(self)
        self.stop_event = Event()
        self.logger = logger
        self.func = func

    def run(self):
        self.logger("Going to start the loop...")
        # your code: call func repeatedly until the stop event is set
        while not self.stop_event.is_set():
            self.func()

concur_task = WorkerThread(print, func=do_request)  # print serves as a simple logger here
concur_task.start()
To end this thread...
concur_task.stop_event.set()
concur_task.join(10)  # or any timeout you like
I have a simple example script that defines three separate processes using multiprocessing in Python. My objective is to have one parent process spawn two smaller processes that will collect and process data.
Currently, my implementation looks like this:
from Queue import Queue, Empty
from multiprocessing import Process
import time
import hashlib

class FillQueue(Process):
    def __init__(self, q):
        Process.__init__(self)
        self.q = q

    def run(self):
        i = 0
        while i is not 5:
            print 'putting'
            self.q.put('foo')
            i += 1
        self.q.put('|STOP|')

class ConsumeQueue(Process):
    def __init__(self, q):
        Process.__init__(self)
        self.q = q

    def run(self):
        print 'Consume'
        while True:
            try:
                value = self.q.get(False)
                print value
                if value == '|STOP|':
                    print 'done'
                    break
            except Empty:
                print 'Nothing to process atm'

class Ripper(Process):
    q = Queue()

    def __init__(self):
        self.fq = FillQueue(self.q)
        self.cq = ConsumeQueue(self.q)
        self.fq.daemon = True
        self.cq.daemon = True

    def run(self):
        try:
            self.fq.start()
            self.cq.start()
        except KeyboardInterrupt:
            print 'exit'

if __name__ == '__main__':
    r = Ripper()
    r.start()
As it runs presently, the output from the script on CLI looks like this:
putting
putting
putting
putting
putting
Consume
foo
foo
foo
foo
foo
|STOP|
done
Obviously, the way I am starting my two worker processes is blocking, since the consumer doesn't even begin to process the items in the queue until the filler finishes adding items.
How should I rewrite this to make both begin immediately and not block, so the consumer will simply pass to the Empty except block while there is no work to process, but will exit completely when it receives the stop message?
EDIT: typo, had the start and run methods mixed up
You seem to be starting multiple processes using multiprocessing.Process.
However, you are using Queue.Queue, which is only thread-safe and is not designed to be shared between multiple processes.
shevek's answer is valid as well, but as a start, you should replace Queue.Queue with multiprocessing.Queue.
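In practice the queue fix is a one-line import change (a minimal sketch of the swap):

from multiprocessing import Queue  # instead of: from Queue import Queue

q = Queue()  # this instance is safe to share between Process objects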
try this:
from Queue import Empty
from multiprocessing import Process, Queue
import time
import hashlib

class FillQueue(object):
    def __init__(self, q):
        self.q = q

    def run(self):
        i = 0
        while i < 5:
            print 'putting'
            self.q.put('foo %d' % i)
            i += 1
            time.sleep(.5)
        self.q.put('|STOP|')

class ConsumeQueue(object):
    def __init__(self, q):
        self.q = q

    def run(self):
        while True:
            try:
                value = self.q.get(False)
                print value
                if value == '|STOP|':
                    print 'done'
                    break
            except Empty:
                print 'Nothing to process atm'
                time.sleep(.2)

if __name__ == '__main__':
    q = Queue()
    f = FillQueue(q)
    c = ConsumeQueue(q)
    p1 = Process(target=f.run)
    p1.start()
    p2 = Process(target=c.run)
    p2.start()
    p1.join()
    p2.join()
I think your program works fine. A CPU only processes one thing at a time, for a short time slice, and the time required to put all your items into the queue is very short, so there is no reason the filler cannot do it all within one time slice.
If you add some delays in the filler, I think you will see that it actually works as you expect.
I have a script and I want one function to run at the same time as the other.
The example code I have looked at:
import threading

def MyThread (threading.thread):
    # doing something........

def MyThread2 (threading.thread):
    # doing something........

MyThread().start()
MyThread2().start()
I am having trouble getting this working. I would prefer to get this going using a threaded function rather than a class.
This is the working script:
import os
from threading import Thread
from time import sleep

class myClass():
    def help(self):
        os.system('./ssh.py')

    def nope(self):
        a = [1, 2, 3, 4, 5, 6, 67, 78]
        for i in a:
            print i
            sleep(1)

if __name__ == "__main__":
    Yep = myClass()
    thread = Thread(target=Yep.help)
    thread2 = Thread(target=Yep.nope)
    thread.start()
    thread2.start()
    thread.join()
    print 'Finished'
You don't need to use a subclass of Thread to make this work - take a look at the simple example I'm posting below to see how:
from threading import Thread
from time import sleep

def threaded_function(arg):
    for i in range(arg):
        print("running")
        sleep(1)

if __name__ == "__main__":
    thread = Thread(target=threaded_function, args=(10,))
    thread.start()
    thread.join()
    print("thread finished...exiting")
Here I show how to use the threading module to create a thread which invokes a normal function as its target. You can see how I can pass whatever arguments I need to it in the thread constructor.
There are a few problems with your code:
def MyThread ( threading.thread ):
You can't subclass with a function; only with a class
If you were going to use a subclass you'd want threading.Thread, not threading.thread
If you really want to do this with only functions, you have two options:
With threading:
import threading

def MyThread1():
    pass

def MyThread2():
    pass

t1 = threading.Thread(target=MyThread1, args=[])
t2 = threading.Thread(target=MyThread2, args=[])

t1.start()
t2.start()
With thread (Python 2 only; the module was renamed _thread in Python 3):
import thread

def MyThread1():
    pass

def MyThread2():
    pass

thread.start_new_thread(MyThread1, ())
thread.start_new_thread(MyThread2, ())
Doc for thread.start_new_thread
I tried adding another join(), and it seems to work. Here is the code:
from threading import Thread
from time import sleep

def function01(arg, name):
    for i in range(arg):
        print(name, 'i---->', i, '\n')
        print(name, "arg---->", arg, '\n')
        sleep(1)

def test01():
    thread1 = Thread(target=function01, args=(10, 'thread1'))
    thread1.start()
    thread2 = Thread(target=function01, args=(10, 'thread2'))
    thread2.start()
    thread1.join()
    thread2.join()
    print("thread finished...exiting")

test01()
Python 3 has the facility of launching parallel tasks through concurrent.futures, which makes our work easier.
It provides both thread pooling and process pooling.
The following gives an insight:
ThreadPoolExecutor Example
import concurrent.futures
import urllib.request

URLS = ['http://www.foxnews.com/',
        'http://www.cnn.com/',
        'http://europe.wsj.com/',
        'http://www.bbc.co.uk/',
        'http://some-made-up-domain.com/']

# Retrieve a single page and report the URL and contents
def load_url(url, timeout):
    with urllib.request.urlopen(url, timeout=timeout) as conn:
        return conn.read()

# We can use a with statement to ensure threads are cleaned up promptly
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    # Start the load operations and mark each future with its URL
    future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
    for future in concurrent.futures.as_completed(future_to_url):
        url = future_to_url[future]
        try:
            data = future.result()
        except Exception as exc:
            print('%r generated an exception: %s' % (url, exc))
        else:
            print('%r page is %d bytes' % (url, len(data)))
Another example, this time with a process pool (is_prime is CPU-bound, so ProcessPoolExecutor can actually run the checks in parallel):
import concurrent.futures
import math

PRIMES = [
    112272535095293,
    112582705942171,
    112272535095293,
    115280095190773,
    115797848077099,
    1099726899285419]

def is_prime(n):
    if n % 2 == 0:
        return False
    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, sqrt_n + 1, 2):
        if n % i == 0:
            return False
    return True

def main():
    # a process pool suits this CPU-bound work better than a thread pool
    with concurrent.futures.ProcessPoolExecutor(max_workers=5) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))

if __name__ == '__main__':
    main()
You can use the target argument in the Thread constructor to directly pass in a function that gets called instead of run.
Did you override the run() method? If you overrode __init__, did you make sure to call the base threading.Thread.__init__()?
After starting the two threads, does the main thread continue to do work indefinitely, or block/join on the child threads, so that main-thread execution does not end before the child threads complete their tasks?
And finally, are you getting any unhandled exceptions?
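If you do want the subclass route, here is a minimal sketch with those pitfalls handled: the base __init__ is called, run() is overridden (start() arranges for it to run in the new thread), and the main thread joins so it does not exit before the workers finish:

import threading
import time

class MyThread(threading.Thread):
    def __init__(self, label):
        threading.Thread.__init__(self)  # don't forget the base initializer
        self.label = label

    def run(self):
        for i in range(3):
            print '%s: %d' % (self.label, i)
            time.sleep(1)

a = MyThread('worker-1')
b = MyThread('worker-2')
a.start()
b.start()
a.join()  # keep the main thread alive until both workers are done
b.join()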
The simple way to implement a multithreaded process is using the threading module.
A code snippet for the same:
import threading
import time

# function which takes some time to process
def say(i):
    time.sleep(1)
    print(i)

threads = []
for i in range(10):
    thread = threading.Thread(target=say, args=(i,))
    thread.start()
    threads.append(thread)

# wait for all threads to complete before the main program exits
for thread in threads:
    thread.join()