I've got a task to speed up the execution time of this script. The target is below 12 seconds.
The code does the job but it's not really efficient. How can I improve it?
I suspect I'm not using the multiprocessing module the best way possible here:
import multiprocessing, datetime
from time import sleep

def queue_time(customers, num_of_tills):
    if len(customers) == 0:
        return 0
    start = datetime.datetime.now()
    for x in range(num_of_tills):
        for y in customers:
            process = multiprocessing.Process(target=sleep, args=(y,))
            process.start()
            process.join()
    result = datetime.datetime.now() - start
    return result.seconds
Please assume that the customers argument is a list of integers.
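For reference, here is a minimal sketch of the usual pattern, with made-up customer data and not necessarily what the exercise expects: start every process first and only join them afterwards. Starting and immediately joining inside the loop, as above, runs each sleep one after another, so nothing actually happens in parallel.

import multiprocessing
from time import sleep

if __name__ == '__main__':
    customers = [1, 2, 3]                  # hypothetical sleep durations in seconds
    processes = []
    for y in customers:
        p = multiprocessing.Process(target=sleep, args=(y,))
        p.start()                          # all sleeps now run concurrently
        processes.append(p)
    for p in processes:
        p.join()                           # total wait is about max(customers), not sum(customers)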
# Import threading module
import threading
# Import time module to implement sleep functionality
import time
# Import datetime to get start time, end time and elapsed time
from datetime import datetime

# Function to loop 100000 times
def loop_fun(name):
    for i in range(0, 10):
        time.sleep(0.0001)
    print(name)
    print("Loop executed!")

# Function to execute loop_fun without multithreading
num = 0
def without_thread(num):
    starting_time = datetime.now()
    for i in range(0, num):
        loop_fun("Called from without_thread")
    ending_time = datetime.now()
    elapsed_time = (ending_time - starting_time).total_seconds()
    print("\n\nTime Elapsed without_thread: " + str(elapsed_time) + "\n\n\n\n\n")

# Function to execute loop_fun with multithreading
def with_thread(num):
    start_time = datetime.now()
    threads_lst = []
    # Creating threads to call loop_fun
    for i in range(0, num):
        threads_lst.append(threading.Thread(target=loop_fun, args=("Called from with_thread",)))
    # Running threads
    for threads_ in threads_lst:
        threads_.start()
    # Waiting for completion of all threads
    for threads_ in threads_lst:
        threads_.join()
    end_time = datetime.now()
    elapsed_time = (end_time - start_time).total_seconds()
    print("\n\nTime Elapsed with_thread: " + str(elapsed_time) + "\n\n\n\n\n")

if __name__ == "__main__":
    without_thread(10)
    with_thread(10)
Your indentation is messed up. Is it a copy/paste issue, or does your code really look like that?
Missing indentation in Python makes it hard to guess what you are trying to achieve.
Nevertheless, I tried and came up with this code, which runs without errors. Does it do what you want? I frankly don't know.
I added these lines, which I think got mangled by the forum:
from datetime import datetime
import time
import threading
I "fixed" indentation
# Function to loop 100000 times
def loop_fun(name):
    for i in range(0, 10):
        time.sleep(0.0001)
    print(name)
    print("Loop executed!")

# Function to execute loop_fun without multithreading
num = 0
def without_thread(num):
    starting_time = datetime.now()
    for i in range(0, num):
        loop_fun("Called from without_thread")
    ending_time = datetime.now()
    elapsed_time = (ending_time - starting_time).total_seconds()
    print("\n\nTime Elapsed without_thread: " + str(elapsed_time) + "\n\n\n\n\n")

# Function to execute loop_fun with multithreading
def with_thread(num):
    start_time = datetime.now()
    threads_lst = []
    # Creating threads to call loop_fun
    for i in range(0, num):
        threads_lst.append(threading.Thread(target=loop_fun, args=("Called from with_thread",)))
    # Running threads
    for threads_ in threads_lst:
        threads_.start()
    # Waiting for completion of all threads
    for threads_ in threads_lst:
        threads_.join()
    end_time = datetime.now()
    elapsed_time = (end_time - start_time).total_seconds()
    print("\n\nTime Elapsed with_thread: " + str(elapsed_time) + "\n\n\n\n\n")

if __name__ == "__main__":
    without_thread(10)
    with_thread(10)
It prints output like this:
Loop executed!
Called from without_thread
Loop executed!
Called from without_thread
Loop executed!
Time Elapsed without_thread: 6.378763
I am running a multiprocessing task in Python. How can I time out a function after 60 seconds?
What I have done is shown in the snippet below:
import time
import multiprocessing as mp
from multiprocessing import Pool
from multiprocessing import Queue

def main():
    global f
    global question
    global queue
    queue = Queue()
    processes = []
    question = [16,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,18,19,21,20,23]
    cores = 5
    loww = 0
    chunksize = int((len(question) - loww) / cores)
    splits = []
    for i in range(cores):
        splits.append(loww + 1 + (i * chunksize))
    splits.append(len(question) + 1)
    print("", splits)
    args = []
    for i in range(cores):
        a = []
        arguments = (i, splits[i], splits[i + 1])
        a.append(arguments)
        args.append(a)
    print(args)
    p = Pool(cores)
    p.map(call_process, args)
    p.close()
    p.join()

def call_process(args):
    ## end this whole block if it is taking more than 1 minute
    starttime = time.time()
    lower = args[0][1]
    upper = args[0][2]
    for x in range(lower, upper):
        if time.time() >= starttime + 60:
            break
        a = question[x - 1]
        try:
            pass
            # a lot of functions are called and returned here
        except:
            continue
        # write item to file
        print('a = ', a)
    return a

main()
I want to ensure that the call_process() method does not run for more than a minute for a particular value. Currently I am using if time.time() >= starttime + 60: break, which does not work effectively because different functions and things happen inside the try/except block. What can I do?
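One possible direction, shown here as a sketch only (slow_chunk below is a made-up stand-in for call_process, not your code): submit each chunk with apply_async() and wait for it with get(timeout=...). Note that the timeout only stops the waiting; the worker process itself keeps running until the pool is terminated.

import multiprocessing
from time import sleep

def slow_chunk(seconds):
    sleep(seconds)                        # stand-in for the real per-chunk work
    return seconds

if __name__ == '__main__':
    with multiprocessing.Pool(2) as pool:
        pending = [pool.apply_async(slow_chunk, (s,)) for s in (1, 5)]
        results = []
        for job in pending:
            try:
                # you would use timeout=60 for a one-minute limit
                results.append(job.get(timeout=2))
            except multiprocessing.TimeoutError:
                results.append(None)      # give up waiting on this chunk
    print(results)                        # [1, None]; leaving the with-block terminates the pool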
I have the following scenario:
res = []

def longfunc(arg):
    # function runs arg number of steps
    # each step can take 500 ms to 2 seconds to complete
    # longfunc keeps adding the result of each step into the list res
    ...

def getResult(arg, timeout):
    # should call longfunc()
    # if longfunc() has not provided a result by timeout milliseconds then return None
    # if there is a partial result in res by timeout milliseconds then return res
    # if longfunc() ends before timeout milliseconds then return the complete result of longfunc, i.e. the res list
    ...

result = getResult(2, 500)
I am thinking of using multiprocessing.Process() to put longfunc() in a separate process, and then starting another thread that sleeps for timeout milliseconds. I can't figure out how to get the results from both of them in the main thread and decide which one finished first. Any suggestions on this approach, or other approaches, are appreciated.
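A minimal sketch of that idea (the signatures are reshaped slightly and the step work is simulated with sleep, so this is an illustration rather than your actual longfunc): the steps append to a Manager list, which the parent can still read if the worker is cut off, and join(timeout) replaces the extra sleeping thread.

import multiprocessing
from time import sleep

def longfunc(arg, res):
    for step in range(arg):
        sleep(1)                          # stand-in for one 500 ms - 2 s step
        res.append(step)                  # partial results go into the shared list

def getResult(arg, timeout_ms):
    with multiprocessing.Manager() as manager:
        res = manager.list()
        p = multiprocessing.Process(target=longfunc, args=(arg, res))
        p.start()
        p.join(timeout_ms / 1000.0)       # wait at most timeout_ms milliseconds
        if p.is_alive():
            p.terminate()                 # give up on the remaining steps
            p.join()
        partial = list(res)               # copy out before the manager shuts down
    return partial if partial else None

if __name__ == '__main__':
    print(getResult(2, 500))              # None: no step finishes within 500 ms
    print(getResult(2, 3000))             # [0, 1]: both steps finish in time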
You can use time.perf_counter, and your code would look like this:
import time
from time import sleep

ProcessTime = time.perf_counter  # this returns nearly 0 the first time you call it if the Python version is <= 3.6
ProcessTime()

def longfunc(arg, timeout):
    start = ProcessTime()
    while True:
        # Do anything
        delta = start + timeout - ProcessTime()
        if delta > 0:
            sleep(1)
        else:
            return  # Error or False
You can change the while to a for loop and, for each task, check the timeout.
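A small sketch of that for-loop variant (the task list and do_task() are made-up placeholders): the elapsed time is checked before each task, and the remaining tasks are skipped once the budget is used up.

import time
from time import sleep

def do_task(task):
    sleep(0.5)                            # stand-in for real work
    return task * 2

def run_tasks(tasks, timeout):
    start = time.perf_counter()
    results = []
    for task in tasks:
        if time.perf_counter() - start >= timeout:
            break                         # out of time, skip the remaining tasks
        results.append(do_task(task))
    return results

print(run_tasks([1, 2, 3, 4, 5], timeout=1.2))   # only the first few tasks finish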
If you are using multiprocessing then you can simply apply p.join(timeout=5), where p is a process.
Here is a simple example:
import time
from itertools import count
from multiprocessing import Process

def inc_forever():
    print('Starting function inc_forever()...')
    while True:
        time.sleep(1)
        print(next(counter))

def return_zero():
    print('Starting function return_zero()...')
    return 0

if __name__ == '__main__':
    # counter is an infinite iterator
    counter = count(0)
    p1 = Process(target=inc_forever, name='Process_inc_forever')
    p2 = Process(target=return_zero, name='Process_return_zero')
    p1.start()
    p2.start()
    p1.join(timeout=5)
    p2.join(timeout=5)
    p1.terminate()
    p2.terminate()
    if p1.exitcode is None:
        print(f'Oops, {p1} timed out!')
    if p2.exitcode == 0:
        print(f'{p2} is lucky and finished within 5 seconds!')
I think it may help you
I'm trying to run a function in Python for a specific amount of time (say 100 seconds), and then move on to run another function for a specific amount of time.
I've tried creating a counter and using while counter < (some frame number). I've also tried using datetime by doing something like
end_time = datetime.now() + timedelta(seconds=100)
while datetime.now() < end_time:
These things don't seem to be working and I don't know why.
Here is my current version of the code:
class FicTracAout90deg(object):

    def run(self, gain_yaw=1):
        end_time = datetime.now() + timedelta(seconds=10)
        while datetime.now() < end_time:
            for item in self.pubsub.listen():
                message = item['data']
                try:
                    data = json.loads(message)
                except TypeError:
                    continue
                if data['type'] == 'reset':
                    self.time_start = time.time()
                    self.heading_rate_calc.reset(self.time_start)
                else:
                    time_curr = time.time()
                    heading = data['heading']
                    intx = data['intx']
                    inty = data['inty']
                    velx = data['velx']
                    vely = data['vely']
                    velheading = data['deltaheading']
                    self.accum_heading += velheading
                    self.accum_x += velx
                    self.accum_y += vely
                    time_elapsed = time_curr - self.time_start
I'm running this with the following code:
from analogoutNoise import FicTracAoutNoise
from analogout90deg import FicTracAout90deg
import time
#for a certain amount of time run block 1
#Block 1
for x in range(2):
    client = FicTracAout90deg()
    client.run(1)
The 'run' function never seems to stop, and I don't understand why.
Likely the issue is that for item in self.pubsub.listen(): blocks waiting for messages and the iteration never finishes. Because that inner loop never ends, control never returns to the outer while loop, so the end_time condition is never re-checked.
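Here is a hedged sketch of one fix, assuming self.pubsub is a redis-py PubSub object (the item['data'] / json.loads pattern suggests it, but that is an assumption): replace the blocking listen() iterator with get_message(timeout=...), so control comes back to the while loop and end_time is re-checked regularly.

from datetime import datetime, timedelta
import json
import redis

class FicTracAout90deg(object):
    def __init__(self):
        self.pubsub = redis.Redis().pubsub()
        self.pubsub.subscribe('fictrac')                     # hypothetical channel name

    def run(self, gain_yaw=1):
        end_time = datetime.now() + timedelta(seconds=10)
        while datetime.now() < end_time:
            item = self.pubsub.get_message(timeout=0.1)      # None if nothing arrived in 0.1 s
            if item is None:
                continue
            message = item['data']
            try:
                data = json.loads(message)
            except TypeError:
                continue
            # ... handle data exactly as in the original run() ...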
Please post an SSCCE. In order to get your script to run, I had to add from datetime import datetime, timedelta. The following code does work, and is structurally similar to yours:
from datetime import datetime, timedelta
from time import sleep
tBegin = datetime.now()
tEnd = tBegin + timedelta(seconds=100)
while datetime.now() < tEnd:
    print(datetime.now())
    sleep(10)
print(datetime.now())
Since this works, it seems to me that the problem is not with the time endpoints, but rather with the internals of your while loop. It can perform a very large number of iterations in 100 seconds, which means it pins the processor for all that time and accumulates a lot of garbage. It's probably better to think up a different approach, as @iamchoosinganame suggested.
I'm making a simple client/server program in Python 3 and in the client I would like a clock or printout of the running time. I'm trying to make it in a loop that starts at the beginning of the program, but in a thread so the rest of the code keeps going.
class time_thread():
    def run(self):
        loop = 0
        while (zetime > -1):
            print(zetime);
            zetime = zetime + 1;

time_thread.start()
zetime = 0
This is what I have so far, but it doesn't work. It says:
time_thread has no attribute start()
I'm new to this and haven't used threads before, so I'm not sure how to go about this. Is there a better way?
I think this is what you're looking for:
import time, sys
zetime = 0
while (zetime > -1):
    sys.stdout.write("\r" + str(zetime))
    sys.stdout.flush()
    time.sleep(1)
    zetime = zetime + 1;
First of all, to use the threading module this way, your class has to inherit from Thread so that you can use its methods, like start().
To measure elapsed time, you can use datetime:
from datetime import datetime
from time import sleep
start_time = datetime.now()
sleep(5) # the code that you want to time
end_time = datetime.now()
print(end_time - start_time)
Just place this around the code you want to time.
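A minimal sketch of the inheritance this answer describes (the class and variable names are made up): subclassing threading.Thread gives the class start() and join(), and whatever run() does executes in the background while the rest of the client keeps going.

import threading
import time

class TimeThread(threading.Thread):
    def __init__(self):
        super().__init__(daemon=True)     # daemon: the clock dies with the main program
        self.zetime = 0

    def run(self):
        while self.zetime > -1:
            print(self.zetime)
            time.sleep(1)
            self.zetime += 1

clock = TimeThread()
clock.start()                             # inherited from Thread; runs run() in a new thread
time.sleep(3)                             # stand-in for the rest of the client code
print("main program still running")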
So let's say you define a function elapsed_time such as:
import time, sys

def elapsed_time(time_start):
    ''' time_start: a time.time()
        Goal: print the elapsed time since time_start '''
    # Allows stopping the counting once bool_elapsed_time = False in the main program
    global bool_elapsed_time
    # loop while the condition holds
    while bool_elapsed_time == True:
        # Note: the erase-and-rewrite below does not work in Spyder, but it does in a shell
        print ("Elapsed time: {} seconds".format(int(time.time() - time_start))),
        sys.stdout.flush()
        print ("\r"),
        time.sleep(1)
    # erase the line with the elapsed time to clean up the shell at the end
    sys.stdout.flush()
    print ("\r"),
Then in your program:
import threading
bool_elapsed_time = True
t = threading.Thread(name='child procs', target=elapsed_time, args=(time.time(),))
t.start()
## Here add the job you want to do as an example:
time.sleep(10)
bool_elapsed_time = False #to stop the elapsed time printing
Should do the job you want to do.
Note: I used Python 2.7, so it might be slightly different with 3.x.
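For what it's worth, here is a hedged Python 3 adaptation of the same idea (not the original author's code): print(..., end='\r', flush=True) replaces the Python 2 trailing-comma trick.

import threading
import time

bool_elapsed_time = True

def elapsed_time(time_start):
    while bool_elapsed_time:
        print("Elapsed time: {} seconds".format(int(time.time() - time_start)),
              end='\r', flush=True)
        time.sleep(1)
    print()                               # move to a fresh line once the counter stops

t = threading.Thread(target=elapsed_time, args=(time.time(),))
t.start()

time.sleep(10)                            # stand-in for the real job
bool_elapsed_time = False                 # stop the elapsed-time printing
t.join()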