Stop SciPy minimize after a set time - python

I use minimize from the Scipy module on Python 3.4, specifically:
resultats = minimize(margin_rate, iniprices, method='SLSQP',
                     jac=margin_rate_deriv, bounds=pricebounds,
                     options={'disp': True, 'maxiter': 2000},
                     callback=iter_report_margin_rate)
The maximum number of iterations can be set (as above), but is there a way to tell minimize to stop searching for a solution after a given set time? I looked at the general options of minimize as well as the specific options of the SLSQP solver, but could not work it out.
Thanks

You can use the callback argument to raise a warning or exception if the execution time exceeds some threshold:
import numpy as np
from scipy.optimize import minimize, rosen
import time
import warnings

class TookTooLong(Warning):
    pass

class MinimizeStopper(object):
    def __init__(self, max_sec=60):
        self.max_sec = max_sec
        self.start = time.time()

    def __call__(self, xk=None):
        elapsed = time.time() - self.start
        if elapsed > self.max_sec:
            warnings.warn("Terminating optimization: time limit reached",
                          TookTooLong)
        else:
            # you might want to report other stuff here
            print("Elapsed: %.3f sec" % elapsed)

# example usage
x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
res = minimize(rosen, x0, method='Nelder-Mead', callback=MinimizeStopper(1E-3))
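Note that a warning only reports the overrun; it does not actually interrupt the solver. A minimal sketch of one way to abort (not part of the answer above; the TimeLimitExceeded and TimedCallback names are made up for illustration) is to raise an exception from the callback and catch it around the minimize call, keeping the last iterate so you still have something to work with:

import time
from scipy.optimize import minimize, rosen

class TimeLimitExceeded(Exception):
    pass

class TimedCallback(object):
    def __init__(self, max_sec):
        self.max_sec = max_sec
        self.start = time.time()
        self.last_x = None

    def __call__(self, xk):
        self.last_x = xk             # keep the most recent iterate
        if time.time() - self.start > self.max_sec:
            raise TimeLimitExceeded  # propagates out of minimize and aborts it

x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
cb = TimedCallback(max_sec=1e-3)
try:
    res = minimize(rosen, x0, method='Nelder-Mead', callback=cb)
    best_x = res.x
except TimeLimitExceeded:
    best_x = cb.last_x               # fall back to the last iterate we saw
print(best_x)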

No. What you can do is start the optimizer in a separate process, keep track of how long it has been running and terminate it if necessary:
from __future__ import print_function

import random
import time
from multiprocessing import Process, Queue

def f(param, queue):
    # do the minimization and put the result on the queue
    # res = minimize(param)
    # queue.put(res)
    # to make this a working example I'll just sleep
    # a random amount of time
    sleep_amount = random.randint(1, 10)
    time.sleep(sleep_amount)
    res = param * sleep_amount
    queue.put(res)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(2.2, q))
    max_time = 3
    t0 = time.time()
    p.start()
    while time.time() - t0 < max_time:
        p.join(timeout=1)
        if not p.is_alive():
            break
    if p.is_alive():
        # process didn't finish in time, so terminate it
        p.terminate()
        result = None
    else:
        result = q.get()
    print(result)
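A more compact variant of the same idea (a sketch under the same assumptions, not the answer's exact code) uses multiprocessing.Pool with a timeout on the result and terminates the pool if the deadline passes; slow_task here is a stand-in for the real minimization:

import time
from multiprocessing import Pool, TimeoutError

def slow_task(param):
    # stand-in for the actual minimization
    time.sleep(5)
    return param * 2

if __name__ == '__main__':
    max_time = 3  # seconds
    pool = Pool(processes=1)
    async_result = pool.apply_async(slow_task, (2.2,))
    try:
        result = async_result.get(timeout=max_time)
        pool.close()
    except TimeoutError:
        pool.terminate()  # kill the worker that ran out of time
        result = None
    pool.join()
    print(result)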

Related

nvtx markers with Python Multiprocessing

I'm trying to use nvtx markers along with multiprocessing pool in Python, but when only a child process calls an annotated function the operation doesn't appear in the profiling report. Is there any way to get around this, or is this a limitation of python processes? Here's some example code to replicate:
import os
import time
from multiprocessing import Pool, shared_memory
import numpy as np
import nvtx

N_SAMPLES = int(1e6)
SIGNAL = np.random.randn(N_SAMPLES) + 1j * np.random.randn(N_SAMPLES)

@nvtx.annotate(color="red")
def create_shm_array(signal):
    # Store the signal in shared memory to share across processes
    shm = shared_memory.SharedMemory(create=True, size=signal.nbytes)
    shared_array = np.ndarray(signal.shape, dtype=signal.dtype, buffer=shm.buf)
    shared_array[:] = signal[:]
    return shm

def worker(shm_name):
    shm = shared_memory.SharedMemory(name=shm_name)
    sig = np.ndarray((N_SAMPLES,), dtype=complex, buffer=shm.buf)
    return expensive_op(sig)

@nvtx.annotate(color="blue")
def expensive_op(sig):
    time.sleep(2)
    return np.sum(sig)

def clean_shm(shm_name):
    shm = shared_memory.SharedMemory(name=shm_name)
    shm.close()
    shm.unlink()

if __name__ == "__main__":
    print(f"Total num_bytes: {SIGNAL.nbytes} B | {SIGNAL.nbytes / 1e9} GB")
    test = np.random.randn(10)
    expensive_op(test)
    shared_mem = create_shm_array(SIGNAL)
    with Pool(os.cpu_count()) as p:
        p.map(worker, [shared_mem.name] * 2)
    clean_shm(shared_mem.name)
Here's the Nvidia Nsight Systems timeline: the marker appears during the first call from the parent process, but does not appear when the function is called by the child processes.
By default (on Linux), Python's multiprocessing forks new processes; we need it to spawn them instead. Working code below.
import os
import time
from multiprocessing import Pool, shared_memory, get_context
import numpy as np
import nvtx

N_SAMPLES = int(1e6)
SIGNAL = np.random.randn(N_SAMPLES) + 1j * np.random.randn(N_SAMPLES)

@nvtx.annotate(color="red")
def create_shm_array(signal):
    # Store the signal in shared memory to share across processes
    shm = shared_memory.SharedMemory(create=True, size=signal.nbytes)
    shared_array = np.ndarray(signal.shape, dtype=signal.dtype, buffer=shm.buf)
    shared_array[:] = signal[:]
    return shm

def worker(shm_name):
    shm = shared_memory.SharedMemory(name=shm_name)
    sig = np.ndarray((N_SAMPLES,), dtype=complex, buffer=shm.buf)
    return expensive_op(sig)

@nvtx.annotate(color="blue")
def expensive_op(sig):
    time.sleep(2)
    return np.sum(sig)

def clean_shm(shm_name):
    shm = shared_memory.SharedMemory(name=shm_name)
    shm.close()
    shm.unlink()

if __name__ == "__main__":
    print(f"Total num_bytes: {SIGNAL.nbytes} B | {SIGNAL.nbytes / 1e9} GB")
    test = np.random.randn(10)
    expensive_op(test)
    shared_mem = create_shm_array(SIGNAL)
    with get_context("spawn").Pool(os.cpu_count()) as p:
        p.map(worker, [shared_mem.name] * 2)
    clean_shm(shared_mem.name)
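An equivalent approach (a sketch, not part of the answer above) is to set the start method globally once at program start, so every Pool and Process created afterwards uses spawn:

import multiprocessing as mp

if __name__ == "__main__":
    # make all pools/processes created afterwards use "spawn" instead of "fork"
    mp.set_start_method("spawn")
    with mp.Pool(mp.cpu_count()) as p:
        p.map(print, ["hello from a spawned worker"] * 2)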

Python concurrent.futures.ProcessPoolExecutor() not executing methods inside objects

I am trying to concurrently execute methods from two objects for a computer vision task. My idea is to use two different feature detectors to compute their respective feature descriptions inside a base class.
To that end, I built the following toy example to understand the Python concurrent.futures.ProcessPoolExecutor class.
When executed, the first part of the code runs as expected: 20 "Heartbeat" strings are printed (10 from each object's method), and the sums for the two objects come out correctly as 100 and -100.
But in the second half of the code, it appears the ProcessPoolExecutor is not running the do_math(self, numx) method at all. What am I doing wrong here?
With best,
Azmyin
import numpy as np
import concurrent.futures as cf
import time

def current_milli_time():
    # CORE FUNCTION
    # Function that returns a time tick in milliseconds
    return round(time.time() * 1000)

class masterClass(object):
    super_multiplier = 1  # Class variable

    def __init__(self, ls):
        # Attributes of masterClass
        self.var1 = ls[0]
        self.sumx = ls[1]

    def __rep__(self):
        print(f"sumx value -- {self.sumx}")

    def apply_sup_mult(self, var_in):
        self.sumx = self.sumx + (var_in * masterClass.super_multiplier)
        time.sleep(0.025)
        print(f"Hearbeat!!")

    # This is a regular method
    def do_math(self, numx):
        self.apply_sup_mult(numx)

ls = [10, 0]
ls2 = [-10, 0]
numx = 10

obj1 = masterClass(ls)
obj2 = masterClass(ls2)

t1 = current_milli_time()
# Run methods one by one
for _ in range(numx):
    obj1.do_math(ls[0])
    obj2.do_math(ls2[0])
obj1.__rep__()
obj2.__rep__()
t2 = current_milli_time()
print(f"Time taken -- {t2 - t1} ms")
print()

## Using multiprocessing to concurrently run two methods
# Intentionally reinitialize objects
obj1 = masterClass(ls)
obj1 = masterClass(ls2)

t1 = current_milli_time()
resx = []
with cf.ProcessPoolExecutor() as executor:
    for i in range(numx):
        #fs = [executor.submit(obj3.do_math, ls[0]), executor.submit(obj4.do_math, ls2[0])]
        f1 = executor.submit(obj1.do_math, ls[0])
        f2 = executor.submit(obj2.do_math, ls2[0])
        # for i,f in enumerate(cf.as_completed(fs)):
        #     print(f"Done with {f}")

# State of sumx
obj1.__rep__()
obj2.__rep__()
t2 = current_milli_time()
print(f"Time taken -- {t2 - t1} ms")

How to accumulate results from pool.apply_async call?

I want to make calls to pool.apply_async(func) and accumulate the results as soon as they are available without waiting for each other.
import multiprocessing
import numpy as np

chrNames = ['chr1', 'chr2', 'chr3']
sims = [1, 2, 3]

def accumulate_chrBased_simBased_result(chrBased_simBased_result, accumulatedSignalArray, accumulatedCountArray):
    signalArray = chrBased_simBased_result[0]
    countArray = chrBased_simBased_result[1]
    accumulatedSignalArray += signalArray
    accumulatedCountArray += countArray

def func(chrName, simNum):
    print('%s %d' % (chrName, simNum))
    result = []
    signal_array = np.full((10000,), simNum, dtype=float)
    count_array = np.full((10000,), simNum, dtype=int)
    result.append(signal_array)
    result.append(count_array)
    return result

if __name__ == '__main__':
    accumulatedSignalArray = np.zeros((10000,), dtype=float)
    accumulatedCountArray = np.zeros((10000,), dtype=int)
    numofProcesses = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(numofProcesses)
    for chrName in chrNames:
        for simNum in sims:
            result = pool.apply_async(func, (chrName, simNum,))
            accumulate_chrBased_simBased_result(result.get(), accumulatedSignalArray, accumulatedCountArray)
    pool.close()
    pool.join()
    print(accumulatedSignalArray)
    print(accumulatedCountArray)
Written this way, each pool.apply_async call waits for the previous one to finish.
Is there a way to get rid of this waiting?
You are calling result.get() on each iteration, which makes the main process wait for that call to finish before submitting the next one.
Below is a working version, with prints showing that accumulation happens as soon as each func call is ready, and random sleeps added to create sizable differences in execution time.
import multiprocessing
import numpy as np
from time import time, sleep
from random import random

chrNames = ['chr1', 'chr2', 'chr3']
sims = [1, 2, 3]

def accumulate_chrBased_simBased_result(chrBased_simBased_result, accumulatedSignalArray, accumulatedCountArray):
    signalArray = chrBased_simBased_result[0]
    countArray = chrBased_simBased_result[1]
    accumulatedSignalArray += signalArray
    accumulatedCountArray += countArray

def func(chrName, simNum):
    result = []
    sleep(random() * 5)
    signal_array = np.full((10000,), simNum, dtype=float)
    count_array = np.full((10000,), simNum, dtype=int)
    result.append(signal_array)
    result.append(count_array)
    print('%s %d' % (chrName, simNum))
    return result

if __name__ == '__main__':
    accumulatedSignalArray = np.zeros((10000,), dtype=float)
    accumulatedCountArray = np.zeros((10000,), dtype=int)
    numofProcesses = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(numofProcesses)
    results = []
    for chrName in chrNames:
        for simNum in sims:
            results.append(pool.apply_async(func, (chrName, simNum,)))
    for i in results:
        print(i)
    while results:
        for r in results[:]:
            if r.ready():
                print('{} is ready'.format(r))
                accumulate_chrBased_simBased_result(r.get(), accumulatedSignalArray, accumulatedCountArray)
                results.remove(r)
    pool.close()
    pool.join()
    print(accumulatedSignalArray)
    print(accumulatedCountArray)
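Another option (a sketch, not part of the answer above) is to pass apply_async a callback, so the pool hands each result back to the main process as soon as it is ready; np.add(..., out=...) is used so the accumulator arrays are updated in place:

import multiprocessing
import numpy as np

chrNames = ['chr1', 'chr2', 'chr3']
sims = [1, 2, 3]

def func(chrName, simNum):
    signal_array = np.full((10000,), simNum, dtype=float)
    count_array = np.full((10000,), simNum, dtype=int)
    return signal_array, count_array

if __name__ == '__main__':
    accumulatedSignalArray = np.zeros((10000,), dtype=float)
    accumulatedCountArray = np.zeros((10000,), dtype=int)

    def accumulate(result):
        # called in the main process as each worker result arrives
        signalArray, countArray = result
        # np.add with out= mutates the arrays in place without rebinding the names
        np.add(accumulatedSignalArray, signalArray, out=accumulatedSignalArray)
        np.add(accumulatedCountArray, countArray, out=accumulatedCountArray)

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    for chrName in chrNames:
        for simNum in sims:
            pool.apply_async(func, (chrName, simNum), callback=accumulate)
    pool.close()
    pool.join()
    print(accumulatedSignalArray)
    print(accumulatedCountArray)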

Lack of scaling for python's multiprocessing pool

I am writing a simple python script that I need to scale to many threads. For simplicity, I have replaced the actual function I need to use with a matrix matrix multiply. I am having trouble getting my code to scale with the number of processors. Any advice to help me get the correct speedup would be helpful! My code and results are as follows:
import numpy as np
import time
import math
from multiprocessing.dummy import Pool

res = 4
# we must iterate over all of these values
wavektests = np.linspace(.1, 2.5, res)
omegaratios = np.linspace(.1, 2.5, res)
wavekmat, omegamat = np.meshgrid(wavektests, omegaratios)

def solve_for_omegaratio(ind):
    # obtain the indices for this run
    x_ind = ind % res
    y_ind = math.floor(ind / res)
    # obtain the value for this run
    wavek = wavektests[x_ind]
    omega = omegaratios[y_ind]
    # do some work (I have replaced the real function with this)
    randmat = np.random.rand(4000, 4000)
    nop = np.linalg.matrix_power(randmat, 3)
    # obtain a scalar value
    value = x_ind + y_ind**2.0
    return value

list_ind = range(res**2)

# Serial code execution
t0_proc = time.clock()
t0_wall = time.time()
threads = 0
dispersion = map(solve_for_omegaratio, list_ind)
displist = list(dispersion)
t1_proc = time.clock()
t1_wall = time.time()
print('serial execution')
print('wall clock time = ', t1_wall - t0_wall)
print('processor clock time = ', t1_proc - t0_proc)
print('------------------------------------------------')

# Using pool defaults
t0_proc = time.clock()
t0_wall = time.time()
if __name__ == '__main__':
    pool = Pool()
    dispersion = pool.map(solve_for_omegaratio, list_ind)
    displist = list(dispersion)
t1_proc = time.clock()
t1_wall = time.time()
pool.close
print('num of threads = default')
print('wall clock time = ', t1_wall - t0_wall)
print('processor clock time = ', t1_proc - t0_proc)
print('------------------------------------------------')

# Using 4 threads
t0_proc = time.clock()
t0_wall = time.time()
threads = 4
if __name__ == '__main__':
    pool = Pool(threads)
    dispersion = pool.map(solve_for_omegaratio, list_ind)
    displist = list(dispersion)
t1_proc = time.clock()
t1_wall = time.time()
pool.close
print('num of threads = ' + str(threads))
print('wall clock time = ', t1_wall - t0_wall)
print('processor clock time = ', t1_proc - t0_proc)
print('------------------------------------------------')
Results:
serial execution
wall clock time = 66.1561758518219
processor clock time = 129.16376499999998
------------------------------------------------
num of threads = default
wall clock time = 81.86436200141907
processor clock time = 263.45369
------------------------------------------------
num of threads = 4
wall clock time = 77.63390111923218
processor clock time = 260.66285300000004
------------------------------------------------
Because Python has a GIL (https://wiki.python.org/moin/GlobalInterpreterLock), "Python-native" threads cannot execute truly concurrently and thus cannot improve the performance of CPU-bound tasks like math. They can parallelize I/O-bound tasks effectively (e.g. API calls that spend almost all their time waiting on the network). Using multiprocessing rather than dummy's thread-backed implementation will create multiple processes, not threads, which can run concurrently (at the cost of significant memory overhead).
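A minimal sketch of that change, reusing names from the question (not the poster's exact script): swap multiprocessing.dummy.Pool for multiprocessing.Pool, keep the worker function at module level so child processes can import it, and create the pool behind the __main__ guard:

import math
import time
import numpy as np
from multiprocessing import Pool   # was: from multiprocessing.dummy import Pool

res = 4
wavektests = np.linspace(.1, 2.5, res)
omegaratios = np.linspace(.1, 2.5, res)

def solve_for_omegaratio(ind):
    # same CPU-bound stand-in work as in the question
    x_ind = ind % res
    y_ind = math.floor(ind / res)
    randmat = np.random.rand(4000, 4000)
    np.linalg.matrix_power(randmat, 3)
    return x_ind + y_ind ** 2.0

if __name__ == '__main__':
    t0 = time.time()
    with Pool(4) as pool:              # 4 worker processes, not threads
        displist = pool.map(solve_for_omegaratio, range(res ** 2))
    print('wall clock time = ', time.time() - t0)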

Python 2.x - sleep call at millisecond level on Windows

I was given some very good hints in this forum about how to code a clock object in Python 2. I've got some code working now. It's a clock that 'ticks' at 60 FPS:
import sys
import time

class Clock(object):

    def __init__(self):
        self.init_os()
        self.fps = 60.0
        self._tick = 1.0 / self.fps
        print "TICK", self._tick
        self.check_min_sleep()
        self.t = self.timestamp()

    def init_os(self):
        if sys.platform == "win32":
            self.timestamp = time.clock
            self.wait = time.sleep

    def timeit(self, f, args):
        t1 = self.timestamp()
        f(*args)
        t2 = self.timestamp()
        return t2 - t1

    def check_min_sleep(self):
        """checks the min sleep time on the system"""
        runs = 1000
        times = [self.timeit(self.wait, (0.001, )) for n in xrange(runs)]
        average = sum(times) / runs
        print "average min sleep time:", round(average, 6)
        sort = sorted(times)
        print "fastest, slowest", sort[0], sort[-1]

    def tick(self):
        next_tick = self.t + self._tick
        t = self.timestamp()
        while t < next_tick:
            t = self.timestamp()
        self.t = t

if __name__ == "__main__":
    clock = Clock()
The clock doesn't do too badly, but to avoid a busy loop I'd like Windows to sleep for less than the usual ~15 milliseconds. On my system (64-bit Windows 10), starting the clock reports an average minimum sleep of about 15-16 ms when Python is the only application running. That is far too long a minimum sleep to avoid a busy loop.
Does anybody know how I can get Windows to sleep less than that value?
You can temporarily lower the timer period to the wPeriodMin value returned by timeGetDevCaps. The following defines a timer_resolution context manager that calls the timeBeginPeriod and timeEndPeriod functions.
import timeit
import contextlib
import ctypes
from ctypes import wintypes

winmm = ctypes.WinDLL('winmm')

class TIMECAPS(ctypes.Structure):
    _fields_ = (('wPeriodMin', wintypes.UINT),
                ('wPeriodMax', wintypes.UINT))

def _check_time_err(err, func, args):
    if err:
        raise WindowsError('%s error %d' % (func.__name__, err))
    return args

winmm.timeGetDevCaps.errcheck = _check_time_err
winmm.timeBeginPeriod.errcheck = _check_time_err
winmm.timeEndPeriod.errcheck = _check_time_err

@contextlib.contextmanager
def timer_resolution(msecs=0):
    caps = TIMECAPS()
    winmm.timeGetDevCaps(ctypes.byref(caps), ctypes.sizeof(caps))
    msecs = min(max(msecs, caps.wPeriodMin), caps.wPeriodMax)
    winmm.timeBeginPeriod(msecs)
    yield
    winmm.timeEndPeriod(msecs)

def min_sleep():
    setup = 'import time'
    stmt = 'time.sleep(0.001)'
    return timeit.timeit(stmt, setup, number=1000)
Example
>>> min_sleep()
15.6137827
>>> with timer_resolution(msecs=1): min_sleep()
...
1.2827173000000016
The original timer resolution is restored after the with block:
>>> min_sleep()
15.6229814
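Applied to the Clock in the question, hypothetical glue code (assuming the Clock class and the timer_resolution context manager above) would enable the 1 ms resolution only while the clock is running:

if __name__ == "__main__":
    clock = Clock()
    with timer_resolution(msecs=1):
        for _ in range(600):   # run for about 10 seconds at 60 FPS
            clock.tick()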
