I've been running an optimization process using the legacy scipy.optimize.leastsq. Now I want to switch to scipy.optimize.least_squares (I need to introduce bounds), but least_squares throws an error which I can't debug. My code is below; I am calling least_squares in exactly the same way as leastsq.
import numpy as np  # needed for np.transpose below
import scipy
from scipy.optimize import leastsq, least_squares

print(scipy.__version__)
def residuals_cmrset_as_2009JoH(x0, df):
    k_max = x0[0]
    a = x0[1]
    alpha = x0[2]
    b = x0[3]
    beta = x0[4]
    k_Ei_max = x0[5]
    k_CMI = x0[6]
    C_CMI = x0[7]
    CMI_max = x0[8]
    EVI_min = x0[9]
    EVI_max = x0[10]
    df['aet_cmrset'] = aet_cmrset_as_2009JoH(df.evi, df.gvmi, df.pet, df.rain,
                                             k_max, a, alpha, b, beta, k_Ei_max,
                                             k_CMI, C_CMI, CMI_max, EVI_min, EVI_max)
    return df.aet_cmrset - df.AET_observed
print('run calibration with leastsq')
x, flag = leastsq(residuals_cmrset_as_2009JoH,
                  np.transpose(x0),
                  args=(df_calibration))
print('this is the result from leastsq')
print(x)
print('run calibration with least_squares')
x, flag = least_squares(residuals_cmrset_as_2009JoH,
                        np.transpose(x0),
                        args=(df_calibration))
print('this is the result from least_squares')
print(x)
and this is the output:
1.2.0
run calibration with leastsq
this is the result from leastsq
[ 0.99119625 1.44145154 1.12799561 27.41023799 2.60102797 0.09771226
1.14979708 -0.24298292 1. 0. 0.9 ]
run calibration with least_squares
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-16-bc305703822b> in <module>
30 x, flag = least_squares(residuals_cmrset_as_2009JoH,
31 np.transpose(x0),
---> 32 args=(df_calibration))
33 print('this is the result from least_squares')
34 print(x)
/apps/python/3.7.2/lib/python3.7/site-packages/scipy-1.2.0-py3.7-linux-x86_64.egg/scipy/optimize/_lsq/least_squares.py in least_squares(fun, x0, jac, bounds, method, ftol, xtol, gtol, x_scale, loss, f_scale, diff_step, tr_solver, tr_options, jac_sparsity, max_nfev, verbose, args, kwargs)
796 x0 = make_strictly_feasible(x0, lb, ub)
797
--> 798 f0 = fun_wrapped(x0)
799
800 if f0.ndim != 1:
/apps/python/3.7.2/lib/python3.7/site-packages/scipy-1.2.0-py3.7-linux-x86_64.egg/scipy/optimize/_lsq/least_squares.py in fun_wrapped(x)
791
792 def fun_wrapped(x):
--> 793 return np.atleast_1d(fun(x, *args, **kwargs))
794
795 if method == 'trf':
TypeError: residuals_cmrset_as_2009JoH() takes 2 positional arguments but 11 were given
Any help will be welcome.
Both functions specify that args is supposed to be a tuple, but leastsq has this near the start:

if not isinstance(args, tuple):
    args = (args,)

There is nothing equivalent in least_squares. That step "protects" leastsq in case the user makes a mistake and passes a bare array instead of the specified tuple.
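The workaround is simply to pass args as a real one-element tuple (note the trailing comma). A minimal sketch of the corrected call, which also reflects that least_squares returns an OptimizeResult rather than an (x, flag) pair:

res = least_squares(residuals_cmrset_as_2009JoH,
                    np.transpose(x0),
                    args=(df_calibration,))  # trailing comma makes this a tuple
print(res.x)
# bounds=(lb, ub) can now be added to the call, which was the motivation for switching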
I have a constrained optimization problem which I want to solve using the scipy.optimize package.
from scipy import optimize as opt
import numpy as np
def f(x):
    return (x[0]-5)**2 + (x[1]-6)**2
# Bounds and linear constraints
bounds = opt.Bounds([0,0],[np.inf, np.inf])
lin_const = opt.LinearConstraint([[1,2],[0,0]], [-np.inf, 0], [4,0])
# Nonlinear constraints, Jacobian and Hessian
def cons_f(x):
    return [x[0]**2 - 4, np.exp(-x[0]) - 1]

def cons_J(x):
    return [[2*x[0], 0], [-np.exp(-x[0])]]

def cons_H(x, v):
    return v[0]*np.array([[2,0], [0,0]]) + v[1]*np.array([[np.exp(-x[0]), 0], [0,0]])
nonlin_const = opt.NonlinearConstraint(cons_f, -np.inf, 1, jac=cons_J, hess=cons_H)
# Solving the optimization problem
x0 = np.array([0.50, 0.75])
res = opt.minimize(f, x0, method='trust-constr',jac="2-point", hess=opt.SR1(), bounds=bounds, constraints=[lin_const, nonlin_const], options={'verbose': 1})
print(res.x)
I followed the SciPy docs closely, but I get the typical NumPy "boolean index did not match indexed array along dimension 0; dimension is 1 but corresponding boolean dimension is 2" error message. What am I missing? Thanks for your help!
Here is the full error message:
IndexError Traceback (most recent call last)
Input In [32], in <cell line: 2>()
1 x0 = np.array([0.50, 0.75])
----> 2 res = opt.minimize(f, x0, method='trust-constr',jac=cons_J, hess=opt.SR1(), bounds=bounds, constraints=[lin_const, nonlin_const], options={'verbose': 1})
3 print(res.x)
File ~\anaconda3\envs\choquetclassifier\lib\site-packages\scipy\optimize\_minimize.py:634, in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
631 return _minimize_slsqp(fun, x0, args, jac, bounds,
632 constraints, callback=callback, **options)
633 elif meth == 'trust-constr':
--> 634 return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
635 bounds, constraints,
636 callback=callback, **options)
637 elif meth == 'dogleg':
638 return _minimize_dogleg(fun, x0, args, jac, hess,
639 callback=callback, **options)
File ~\anaconda3\envs\choquetclassifier\lib\site-packages\scipy\optimize\_trustregion_constr\minimize_trustregion_constr.py:361, in _minimize_trustregion_constr(fun, x0, args, grad, hess, hessp, bounds, constraints, xtol, gtol, barrier_tol, sparse_jacobian, callback, maxiter, verbose, finite_diff_rel_step, initial_constr_penalty, initial_tr_radius, initial_barrier_parameter, initial_barrier_tolerance, factorization_method, disp)
357 prepared_constraints.append(PreparedConstraint(bounds, x0,
358 sparse_jacobian))
360 # Concatenate initial constraints to the canonical form.
--> 361 c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
362 n_vars, prepared_constraints, sparse_jacobian)
364 # Prepare all canonical constraints and concatenate it into one.
365 canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
366 for c in prepared_constraints]
File ~\anaconda3\envs\choquetclassifier\lib\site-packages\scipy\optimize\_trustregion_constr\canonical_constraint.py:352, in initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian)
350 finite_ub = ub < np.inf
351 c_ineq.append(f[finite_ub] - ub[finite_ub])
--> 352 J_ineq.append(J[finite_ub])
353 elif np.all(ub == np.inf):
354 finite_lb = lb > -np.inf
IndexError: boolean index did not match indexed array along dimension 0; dimension is 1 but corresponding boolean dimension is 2
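Most likely the culprit is cons_J: its second row has one entry instead of two, so the returned list is ragged, and after NumPy wraps it the Jacobian's first dimension ends up being 1 while the boolean mask has one entry per nonlinear constraint, i.e. length 2. Since the derivative of exp(-x[0]) - 1 with respect to x[1] is 0, the row just needs to be padded; a sketch of the corrected Jacobian:

def cons_J(x):
    # one row per constraint in cons_f, one column per variable
    return [[2*x[0], 0],
            [-np.exp(-x[0]), 0]]

(Note also that the traceback shows jac=cons_J being passed to minimize while the snippet passes jac="2-point"; the ragged Jacobian is the problem either way, since NonlinearConstraint still uses it.)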
I'm trying to obtain the signal-to-noise ratio of two variables I have cross-correlated. This involves using scipy.integrate.quad.
In short, I've got two functions: an integrand and the integral function.
The integrand takes four inputs: one array and three scalars.
This integrand function works fine: it swallows the array and spits out another.
I then vectorize the integrand using numpy.vectorize.
I then call scipy.integrate.quad in the following way:
integral, integral_err = quad(lambda array: integrand(array, scalar, scalar, scalar), 0, array)
Then I vectorize the integral as well. When I try to evaluate it on the actual values I have, it just returns
TypeError: only size-1 arrays can be converted to Python scalars
Here's the code:
def ISNR(ell, var, NFRB):
    ad = CL_tauvals + NL_tauvals
    bd = CL_disvals + NFRB
    cd = CL_dtauvals*CL_dtauvals
    res = ell*((CL_dtauvals**2)/(ad*bd + cd))
    return res  # dimensionless

ISNR = np.vectorize(ISNR)
test_val = ISNR(1, 300, 4000)

def SNR(ell, var, NFRB, FoV):
    Integrand = lambda ell: ISNR(ell, var, NFRB)
    snr = quad(Integrand, 0, 100)
    prefactors = np.sqrt(4*np.pi*FoV)
    return prefactors*np.sqrt(snr)

SNR = np.vectorize(SNR)
SNR_test = SNR(l, 100, 100, 100)
Note that l is an array of 100 values I've defined using np.logspace. The error message I get is
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-28-31c52985c5c2> in <module>
22 SNR = np.vectorize(SNR)
23
---> 24 SNR_test = SNR(l, 100, 100, 100)
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/numpy/lib/function_base.py in __call__(self, *args, **kwargs)
2089 vargs.extend([kwargs[_n] for _n in names])
2090
-> 2091 return self._vectorize_call(func=func, args=vargs)
2092
2093 def _get_ufunc_and_otypes(self, func, args):
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/numpy/lib/function_base.py in _vectorize_call(self, func, args)
2159 res = func()
2160 else:
-> 2161 ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
2162
2163 # Convert args to object arrays first
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/numpy/lib/function_base.py in _get_ufunc_and_otypes(self, func, args)
2119
2120 inputs = [arg.flat[0] for arg in args]
-> 2121 outputs = func(*inputs)
2122
2123 # Performance note: profiling indicates that -- for simple
<ipython-input-28-31c52985c5c2> in SNR(ell, var, NFRB, FoV)
16 def SNR(ell, var, NFRB, FoV):
17 Integrand = lambda ell: ISNR(ell, var, NFRB)
---> 18 snr = quad(Integrand, 0, 100)
19 prefactors = np.sqrt(4*np.pi*FoV)
20 return prefactors*np.sqrt(snr)
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/scipy/integrate/quadpack.py in quad(func, a, b, args, full_output, epsabs, epsrel, limit, points, weight, wvar, wopts, maxp1, limlst)
349
350 if weight is None:
--> 351 retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
352 points)
353 else:
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/scipy/integrate/quadpack.py in _quad(func, a, b, args, full_output, epsabs, epsrel, limit, points)
461 if points is None:
462 if infbounds == 0:
--> 463 return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
464 else:
465 return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
TypeError: only size-1 arrays can be converted to Python scalars
Can anyone explain to me what's going on and/or how to solve this? I've successfully done integrals of the exact same type with some other code recently; the main difference is just that the three scalars taken as inputs alongside the array were simply read by the integrand (which works) inside the function. I really don't understand why it's trying to convert the array into a scalar: I just want the integral evaluated at each input value.
Some of the solutions I've found tell me to vectorize and use lambda for the variable I'm integrating over... which is exactly what I've done. The other solutions I've found are just not that relevant to what I'm trying to do. If anyone has any tips, I'd be more than grateful.
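The TypeError here comes from quad, not from vectorize: quad needs an integrand that maps a scalar to a scalar, but ISNR returns a whole array for any ell because it reads the module-level CL_* arrays, and np.vectorize does not change what the function returns for a single scalar call. Note also that quad returns a (value, abserr) pair, so np.sqrt(snr) is applied to the pair. A minimal sketch of one way to restructure this, assuming the CL_* arrays are 1-D spectra tabulated on a multipole grid (the array values below are hypothetical stand-ins):

import numpy as np
from scipy.integrate import quad

# Hypothetical stand-ins for the module-level arrays ISNR reads; in the real
# code they come from the cross-correlation pipeline, tabulated on an ell grid.
ell_grid = np.logspace(0, 3, 100)
CL_tauvals = 1.0 / ell_grid
NL_tauvals = np.full_like(ell_grid, 1e-3)
CL_disvals = 1.0 / ell_grid
CL_dtauvals = 1.0 / ell_grid

def isnr_scalar(ell, var, NFRB):
    # Scalar in, scalar out: interpolate each tabulated spectrum at ell
    # instead of operating on the whole arrays at once.
    # (var is unused here, mirroring the question's ISNR.)
    ad = np.interp(ell, ell_grid, CL_tauvals + NL_tauvals)
    bd = np.interp(ell, ell_grid, CL_disvals) + NFRB
    cd = np.interp(ell, ell_grid, CL_dtauvals)**2
    return ell * cd / (ad * bd + cd)

def SNR(var, NFRB, FoV):
    # quad returns a (value, abserr) pair; unpack it before taking sqrt
    val, _err = quad(isnr_scalar, 1, 100, args=(var, NFRB))
    return np.sqrt(4 * np.pi * FoV) * np.sqrt(val)

print(SNR(100, 100, 100))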
[Homework] I am going to solve the linear system Ax=b by the preconditioned conjugate gradient method, and I use the spilu function from scipy.sparse.linalg for the preconditioner. A is a sparse symmetric 162x162 matrix. Since spilu gives an approximation to the inverse of A (say M approximates A, so spilu(A) acts as M^-1), it serves as the preconditioner. I found that we can give the preconditioner directly to the Python conjugate gradient function, but my code below does not work.
M_inverse=scipy.sparse.linalg.spilu(A)
M2=scipy.sparse.linalg.LinearOperator((162,162),M_inverse.solve)
x3=scipy.sparse.linalg.cg(A,b,M2)
TypeError Traceback (most recent call last)
<ipython-input-84-86f8f91df8d2> in <module>()
----> 1 x3=scipy.sparse.linalg.cg(A,b,M2)
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/iterative.py in cg(A, b, x0, tol, maxiter, xtype, M, callback)
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/iterative.py in non_reentrant(func, *a, **kw)
83 try:
84 d['__entered'] = True
---> 85 return func(*a, **kw)
86 finally:
87 d['__entered'] = False
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/iterative.py in cg(A, b, x0, tol, maxiter, xtype, M, callback)
219 #non_reentrant
220 def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None):
--> 221 A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)
222
223 n = len(b)
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/utils.py in make_system(A, M, x0, b, xtype)
108 x = zeros(N, dtype=xtype)
109 else:
--> 110 x = array(x0, dtype=xtype)
111 if not (x.shape == (N,1) or x.shape == (N,)):
112 raise ValueError('A and x have incompatible dimensions')
TypeError: float() argument must be a string or a number, not 'LinearOperator'
Also, the question hints that I will need to use the LinearOperator interface; I do not understand what exactly LinearOperator does and why we need it here.
Any suggestion would be appreciated!
Thanks in advance!
I think the parameters are in the wrong order:
x3=scipy.sparse.linalg.cg(A,b,M2)
In the Error message:
220 def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None):
--> 221     A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)
M2 is in the place of x0, the initial guess of the solution, not the preconditioner.
On my machine, with the correct order, the LinearOperator works fine.
Correct version:
x3=scipy.sparse.linalg.cg(A,b,M=M2)
Please use keyword arguments as often as possible.
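For completeness, a minimal end-to-end sketch, with a small SPD system standing in for the question's 162x162 matrix. LinearOperator just wraps ilu.solve so cg can apply M^-1 as a black-box linear map without ever forming the matrix; spilu wants a CSC matrix, and cg returns an (x, info) pair, so x3 in the original snippet was actually a tuple:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

n = 162
A = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csc')  # SPD stand-in
b = np.ones(n)

ilu = spla.spilu(A)                                  # incomplete LU factorization of A
M2 = spla.LinearOperator((n, n), matvec=ilu.solve)   # applies y -> (LU)^-1 y, approx A^-1 y
x3, info = spla.cg(A, b, M=M2)                       # info == 0 means it converged
print(info, np.linalg.norm(A @ x3 - b))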
I'm using Python 2.7 in Canopy and I'm trying to fit 6 parameters of a model by minimising the mean squared error between data and model predictions. I'm using COBYLA since I need bounds on the parameter values and I don't have a gradient.
Currently, I have:
import numpy as np
import scipy.optimize as opt
def cost_func(pars, y, x):
    y_hat = model_output(pars, x)
    mse = np.mean((y - y_hat)**2)
    return mse

def make_constraints(par_min, par_max):
    cons = []
    for (i, (a, b)) in enumerate(zip(par_min, par_max)):
        lower = lambda x: x[i] - a
        upper = lambda x: b - x[i]
        cons = cons + [lower] + [upper]
    return cons

def estimate_parameters(par_min, par_max, par_init, x, y):
    cons = make_constraints(par_min, par_max)
    opt_pars = opt.fmin_cobyla(cost_func, par_init, cons, args=([y, x]))
    return opt_pars
However I get the error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-63-9e84e10303e1> in <module>()
----> 1 opt_pars = estimate_parameters(par_min,par_max,par_init,x,y)
<ipython-input-61-f38615d82ee5> in estimate_parameters(par_min,par_max,par_init,x,y)
9 cons = make_constraints(par_min,par_max)
10
---> 11 opt_pars = opt.fmin_cobyla(cost_func,par_init,cons,args=([y,x]))
12 return opt_pars
/home/luke/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/scipy/optimize/cobyla.pyc in fmin_cobyla(func, x0, cons, args, consargs, rhobeg, rhoend, iprint, maxfun, disp, catol)
169
170 sol = _minimize_cobyla(func, x0, args, constraints=con,
--> 171 **opts)
172 if iprint > 0 and not sol['success']:
173 print("COBYLA failed to find a solution: %s" % (sol.message,))
/home/luke/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/scipy/optimize/cobyla.pyc in _minimize_cobyla(fun, x0, args, constraints, rhobeg, tol, iprint, maxiter, disp, catol, **unknown_options)
244 xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
245 rhoend=rhoend, iprint=iprint, maxfun=maxfun,
--> 246 dinfo=info)
247
248 if info[3] > catol:
/home/luke/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/scipy/optimize/cobyla.pyc in calcfc(x, con)
238 f = fun(x, *args)
239 for k, c in enumerate(constraints):
--> 240 con[k] = c['fun'](x, *c['args'])
241 return f
242
TypeError: <lambda>() takes exactly 1 argument (3 given)
This error isn't totally clear to me, but my understanding is that 3 arguments are being passed to my constraint functions, and I can't work out where those 3 arguments are coming from.
I've looked at other Stack Overflow questions about this and taken what I can from them, but I am still having the problem:
Specifying constraints for fmin_cobyla in scipy
Python SciPy: optimization issue fmin_cobyla : one constraint is not respected
Python: how to create many constraints for fmin_cobyla optimization using lambda functions
If the argument consargs of fmin_cobyla is None, the constraint functions are also passed *args, where args is the argument given to fmin_cobyla; that is where the extra arguments come from (x itself plus the forwarded y and x data make up the "3 given"). To pass no additional arguments to the constraint functions, use consargs=().
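With that, the call from estimate_parameters would become (a sketch using the question's own names):

opt_pars = opt.fmin_cobyla(cost_func, par_init, cons, args=(y, x), consargs=())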
Alternatively, in the function make_constraints, change this
lower = lambda x: x[i] - a
upper = lambda x: b - x[i]
to
lower = lambda x, *args: x[i] - a
upper = lambda x, *args: b - x[i]
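One more pitfall, separate from the error above: the lambdas in make_constraints close over the loop variables i, a and b, so after the loop finishes every constraint sees the values from the last iteration. A sketch of make_constraints that avoids both problems at once by building each pair of constraints in small factory functions (this reworks the question's helper, not a drop-in from any library):

def make_constraints(par_min, par_max):
    def lower_bound(i, a):
        # The factory call freezes i and a for this constraint; *args
        # absorbs whatever fmin_cobyla forwards when consargs is left as None.
        return lambda x, *args: x[i] - a

    def upper_bound(i, b):
        return lambda x, *args: b - x[i]

    cons = []
    for i, (a, b) in enumerate(zip(par_min, par_max)):
        cons.append(lower_bound(i, a))
        cons.append(upper_bound(i, b))
    return cons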
I'm trying to minimise a function of three variables; it's nonlinear, and very big and nasty. It works in Matlab just fine, but I'm trying to move over to Python (as a learning experience and for more freedom). Anyway, it does work with the 'Nelder-Mead' method of minimize, but that gives me an output that doesn't make sense, so I'm trying to add bounds to my variables.
Here's the code:
bnds = ((0, 1), (0, 1), (0, 1))
x0 = [0.004, 0.1, 0.1]
res = minimize(myObjFun, x0, method='L-BFGS-B', bounds=bnds)
print(res)
The output from Matlab gives me the three values which minimise the function: [0.2182, 0.0684, 0.0048], while Nelder-Mead in Python gave something completely different and way outside the bounds I want (everything should be between 0 and 1).
Here's the error:
File "****/fixedpoints.py", line 45, in <module>
res = minimize(myObjFun, x0, method='L-BFGS-B', bounds=bnds)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/optimize/_minimize.py", line 380, in minimize
callback=callback, **options)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/optimize/lbfgsb.py", line 304, in _minimize_lbfgsb
isave, dsave)
TypeError: _lbfgsb.setulb() 6th argument (f) can't be converted to double
We can't debug this unless you give us myObjFun, or a similar function (based on simpler or faked data) that behaves the same way during optimization. More specifically, your code will work on a well-behaved myObjFun, e.g.:
>>> import scipy.optimize
>>> def myObjFun(x):
...     return (x[0]-.2182)**4 + (x[1]-.0684)**2 + 5*(x[2]-.0048)**2 + 3.2
>>> print scipy.optimize.minimize(myObjFun, [0.004,0.1,0.1], method='L-BFGS-B', bounds=((0,1),(0,1),(0,1)))
status: 0
success: True
nfev: 18
fun: 3.200000001787815
x: array([ 0.21213686, 0.06837957, 0.00480194])
message: 'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH'
jac: array([ -8.88178420e-07, -4.08562073e-05, 1.94511074e-05])
nit: 17
Now parameter f of setulb contains the current value of the objective function as the function is being evaluated [1]:
f is a double precision variable.
On first entry f is unspecified.
On final exit f is the value of the function at x.
So the value of your objective function, computed at some point in the search space, seems to be inadvertently unconvertible to a double for some reason (hence the TypeError).
I can get a similar error (though actually an OverflowError) with, say, this objective function, which is normally well behaved but blows up whenever x[1] <= 0.0684 (which happens before the minimum is found):
>>> def myObjFun(x):
...     return (x[0]-.2182)**4 + (x[1]-.0684)**2 + 5*(x[2]-.0048)**2 + 3.2 if x[1] > 0.0684 else 10**999
>>> print scipy.optimize.minimize(myObjFun, [0.004,0.1,0.1], method='L-BFGS-B', bounds=((0,1),(0,1),(0,1)))
---------------------------------------------------------------------------
OverflowError Traceback (most recent call last)
<ipython-input-44-9204b704b51a> in <module>()
----> 1 print scipy.optimize.minimize(myObjFun, [0.004,0.1,0.1], method='L-BFGS-B', bounds=((0,1),(0,1),(0,1)))
lib/python2.7/site-packages/scipy/optimize/_minimize.pyc in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
376 elif meth == 'l-bfgs-b':
377 return _minimize_lbfgsb(fun, x0, args, jac, bounds,
--> 378 callback=callback, **options)
379 elif meth == 'tnc':
380 return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
lib/python2.7/site-packages/scipy/optimize/lbfgsb.pyc in _minimize_lbfgsb(fun, x0, args, jac, bounds, disp, maxcor, ftol, gtol, eps, maxfun, maxiter, iprint, callback, **unknown_options)
302 _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
303 pgtol, wa, iwa, task, iprint, csave, lsave,
--> 304 isave, dsave)
305 task_str = task.tostring()
306 if task_str.startswith(b'FG'):
OverflowError: _lbfgsb.setulb() 6th argument (f) can't be converted to double
So I'd carefully check your myObjFun: manually evaluate it at many points in the searched domain and verify that the returned values are reasonable, have the correct types, and match what Matlab returns.
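One quick way to locate the offending point is to wrap the objective so the float conversion that setulb would perform happens early, with the current x attached to any failure. A sketch (myObjFun below is a well-behaved stand-in for the real objective):

import scipy.optimize

def myObjFun(x):
    # stand-in for the real objective
    return (x[0]-.2182)**4 + (x[1]-.0684)**2 + 5*(x[2]-.0048)**2 + 3.2

def checked(fun):
    def wrapper(x):
        val = fun(x)
        try:
            # the same conversion _lbfgsb.setulb will attempt on its 6th argument
            return float(val)
        except (TypeError, OverflowError):
            print('bad objective value %r at x = %r' % (val, x))
            raise
    return wrapper

res = scipy.optimize.minimize(checked(myObjFun), [0.004, 0.1, 0.1],
                              method='L-BFGS-B',
                              bounds=((0, 1), (0, 1), (0, 1)))
print(res.x)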