dimension error with scipy optimize.minimize - python

I want to minimise the objective function objective_function_2() subject to the inequality constraint 69 < T(x) < 71. The goal of the optimisation is to find the set of Fourier series parameters [c0, c1, c2, wavelength] that minimises the objective function (a curve-fitting problem). fx() calculates the residuals between the data and the estimated values of the Fourier series, and T(x) is the function calculate_tightness(), which calculates the range of the Fourier series estimated at each iteration.
The problem is that I don't understand why I get this error message: ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s)
objective_function_2() produces an n-vector of residuals, while calculate_tightness() produces a single scalar value.
Does anyone have any idea why this is happening?
import numpy as np
from scipy.optimize import LinearConstraint, NonlinearConstraint, BFGS, minimize
ref_fld = np.array([-479.9024323 , -469.80142114, -459.70040999, -449.59939883,
-439.49838768, -429.39737652, -419.29636536, -409.19535421,
-399.09434305, -388.9933319 , -378.89232074, -368.79130958,
-358.69029843, -348.58928727, -338.48827611, -328.38726496,
-318.2862538 , -308.18524265, -298.08423149, -287.98322033,
-277.88220918, -267.78119802, -257.68018686, -247.57917571,
-237.47816455, -227.3771534 , -217.27614224, -207.17513108,
-197.07411993, -186.97310877, -176.87209762, -166.77108646,
-156.6700753 , -146.56906415, -136.46805299, -126.36704183,
-116.26603068, -106.16501952, -96.06400837, -85.96299721,
-75.86198605, -65.7609749 , -55.65996374, -45.55895258,
-35.45794143, -25.35693027, -15.25591912, -5.15490796,
4.9461032 , 15.04711435, 25.14812551, 35.24913667,
45.35014782, 55.45115898, 65.55217013, 75.65318129,
85.75419245, 95.8552036 , 105.95621476, 116.05722591,
126.15823707, 136.25924823, 146.36025938, 156.46127054,
166.5622817 , 176.66329285, 186.76430401, 196.86531516,
206.96632632, 217.06733748, 227.16834863, 237.26935979,
247.37037095, 257.4713821 , 267.57239326, 277.67340441,
287.77441557, 297.87542673, 307.97643788, 318.07744904,
328.1784602 , 338.27947135, 348.38048251, 358.48149366,
368.58250482, 378.68351598, 388.78452713, 398.88553829,
408.98654944, 419.0875606 , 429.18857176, 439.28958291,
449.39059407, 459.49160523, 469.59261638, 479.69362754,
489.79463869, 499.89564985, 509.99666101, 520.09767216])
fld = np.array([-300.41522506, -120.9280477 , -274.77413647, 494.45656622,
-44.00495929, -479.90233432, 58.55913797, -326.056248 ,
84.20018256, 443.17449743])
flr = np.array([-13.20752855, 38.56985419, 44.28484794, -51.64708478,
-10.50558888, -49.95878419, -53.88137785, -12.73304144,
-54.2792669 , -7.59544309])
def fourier_series(x, c0, c1, c2, w):
    """
    Parameters
    ----------
    x
    c0
    c1
    c2
    w

    Returns
    -------
    """
    v = np.array(x.astype(float))
    # v.fill(c0)
    v = c0 + c1 * np.cos(2 * np.pi / w * x) + c2 * np.sin(2 * np.pi / w * x)
    return np.rad2deg(np.arctan(v))

def calculate_splot(ref_fold_frame, popt):
    return np.rad2deg(np.arctan(fourier_series(ref_fold_frame, *popt)))

def calculate_tightness(theta):
    curve = calculate_splot(ref_fld, theta)
    amax = np.arctan(np.deg2rad(curve.max()))
    amin = np.arctan(np.deg2rad(curve.min()))
    return 180 - np.rad2deg(2 * np.tan((amax - amin) / 2))

def fx(theta, x, y):
    # function to calculate residuals for optimisation (least squares)
    return np.tan(np.deg2rad(y)) - fourier_series(x, *theta)

def objective_function_2(theta):
    x = fld
    y = flr
    return fx(theta, x, y) + calculate_tightness(theta)

Cx = NonlinearConstraint(calculate_tightness, 69, 71, jac='2-point', hess=BFGS())
x0 = [0, 1, 1, 500]
res = minimize(objective_function_2, x0, constraints=[Cx], method='trust-constr',
               options={'verbose': 1})
Error message
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_91/1210121354.py in <module>
1 Cx = NonlinearConstraint(calculate_tightness, 69, 71, jac='2-point', hess=BFGS()) #setup_optimisation_constraints(constraints_matrix, [60, -1e-2], [71, 1e-4], linear=False)
2 x0 = theta[recovery<1][0]
----> 3 res = minimize(objective_function_2, x0, constraints=[Cx], method='trust-constr',
4
5 options={'verbose': 1}, )
/mnt/c/Users/rcha0044/LoopPhD/LOOP_ENV/env_18/lib/python3.8/site-packages/scipy/optimize/_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
632 constraints, callback=callback, **options)
633 elif meth == 'trust-constr':
--> 634 return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
635 bounds, constraints,
636 callback=callback, **options)
/mnt/c/Users/rcha0044/LoopPhD/LOOP_ENV/env_18/lib/python3.8/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py in _minimize_trustregion_constr(fun, x0, args, grad, hess, hessp, bounds, constraints, xtol, gtol, barrier_tol, sparse_jacobian, callback, maxiter, verbose, finite_diff_rel_step, initial_constr_penalty, initial_tr_radius, initial_barrier_parameter, initial_barrier_tolerance, factorization_method, disp)
507
508 elif method == 'tr_interior_point':
--> 509 _, result = tr_interior_point(
510 objective.fun, objective.grad, lagrangian_hess,
511 n_vars, canonical.n_ineq, canonical.n_eq,
/mnt/c/Users/rcha0044/LoopPhD/LOOP_ENV/env_18/lib/python3.8/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py in tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, x0, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, jac_eq0, stop_criteria, enforce_feasibility, xtol, state, initial_barrier_parameter, initial_tolerance, initial_penalty, initial_trust_radius, factorization_method)
302 s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq))
303 # Define barrier subproblem
--> 304 subprob = BarrierSubproblem(
305 x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac,
306 barrier_parameter, tolerance, enforce_feasibility,
/mnt/c/Users/rcha0044/LoopPhD/LOOP_ENV/env_18/lib/python3.8/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py in __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, barrier_parameter, tolerance, enforce_feasibility, global_stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, jac_eq0)
51 self.xtol = xtol
52 self.fun0 = self._compute_function(fun0, constr_ineq0, s0)
---> 53 self.grad0 = self._compute_gradient(grad0)
54 self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0)
55 self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0)
/mnt/c/Users/rcha0044/LoopPhD/LOOP_ENV/env_18/lib/python3.8/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py in _compute_gradient(self, g)
137
138 def _compute_gradient(self, g):
--> 139 return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq)))
140
141 def _compute_jacobian(self, J_eq, J_ineq, s):
<__array_function__ internals> in hstack(*args, **kwargs)
/mnt/c/Users/rcha0044/LoopPhD/LOOP_ENV/env_18/lib/python3.8/site-packages/numpy/core/shape_base.py in hstack(tup)
343 return _nx.concatenate(arrs, 0)
344 else:
--> 345 return _nx.concatenate(arrs, 1)
346
347
<__array_function__ internals> in concatenate(*args, **kwargs)
ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s)
SciPy/NumPy/Python version information
SciPy 1.7.1, NumPy 1.21.3, Python 3.8.10
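For anyone debugging the same traceback: minimize() expects the objective to return a single scalar, but objective_function_2() returns fx(theta, x, y) + calculate_tightness(theta), which broadcasts the scalar tightness over the 10-element residual vector and therefore returns a vector. The finite-difference gradient of that "objective" is then 2-D, and trust-constr fails inside np.hstack with exactly the ValueError above. A minimal sketch of a scalar objective, assuming a least-squares-style fit is what was intended:

def objective_function_2(theta):
    # reduce the residual vector to one number (sum of squared residuals)
    # before adding the scalar tightness term, so minimize() sees a float
    residuals = fx(theta, fld, flr)
    return np.sum(residuals ** 2) + calculate_tightness(theta)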

Related

ValueError: too many values to unpack, while using LMfit with solve_ivp

I am facing ValueError: too many values to unpack (expected 2) while optimizing the parameters of a system of ODEs using solve_ivp. In fact, I get the same error when I try to use solve_ivp instead of odeint in this SO answer, which you may use as a minimal working example since, as far as I can tell, it has the same problem. The only changes I made to that code are swapping the positions of y and t in the arguments of f and, similarly, solving with x = solve_ivp(f, t, x0, args=(paras,)) instead of odeint in g.
Here's the full code for the sake of convenience:
# import libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint, solve_ivp
from lmfit import minimize, Parameters, Parameter, report_fit
def f(t, y, paras):
    """
    Your system of differential equations
    """
    x1 = y[0]
    x2 = y[1]
    x3 = y[2]

    try:
        k0 = paras['k0'].value
        k1 = paras['k1'].value
    except KeyError:
        k0, k1 = paras

    # the model equations
    f0 = -k0 * x1
    f1 = k0 * x1 - k1 * x2
    f2 = k1 * x2
    return [f0, f1, f2]

def g(t, x0, paras):
    """
    Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
    """
    x = solve_ivp(f, t, x0, args=(paras,))
    return x

def residual(paras, t, data):
    """
    compute the residual between actual data and fitted data
    """
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)

    # you only have data for one of your variables
    x2_model = model[:, 1]
    return (x2_model - data).ravel()
# initial conditions
x10 = 5.
x20 = 0
x30 = 0
y0 = [x10, x20, x30]
# measured data
t_measured = np.linspace(0, 9, 10)
x2_measured = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
plt.figure()
plt.scatter(t_measured, x2_measured, marker='o', color='b', label='measured data', s=75)
# set parameters including bounds; you can also fix parameters (use vary=False)
params = Parameters()
params.add('x10', value=x10, vary=False)
params.add('x20', value=x20, vary=False)
params.add('x30', value=x30, vary=False)
params.add('k0', value=0.2, min=0.0001, max=2.)
params.add('k1', value=0.3, min=0.0001, max=2.)
# fit model
result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq') # leastsq nelder
# check results of the fit
data_fitted = g(np.linspace(0., 9., 100), y0, result.params)
# plot fitted data
plt.plot(np.linspace(0., 9., 100), data_fitted[:, 1], '-', linewidth=2, color='red', label='fitted data')
plt.legend()
plt.xlim([0, max(t_measured)])
plt.ylim([0, 1.1 * max(data_fitted[:, 1])])
# display fitted statistics
report_fit(result)
plt.show()
Here's the error traceback:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/swami/work/scrap/lmfit_example1.ipynb Cell 4 in <cell line: 67>()
64 params.add('k1', value=0.3, min=0.0001, max=2.)
66 # fit model
---> 67 result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq') # leastsq nelder
68 # check results of the fit
69 data_fitted = g(np.linspace(0., 9., 100), y0, result.params)
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:2600, in minimize(fcn, params, method, args, kws, iter_cb, scale_covar, nan_policy, reduce_fcn, calc_covar, max_nfev, **fit_kws)
2460 """Perform the minimization of the objective function.
2461
2462 The minimize function takes an objective function to be minimized,
(...)
2594
2595 """
2596 fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
2597 iter_cb=iter_cb, scale_covar=scale_covar,
2598 nan_policy=nan_policy, reduce_fcn=reduce_fcn,
2599 calc_covar=calc_covar, max_nfev=max_nfev, **fit_kws)
-> 2600 return fitter.minimize(method=method)
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:2369, in Minimizer.minimize(self, method, params, **kws)
2366 if (key.lower().startswith(user_method) or
2367 val.lower().startswith(user_method)):
2368 kwargs['method'] = val
-> 2369 return function(**kwargs)
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:1693, in Minimizer.leastsq(self, params, max_nfev, **kws)
1691 result.call_kws = lskws
1692 try:
-> 1693 lsout = scipy_leastsq(self.__residual, variables, **lskws)
1694 except AbortFitException:
1695 pass
File ~/.local/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py:410, in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
408 if not isinstance(args, tuple):
409 args = (args,)
--> 410 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
411 m = shape[0]
413 if n > m:
File ~/.local/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py:24, in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
22 def _check_func(checker, argname, thefunc, x0, args, numinputs,
23 output_shape=None):
---> 24 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
25 if (output_shape is not None) and (shape(res) != output_shape):
26 if (output_shape[0] != 1):
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:586, in Minimizer.__residual(self, fvars, apply_bounds_transformation)
583 self.result.success = False
584 raise AbortFitException(f"fit aborted: too many function evaluations {self.max_nfev}")
--> 586 out = self.userfcn(params, *self.userargs, **self.userkws)
588 if callable(self.iter_cb):
589 abort = self.iter_cb(params, self.result.nfev, out,
590 *self.userargs, **self.userkws)
/home/swami/work/scrap/lmfit_example1.ipynb Cell 4 in residual(paras, t, data)
33 """
34 compute the residual between actual data and fitted data
35 """
37 x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
---> 38 model = g(t, x0, paras)
40 # you only have data for one of your variables
41 x2_model = model[:, 1]
/home/swami/work/scrap/lmfit_example1.ipynb Cell 4 in g(t, x0, paras)
23 def g(t, x0, paras):
24 """
25 Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
26 """
---> 27 x = solve_ivp(f, t, x0, args=(paras,))
28 return x
File ~/.local/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py:512, in solve_ivp(fun, t_span, y0, method, t_eval, dense_output, events, vectorized, args, **options)
507 if method not in METHODS and not (
508 inspect.isclass(method) and issubclass(method, OdeSolver)):
509 raise ValueError("`method` must be one of {} or OdeSolver class."
510 .format(METHODS))
--> 512 t0, tf = map(float, t_span)
514 if args is not None:
515 # Wrap the user's fun (and jac, if given) in lambdas to hide the
516 # additional parameters. Pass in the original fun as a keyword
517 # argument to keep it in the scope of the lambda.
518 try:
ValueError: too many values to unpack (expected 2)
Any idea what the problem might be?
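For anyone hitting the same error: the traceback ends at t0, tf = map(float, t_span), because solve_ivp's second argument is a 2-tuple (t0, tf), not the full array of time points that odeint accepts. solve_ivp also returns an OdeResult object rather than the solution array, so model[:, 1] in residual() would fail next. A minimal sketch of g() adjusted for both, using the names from the code above:

def g(t, x0, paras):
    """Solve x'(t) = f(t, x, k) with x(0) = x0 at the times in t."""
    # t_span must be a (t0, tf) pair; t_eval requests the measurement times
    sol = solve_ivp(f, (t[0], t[-1]), x0, t_eval=t, args=(paras,))
    # sol.y has shape (n_states, n_times); transpose to match odeint's layout
    return sol.y.T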

How to call scipy.minimize over part of an array?

I'm struggling to get scipy.minimize to work when the optimization parameter is an array and I'm only looking at part of the array inside the objective function.
import numpy as np
from scipy.optimize import minimize
n = 5
X_true = np.random.normal(size=(n,n))
X_guess = np.random.normal(size=(n,n))
indices = np.triu_indices(n)
def mean_square_error(X):
    return ((X.flatten() - X_true.flatten()) ** 2).mean()

def mean_square_error_over_indices(X):
    return ((X[indices].flatten() - X_true[indices].flatten()) ** 2).mean()
# works fine
print(mean_square_error(X_guess))
# works fine
print(mean_square_error_over_indices(X_guess))
# works fine (flatten is necessary inside the objective function)
print(minimize(mean_square_error, X_guess).x)
# IndexError
print(minimize(mean_square_error_over_indices, X_guess).x)
The traceback:
IndexError Traceback (most recent call last)
<ipython-input-1-08d40604e22a> in <module>
20 print(minimize(mean_square_error, X_guess).x) # works fine
21
---> 22 print(minimize(mean_square_error_over_indices, X_guess).x) # error
C:\Anaconda\lib\site-packages\scipy\optimize\_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
593 return _minimize_cg(fun, x0, args, jac, callback, **options)
594 elif meth == 'bfgs':
--> 595 return _minimize_bfgs(fun, x0, args, jac, callback, **options)
596 elif meth == 'newton-cg':
597 return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
C:\Anaconda\lib\site-packages\scipy\optimize\optimize.py in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
968 else:
969 grad_calls, myfprime = wrap_function(fprime, args)
--> 970 gfk = myfprime(x0)
971 k = 0
972 N = len(x0)
C:\Anaconda\lib\site-packages\scipy\optimize\optimize.py in function_wrapper(*wrapper_args)
298 def function_wrapper(*wrapper_args):
299 ncalls[0] += 1
--> 300 return function(*(wrapper_args + args))
301
302 return ncalls, function_wrapper
C:\Anaconda\lib\site-packages\scipy\optimize\optimize.py in approx_fprime(xk, f, epsilon, *args)
728
729 """
--> 730 return _approx_fprime_helper(xk, f, epsilon, args=args)
731
732
C:\Anaconda\lib\site-packages\scipy\optimize\optimize.py in _approx_fprime_helper(xk, f, epsilon, args, f0)
662 """
663 if f0 is None:
--> 664 f0 = f(*((xk,) + args))
665 grad = numpy.zeros((len(xk),), float)
666 ei = numpy.zeros((len(xk),), float)
C:\Anaconda\lib\site-packages\scipy\optimize\optimize.py in function_wrapper(*wrapper_args)
298 def function_wrapper(*wrapper_args):
299 ncalls[0] += 1
--> 300 return function(*(wrapper_args + args))
301
302 return ncalls, function_wrapper
<ipython-input-1-08d40604e22a> in mean_square_error_over_indices(X)
11
12 def mean_square_error_over_indices(X):
---> 13 return ((X[indices].flatten() - X_true[indices].flatten()) ** 2).mean()
14
15
IndexError: too many indices for array
Based on the docs, scipy.optimize.minimize works with 1-D arrays: it flattens x0 before calling your objective, so inside mean_square_error_over_indices the argument X arrives as a 1-D array of length n*n (25 here) and the 2-D index X[indices] raises IndexError. You are right to use flatten(), but you should also apply it to the initial guess that you pass to minimize(). Here is my suggestion to solve your problem:
import numpy as np
from scipy.optimize import minimize
# init
n = 5
x_true = np.random.normal(size=(n,n))
x_guess = np.random.normal(size=(n,n))
indices = np.triu_indices(n)
# flatten initial values for minimize
guess_x0 = x_guess.flatten()
guess_indices_x0 = x_guess[indices].flatten()

# define objective funcs
mse = lambda x: ((x - x_true.flatten()) ** 2).mean()
mse_over_indices = lambda x: ((x - x_true[indices].flatten()) ** 2).mean()

# works fine
print("MSE: %5f" % mse(guess_x0))
print("MSE for indices: %5f" % mse_over_indices(guess_indices_x0))

# works fine (flatten is necessary inside the objective function)
print("Result 1:", minimize(mse, guess_x0).x)
print("Result 2:", minimize(mse_over_indices, guess_indices_x0).x)
Output:
MSE: 2.763674
MSE for indices: 3.192139
Result 1: [-1.2828193 0.49468516 -0.99500157 -0.47284983 1.6380719 -0.33051017
0.13769163 -0.23920633 -0.87430572 0.63945803 1.38327467 0.8484247
0.31888506 -1.15764468 1.06891773 -0.28372002 1.34104286 1.21024251
-0.11020374 1.37024001 1.08940389 1.82391261 0.32469148 0.64567877
0.54364199]
Result 2: [-1.28281964 0.49468503 -0.99500147 -0.47284976 1.63807209 0.13769154
-0.23920624 -0.87430606 0.63945812 0.31888521 -1.15764475 1.06891776
-0.11020373 1.37024006 0.54364213]
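One follow-up note (my addition, not part of the original answer): the second optimization only returns the upper-triangle entries, so if the full matrix is needed afterwards, the solution can be scattered back into place:

# rebuild an n x n matrix from the optimized upper-triangle entries
res = minimize(mse_over_indices, guess_indices_x0)
x_opt = x_guess.copy()
x_opt[indices] = res.x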

Fitting data to Faddeeva function using python's optimize.leastsq() and optimize.curve_fit

Hello Stackoverflow community,
I am trying to fit data to a Faddeeva function (scipy.special.wofz) using Python's optimize.leastsq() or optimize.curve_fit(). The fit parameters are the following two: z1 and z2. They are complex, whereas the independent variable (time) and the output of the function (meas_data) are purely real numbers. This is my first attempt to fit the data:
import numpy as np
from scipy import optimize
from scipy import special
meas_data = np.loadtxt('directory')
time = np.loadtxt('directory')
def test(params, time):
    z1 = params[0]
    z2 = params[1]

    a = z1 * np.sqrt(time)
    b = z2 * np.sqrt(time)

    a = np.complex(0, a)
    b = np.complex(0, b)

    c = special.wofz(a)
    d = special.wofz(b)

    return np.real(c * d)

def test_error(params, time, t_error):
    return test(params, time) - t_error

initial_guess = (300+200j, 300-200j)
params_fit, cov_x, infodict, mesg, ier = optimize.leastsq(test_error, initial_guess,
                                                          args=(time, meas_data),
                                                          full_output=True)
My second attempt looks like:
import numpy as np
from scipy import optimize
from scipy import special
meas_data = np.loadtxt('directory')
time = np.loadtxt('directory')
def test(time, z1, z2):
    a = z1 * np.sqrt(time)
    b = z2 * np.sqrt(time)

    a = np.complex(0, a)
    b = np.complex(0, b)

    c = special.wofz(a)
    d = special.wofz(b)

    return np.real(c * d)

popt, pcov = optimize.curve_fit(test, time, meas_data)
For both cases, I get a similar error message:
for the first attempt:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-9286b2981692> in <module>()
22
23 initial_guess = (300+200j, 300-200j)
---> 24 params_fit, cov_x, infodict, mesg, ier = optimize.leastsq(test_error, initial_guess, args = (time, msd), full_output = True)
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
375 if not isinstance(args, tuple):
376 args = (args,)
--> 377 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
378 m = shape[0]
379 if n > m:
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
24 def _check_func(checker, argname, thefunc, x0, args, numinputs,
25 output_shape=None):
---> 26 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
27 if (output_shape is not None) and (shape(res) != output_shape):
28 if (output_shape[0] != 1):
<ipython-input-13-9286b2981692> in test_error(params, time, t_error)
19
20 def test_error(params, time, t_error):
---> 21 return test(params, time) - t_error
22
23 initial_guess = (z1, z2)
<ipython-input-13-9286b2981692> in test(params, time)
10 b = z2*np.sqrt(time)
11
---> 12 a = np.complex(0, a)
13 b = np.complex(0, b)
14
TypeError: only length-1 arrays can be converted to Python scalars
and for the second attempt:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-8-8f631a7ede54> in <module>()
16 return np.real(c*d)
17
---> 18 popt, pcov = optimize.curve_fit(test, time, msd)
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
674 # Remove full_output from kwargs, otherwise we're passing it in twice.
675 return_full = kwargs.pop('full_output', False)
--> 676 res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
677 popt, pcov, infodict, errmsg, ier = res
678 cost = np.sum(infodict['fvec'] ** 2)
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
375 if not isinstance(args, tuple):
376 args = (args,)
--> 377 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
378 m = shape[0]
379 if n > m:
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
24 def _check_func(checker, argname, thefunc, x0, args, numinputs,
25 output_shape=None):
---> 26 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
27 if (output_shape is not None) and (shape(res) != output_shape):
28 if (output_shape[0] != 1):
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in func_wrapped(params)
453 if weights is None:
454 def func_wrapped(params):
--> 455 return func(xdata, *params) - ydata
456 else:
457 def func_wrapped(params):
<ipython-input-8-8f631a7ede54> in test(time, z1, z2)
7 b = z2*np.sqrt(time)
8
----> 9 a = np.complex(0, a)
10 b = np.complex(0, b)
11
TypeError: only length-1 arrays can be converted to Python scalars
The data I am using for fitting are times in the range of 10e-6 to 10e-2 and measurement data in the range of 10e-19 to 10e-16. Both test functions work for calculating individual numbers when z1 and z2 are known. I think it has something to do with Python's fitting routines, which maybe cannot handle complex values during their calculation?
I would be very happy, if someone could help me fixing this problem.
The third comment by PRMoureu on my question fixed the problem.
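For future readers, the gist of that fix (reconstructed here, so treat it as a sketch): np.complex(0, a) only accepts scalars, which is exactly why an array argument raises TypeError: only length-1 arrays can be converted to Python scalars. Multiplying by 1j instead builds the imaginary argument elementwise, e.g. for the curve_fit variant:

def test(time, z1, z2):
    # 1j * array works elementwise; np.complex(0, array) expects scalars
    a = 1j * z1 * np.sqrt(time)
    b = 1j * z2 * np.sqrt(time)
    return np.real(special.wofz(a) * special.wofz(b))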

Implementing logistic regression -- why does this not converge?

I am adapting existing implementations of logistic regression, but I can't figure out what I am doing wrong.
Here is my implementation:
from scipy.optimize import fmin_bfgs
import numpy as np
import pandas as pd

# With help from http://stackoverflow.com/questions/13794754/logistic-regression-using-scipy
# as well as https://bryantravissmith.com/2015/12/29/implementing-logistic-regression-from-scratch-part-2-python-code/

def sigma(features, weights):
    """returns sigma(<w,x>)"""
    return 1 / (1 + np.exp(-features.dot(weights)))

def log_likelihood(weights, features, labels):
    """calculates -ln p(t|w)"""
    s = sigma(features, weights)
    #s += 1e-24  # pseudocount to prevent logs of 0
    t = labels * np.log(s + 1e-24)
    t2 = (1 - labels) * (np.log((1 - s) + 1e-24))
    ll = (t + t2).sum()
    print -ll
    return -ll

def gradient_log_likelihood(weights, features, labels):
    """calculates the gradient (Jacobian) of the log likelihood"""
    error = labels - sigma(features, weights)
    grad = (error * features).sum(axis=0)
    return grad.reshape(grad.shape[0], 1)
Here is a sample dataset:
labels = np.array([0, 1, 1]).reshape(3, 1)
df = pd.DataFrame.from_dict({'a': [1,2,3], 'b': [2,3,4], 'c': [6,7,8]})
n, m = df.shape
weights = np.zeros(m + 1).reshape(m + 1, 1) # zero vector of starting weights
# add the intercept column
features = np.ones((n, m + 1)) # make matrix with all 1's
features[:,1:] = df # replace the 1's in all columns after column 0 with actual data
If I run each of these methods individually on the beginning weight vector, they run. But once I try to optimize, I get a shape error:
optimized = fmin_bfgs(log_likelihood, x0=weights, args=(features, labels), gtol=1e-4, fprime=gradient_log_likelihood)
ValueError Traceback (most recent call last)
<ipython-input-26-34c3cde48ac4> in <module>()
----> 1 optimized = fmin_bfgs(log_likelihood, x0=weights, args=(features, labels), gtol=1e-4, fprime=gradient_log_likelihood)
/Users/ifiddes/anaconda/lib/python2.7/site-packages/scipy/optimize/optimize.pyc in fmin_bfgs(f, x0, fprime, args, gtol, norm, epsilon, maxiter, full_output, disp, retall, callback)
791 'return_all': retall}
792
--> 793 res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
794
795 if full_output:
/Users/ifiddes/anaconda/lib/python2.7/site-packages/scipy/optimize/optimize.pyc in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
845 else:
846 grad_calls, myfprime = wrap_function(fprime, args)
--> 847 gfk = myfprime(x0)
848 k = 0
849 N = len(x0)
/Users/ifiddes/anaconda/lib/python2.7/site-packages/scipy/optimize/optimize.pyc in function_wrapper(*wrapper_args)
287 def function_wrapper(*wrapper_args):
288 ncalls[0] += 1
--> 289 return function(*(wrapper_args + args))
290
291 return ncalls, function_wrapper
<ipython-input-3-9678bc972b41> in gradient_log_likelihood(weights, features, labels)
2 """calculates the gradient (Jacobian) of the log likelihood"""
3 error = labels - sigma(features, weights)
----> 4 grad = (error * features).sum(axis=0)
5 return grad.reshape(grad.shape[0], 1)
6
ValueError: operands could not be broadcast together with shapes (3,3) (3,4)
The problem is that somehow this line:
error = (labels - sigma(features, weights))
converts error from a 3 x 1 vector into a 3 x 3 matrix.
Note that if you print error and run gradient_log_likelihood(weights, features, labels), you get output:
[[-0.5]
[ 0.5]
[ 0.5]]
And if you run the optimization, you get:
[[-0.5 -0.5 -0.5]
[ 0.5 0.5 0.5]
[ 0.5 0.5 0.5]]
in addition to the ValueError. This is because labels - sigma(features, weights) changes the shape.
You can investigate why, but if you want to hack around it, you can just pull the first column out: error = (labels - sigma(features, weights)).T[0].reshape(3,1). That gives you the same result when you run gradient_log_likelihood(weights, features, labels) directly, but you then get a new error in the optimization function.
optimized = fmin_bfgs(log_likelihood, x0=weights, args=(features, labels), gtol=1e-3, fprime=gradient_log_likelihood)
6.23832462504
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-135-d7e8b04daeba> in <module>()
----> 1 optimized = fmin_bfgs(log_likelihood, x0=weights, args=(features, labels), gtol=1e-3, fprime=gradient_log_likelihood)
/Library/Python/2.7/site-packages/scipy/optimize/optimize.pyc in fmin_bfgs(f, x0, fprime, args, gtol, norm, epsilon, maxiter, full_output, disp, retall, callback)
791 'return_all': retall}
792
--> 793 res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
794
795 if full_output:
/Library/Python/2.7/site-packages/scipy/optimize/optimize.pyc in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
863 alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
864 _line_search_wolfe12(f, myfprime, xk, pk, gfk,
--> 865 old_fval, old_old_fval)
866 except _LineSearchError:
867 # Line search failed to find a better solution.
/Library/Python/2.7/site-packages/scipy/optimize/optimize.pyc in _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)
697 ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
698 old_fval, old_old_fval,
--> 699 **kwargs)
700
701 if ret[0] is None:
/Library/Python/2.7/site-packages/scipy/optimize/linesearch.pyc in line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, args, c1, c2, amax, amin, xtol)
95 return np.dot(gval[0], pk)
96
---> 97 derphi0 = np.dot(gfk, pk)
98
99 stp, fval, old_fval = scalar_search_wolfe1(
ValueError: shapes (4,1) and (4,1) not aligned: 1 (dim 1) != 4 (dim 0)
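A note on the likely root cause, based on the tracebacks above: fmin_bfgs ravels x0 to a 1-D array of shape (4,), so inside the callbacks sigma(features, weights) comes out with shape (3,) while labels still has shape (3, 1), and broadcasting (3, 1) - (3,) produces the (3, 3) matrix. Keeping labels and the gradient 1-D sidesteps both shape errors; note also that since the objective is the negative log-likelihood, the gradient should be negated to match. A minimal sketch:

def gradient_log_likelihood(weights, features, labels):
    # weights arrives 1-D from fmin_bfgs; flatten labels so the
    # subtraction stays shape (n,) instead of broadcasting to (n, n)
    error = labels.ravel() - sigma(features, weights)
    # the objective is -ln p(t|w), so return the negative gradient, 1-D
    return -features.T.dot(error)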

Scipy optimize function: matrices not aligned

EDIT: The data set is the MNIST data set from the Homework of Week 4 of Andrew Ng's Machine Learning Course
I've checked the question on scipy optimize, but I still couldn't figure out what is wrong with my code. I am trying to optimize theta for the oneVsAll exercise in Andrew Ng's Coursera course.
Here is the relevant code
import math
import numpy as np

def sigmoid(x):
    a = []
    for item in x:
        a.append(1 / (1 + math.exp(-item)))
    return a

def hypothesis(x, theta):
    return np.array(sigmoid(np.dot(x, theta)))

def costFunction(theta, x, y, lambda_):
    m = X.shape[0]
    part1 = np.dot(y.T, np.log(hypothesis(x, theta)).reshape(m, 1))
    part2 = np.dot((np.ones((m, 1)) - y).T, np.log(1 - hypothesis(x, theta)).reshape(m, 1))
    summ = (part1 + part2)
    return -summ[0] / m

def gradientVect(theta, x, y, lambda_):
    n = X.shape[1]
    m = X.shape[0]
    gradient = []
    theta = theta.reshape(n, 1)
    beta = hypothesis(x, theta) - y
    reg = theta[1:] * lambda_ / m
    grad = np.dot(X.T, beta) * 1. / m
    grad[1:] = grad[1:] * reg
    return grad.flatten()

from scipy import optimize

def optimizeTheta(x, y, nLabels, lambda_):
    for i in np.arange(0, nLabels):
        theta = np.zeros((n, 1))
        res = optimize.minimize(costFunction, theta, args=(x, (y == i) * 1, lambda_), method=None,
                                jac=gradientVect, options={'maxiter': 50})
        print(res)
    return result
but running
optimizeTheta(X, y, 10, 0) # X shape = 401, 500
Gives me the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-247-e0e6e4c1eddd> in <module>()
3 n = X.shape[1]
4
----> 5 optimizeTheta(X, y, 10, 0)
<ipython-input-246-0a15e9f4769a> in optimizeTheta(x, y, nLabels, lambda_)
54 theta = np.zeros((n,1))
55 res = optimize.minimize(costFunction, x0 = theta, args=(x, (y == i)*1, lambda_), method=None,
---> 56 jac=gradientVect, options={'maxiter':50})
57 print(res)
58 return result
//anaconda/lib/python3.5/site-packages/scipy/optimize/_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
439 return _minimize_cg(fun, x0, args, jac, callback, **options)
440 elif meth == 'bfgs':
--> 441 return _minimize_bfgs(fun, x0, args, jac, callback, **options)
442 elif meth == 'newton-cg':
443 return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
//anaconda/lib/python3.5/site-packages/scipy/optimize/optimize.py in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
859 gnorm = vecnorm(gfk, ord=norm)
860 while (gnorm > gtol) and (k < maxiter):
--> 861 pk = -numpy.dot(Hk, gfk)
862 try:
863 alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
ValueError: shapes (401,401) and (2005000,) not aligned: 401 (dim 1) != 2005000 (dim 0)
And I can't figure out why the shapes are not aligned.
Thanks!
So I realized what was wrong with my code.
The problem was the sigmoid function returning a Python list rather than a NumPy array, which messed up the matrix multiplications afterwards. The new sigmoid function is
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
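As a quick check of why the vectorized version fixes things (my addition): np.exp works elementwise and preserves the input array's shape, so the downstream np.dot calls receive properly shaped arrays instead of Python lists:

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

z = np.zeros((5, 1))
print(sigmoid(z).shape)  # (5, 1) -- shape preserved, unlike the list version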
