Related
I am facing ValueError: too many values to unpack (expected 2) while optimizing the parameters of a system of ODEs with solve_ivp. In fact, I get the same error when I swap odeint for solve_ivp in this SO answer, which you can use as a minimal working example since, as far as I can tell, it has the same problem. The only changes I made to that code are swapping the positions of y and t in the arguments of f and, correspondingly, solving with x = solve_ivp(f, t, x0, args=(paras,)) in g instead of using odeint.
Here's the full code for the sake of convenience:
# import libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint, solve_ivp
from lmfit import minimize, Parameters, Parameter, report_fit

def f(t, y, paras):
    """
    Your system of differential equations
    """
    x1 = y[0]
    x2 = y[1]
    x3 = y[2]

    try:
        k0 = paras['k0'].value
        k1 = paras['k1'].value
    except KeyError:
        k0, k1 = paras

    # the model equations
    f0 = -k0 * x1
    f1 = k0 * x1 - k1 * x2
    f2 = k1 * x2
    return [f0, f1, f2]


def g(t, x0, paras):
    """
    Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
    """
    x = solve_ivp(f, t, x0, args=(paras,))
    return x


def residual(paras, t, data):
    """
    compute the residual between actual data and fitted data
    """
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)

    # you only have data for one of your variables
    x2_model = model[:, 1]
    return (x2_model - data).ravel()

# initial conditions
x10 = 5.
x20 = 0
x30 = 0
y0 = [x10, x20, x30]
# measured data
t_measured = np.linspace(0, 9, 10)
x2_measured = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
plt.figure()
plt.scatter(t_measured, x2_measured, marker='o', color='b', label='measured data', s=75)
# set parameters including bounds; you can also fix parameters (use vary=False)
params = Parameters()
params.add('x10', value=x10, vary=False)
params.add('x20', value=x20, vary=False)
params.add('x30', value=x30, vary=False)
params.add('k0', value=0.2, min=0.0001, max=2.)
params.add('k1', value=0.3, min=0.0001, max=2.)
# fit model
result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq') # leastsq nelder
# check results of the fit
data_fitted = g(np.linspace(0., 9., 100), y0, result.params)
# plot fitted data
plt.plot(np.linspace(0., 9., 100), data_fitted[:, 1], '-', linewidth=2, color='red', label='fitted data')
plt.legend()
plt.xlim([0, max(t_measured)])
plt.ylim([0, 1.1 * max(data_fitted[:, 1])])
# display fitted statistics
report_fit(result)
plt.show()
Here's the error traceback:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/swami/work/scrap/lmfit_example1.ipynb Cell 4 in <cell line: 67>()
64 params.add('k1', value=0.3, min=0.0001, max=2.)
66 # fit model
---> 67 result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq') # leastsq nelder
68 # check results of the fit
69 data_fitted = g(np.linspace(0., 9., 100), y0, result.params)
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:2600, in minimize(fcn, params, method, args, kws, iter_cb, scale_covar, nan_policy, reduce_fcn, calc_covar, max_nfev, **fit_kws)
2460 """Perform the minimization of the objective function.
2461
2462 The minimize function takes an objective function to be minimized,
(...)
2594
2595 """
2596 fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
2597 iter_cb=iter_cb, scale_covar=scale_covar,
2598 nan_policy=nan_policy, reduce_fcn=reduce_fcn,
2599 calc_covar=calc_covar, max_nfev=max_nfev, **fit_kws)
-> 2600 return fitter.minimize(method=method)
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:2369, in Minimizer.minimize(self, method, params, **kws)
2366 if (key.lower().startswith(user_method) or
2367 val.lower().startswith(user_method)):
2368 kwargs['method'] = val
-> 2369 return function(**kwargs)
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:1693, in Minimizer.leastsq(self, params, max_nfev, **kws)
1691 result.call_kws = lskws
1692 try:
-> 1693 lsout = scipy_leastsq(self.__residual, variables, **lskws)
1694 except AbortFitException:
1695 pass
File ~/.local/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py:410, in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
408 if not isinstance(args, tuple):
409 args = (args,)
--> 410 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
411 m = shape[0]
413 if n > m:
File ~/.local/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py:24, in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
22 def _check_func(checker, argname, thefunc, x0, args, numinputs,
23 output_shape=None):
---> 24 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
25 if (output_shape is not None) and (shape(res) != output_shape):
26 if (output_shape[0] != 1):
File ~/miniconda3/envs/dynamical/lib/python3.10/site-packages/lmfit/minimizer.py:586, in Minimizer.__residual(self, fvars, apply_bounds_transformation)
583 self.result.success = False
584 raise AbortFitException(f"fit aborted: too many function evaluations {self.max_nfev}")
--> 586 out = self.userfcn(params, *self.userargs, **self.userkws)
588 if callable(self.iter_cb):
589 abort = self.iter_cb(params, self.result.nfev, out,
590 *self.userargs, **self.userkws)
/home/swami/work/scrap/lmfit_example1.ipynb Cell 4 in residual(paras, t, data)
33 """
34 compute the residual between actual data and fitted data
35 """
37 x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
---> 38 model = g(t, x0, paras)
40 # you only have data for one of your variables
41 x2_model = model[:, 1]
/home/swami/work/scrap/lmfit_example1.ipynb Cell 4 in g(t, x0, paras)
23 def g(t, x0, paras):
24 """
25 Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
26 """
---> 27 x = solve_ivp(f, t, x0, args=(paras,))
28 return x
File ~/.local/lib/python3.10/site-packages/scipy/integrate/_ivp/ivp.py:512, in solve_ivp(fun, t_span, y0, method, t_eval, dense_output, events, vectorized, args, **options)
507 if method not in METHODS and not (
508 inspect.isclass(method) and issubclass(method, OdeSolver)):
509 raise ValueError("`method` must be one of {} or OdeSolver class."
510 .format(METHODS))
--> 512 t0, tf = map(float, t_span)
514 if args is not None:
515 # Wrap the user's fun (and jac, if given) in lambdas to hide the
516 # additional parameters. Pass in the original fun as a keyword
517 # argument to keep it in the scope of the lambda.
518 try:
ValueError: too many values to unpack (expected 2)
Any idea what the problem might be?
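For reference, the traceback points at t0, tf = map(float, t_span) inside solve_ivp: unlike odeint, solve_ivp expects a two-element (t0, tf) span plus an optional t_eval grid, and it returns an object whose .y attribute has shape (n_states, n_points). A minimal sketch of g written around that interface (it assumes the f defined above and is not verified against this exact setup):

from scipy.integrate import solve_ivp

def g(t, x0, paras):
    # sketch only: pass the interval as (t0, tf) and the full grid via t_eval;
    # transpose .y so that model[:, 1] keeps indexing x2 as with odeint
    sol = solve_ivp(f, (t[0], t[-1]), x0, t_eval=t, args=(paras,))
    return sol.y.T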
I'm brand new to Python and struggling with this error: 'x and y must be the same size'.
Here is the code for my scatter plot:
def plotNumericalConvergence(paramArr, GrArr, Label):
    plt.figure()

    x = paramArr
    y = GrArr

    plt.scatter(x=x, y=y)

    plt.xlabel(Label)
    plt.ylabel('Gr')
    plt.title('title')
    plt.show()
and here is the code for what it's taking in to plot:
def numericalConvergence(Position, Velocity, Charge, Mass, dt, B):
    gyroArr = np.array([])
    gyroArr2 = np.array([])
    gyroArr3 = np.array([])
    dtArr = np.array([])
    fieldArr = np.array([])
    chargeArr = np.array([])

    dtArr = np.append(dtArr, [dt])
    gyroArr = np.append(gyroArr, [6.324555320336759])
    gyroArr2 = np.append(gyroArr, [6.324555320336759])
    gyroArr3 = np.append(gyroArr, [6.324555320336759])
    fieldArr = np.append(fieldArr, [[0,0,1]])
    chargeArr = np.append(chargeArr, Charge)

    # Incrementing timestep
    for i in range(10):
        start = time.time()
        dt = dt + 0.1000
        print('\n'"Timestep", i+1)
        trv = pstep(qom, Position, Velocity, 0.0, dt, N_t)
        Gr = MeasuredGr(trv)
        PredGr = GyroRadius(Position, Velocity, Charge, Mass, dt, B)
        gyroArr = np.append(gyroArr, [Gr])
        dtArr = np.append(dtArr, [dt])
        end = time.time()
        print("Predicted gyro radius =", PredGr)
        print("Measured gryo radius =", Gr)
        print("Timestep =", dt)
        print("Magnetic Field =", B)
        print("Charge =", Charge)
        print("nt =", (end - start)/dt)  # need to fix this## Predicted gyro radius

    Label = "DT"
    plotNumericalConvergence(dtArr, gyroArr, Label)

    # Incrementing magnetic field
    for i in range(10):
        start = time.time()
        dt = 0.001
        B = [float(x) + 1 for x in B]  # Increments all numbers in magnetic field array by 1
        print('\n'"Magnetic Field", i+1)
        trv = pstep(qom, Position, Velocity, 0.0, dt, N_t)
        Gr = MeasuredGr(trv)
        PredGr = GyroRadius(Position, Velocity, Charge, Mass, dt, B)
        gyroArr2 = np.append(gyroArr2, [Gr])
        fieldArr = np.append(fieldArr, [[B]])
        end = time.time()
        print("Predicted gyro radius =", PredGr)
        print("Measured gryo radius =", Gr)
        print("Timestep =", dt)
        print("Magnetic Field =", B)
        print("Charge =", Charge)
        print("nt =", (end - start)/dt)

    Label = "Magnetic Field"
    plotNumericalConvergence(fieldArr, gyroArr2, Label)

    # Incrementing Charge
    for i in range(10):
        start = time.time()
        B = [0,0,1]
        Charge = Charge + 0.1
        print('\n'"Charge", i+1)
        # add label param for y, new gr array each loop - no 2nd method needed
        trv = pstep(qom, Position, Velocity, 0.0, dt, N_t)
        Gr = MeasuredGr(trv)
        PredGr = GyroRadius(Position, Velocity, Charge, Mass, dt, B)
        gyroArr3 = np.append(gyroArr3, [Gr])
        chargeArr = np.append(chargeArr, [Charge])
        print("Predicted gyro radius =", PredGr)
        print("Measured gryo radius =", Gr)
        print("Timestep =", dt)
        print("Magnetic Field =", B)
        print("Charge =", Charge)
        print("nt =", (end - start)/dt)

    Label = "Charge"
    print(gyroArr3)
    print(chargeArr)
    plotNumericalConvergence(chargeArr, gyroArr3, Label)
The plot works for dt, but not for the magnetic field or charge. I've seen posts on here about reshaping arrays and something along the lines of [:,0], but I'm really stuck and don't understand Python 100% yet. Thanks!
EDIT - Full traceback:
ValueError Traceback (most recent call last)
Cell In [249], line 25
22 bf=EvalB(ipos)
23 vel = Boris(qom,ivel,ef,bf,-0.5*dt)
---> 25 numericalConvergence(ipos, vel, Charge, Mass, dt, B)
26 #print(gyroArr)
27 #print(dtArr)
28 #plotNumericalConvergence(dtArr, gyroArr)
Cell In [246], line 101, in numericalConvergence(Position, Velocity, Charge, Mass, dt, B)
99 print(gyroArr3)
100 print(chargeArr)
--> 101 plotNumericalConvergence(chargeArr, gyroArr3, Label)
103 return gyroArr, dtArr
Cell In [247], line 8, in plotNumericalConvergence(paramArr, GrArr, Label)
5 x = paramArr
6 y = GrArr
----> 8 plt.scatter(x=x,y=y)
10 plt.xlabel(Label)
11 plt.ylabel('Gr')
File /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/matplotlib/pyplot.py:2790, in scatter(x, y, s, c, marker, cmap, norm, vmin, vmax, alpha, linewidths, edgecolors, plotnonfinite, data, **kwargs)
2785 #_copy_docstring_and_deprecators(Axes.scatter)
2786 def scatter(
2787 x, y, s=None, c=None, marker=None, cmap=None, norm=None,
2788 vmin=None, vmax=None, alpha=None, linewidths=None, *,
2789 edgecolors=None, plotnonfinite=False, data=None, **kwargs):
-> 2790 __ret = gca().scatter(
2791 x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,
2792 vmin=vmin, vmax=vmax, alpha=alpha, linewidths=linewidths,
2793 edgecolors=edgecolors, plotnonfinite=plotnonfinite,
2794 **({"data": data} if data is not None else {}), **kwargs)
2795 sci(__ret)
2796 return __ret
File /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/matplotlib/__init__.py:1423, in _preprocess_data.<locals>.inner(ax, data, *args, **kwargs)
1420 #functools.wraps(func)
1421 def inner(ax, *args, data=None, **kwargs):
1422 if data is None:
-> 1423 return func(ax, *map(sanitize_sequence, args), **kwargs)
1425 bound = new_sig.bind(ax, *args, **kwargs)
1426 auto_label = (bound.arguments.get(label_namer)
1427 or bound.kwargs.get(label_namer))
File /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/matplotlib/axes/_axes.py:4520, in Axes.scatter(self, x, y, s, c, marker, cmap, norm, vmin, vmax, alpha, linewidths, edgecolors, plotnonfinite, **kwargs)
4518 y = np.ma.ravel(y)
4519 if x.size != y.size:
-> 4520 raise ValueError("x and y must be the same size")
4522 if s is None:
4523 s = (20 if mpl.rcParams['_internal.classic_mode'] else
4524 mpl.rcParams['lines.markersize'] ** 2.0)
ValueError: x and y must be the same size
When you generate a scatter plot, both x and y should be 1-D arrays of equal size. Check the sizes of x and y; they are probably different.
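In this particular case, a likely source of the mismatch (a rough sketch, not the poster's exact arrays) is that np.append flattens its inputs when no axis is given, so appending the 3-component B each iteration grows fieldArr three times faster than gyroArr2:

import numpy as np

fieldArr = np.array([])
gyroArr2 = np.array([])
for i in range(10):
    B = [0, 0, i + 1]
    fieldArr = np.append(fieldArr, [[B]])  # flattened: adds 3 values per iteration
    gyroArr2 = np.append(gyroArr2, [1.0])  # adds 1 value per iteration

print(fieldArr.size, gyroArr2.size)  # 30 vs 10, so scatter() rejects them
# one option is to store a single scalar per iteration instead,
# e.g. the field magnitude: fieldArr = np.append(fieldArr, np.linalg.norm(B))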
I am working on an implementation of MAML (see https://arxiv.org/pdf/1703.03400.pdf) in Jax.
When training on a distribution of simple linear regression tasks it seems to perform fine (takes a while to converge but ultimately works).
However, when training on tasks distributed like A * sin(B + X), where A and B are random variables, all the weights in the network converge to 0 (see the "training results" image).
This is clearly not right.
Thanks in advance for any help provided.
Full code here https://colab.research.google.com/drive/1YoOkwo5tI42LeIbBOxpImkN55Kg9wScl?usp=sharing or see below for minimal code.
Task Generation code:
class MAMLDataLoader:

    def __init__(self, sample_task_fn, num_tasks, batch_size):
        self.sample_task_fn = sample_task_fn
        self.num_tasks = num_tasks
        self.batch_size = batch_size

    def sample_tasks(self, key):
        XS = jnp.empty((self.num_tasks, 2 * self.batch_size, 1))
        YS = jnp.empty((self.num_tasks, 2 * self.batch_size, 1))

        for i in range(self.num_tasks):
            key, subkey = random.split(key)
            xs, ys = self.sample_task_fn(self.batch_size * 2, subkey)
            XS = XS.at[i].set(xs)
            YS = YS.at[i].set(ys)

        x_train, x_test = XS[:, :self.batch_size], XS[:, self.batch_size:]
        y_train, y_test = YS[:, :self.batch_size], YS[:, self.batch_size:]

        return x_train, y_train, x_test, y_test

    def dummy_input(self):
        key = random.PRNGKey(0)
        x = self.sample_task_fn(1, key)[0][0]
        return x


def sample_sinusoidal_task(samples, key):
    # y = a * sin(b + x)
    xs_key, amplitude_key, phase_key = random.split(key, num=3)
    amplitude = random.uniform(amplitude_key, (1, 1))
    phase = random.uniform(phase_key, (1, 1)) * jnp.pi * 2
    xs = (random.uniform(xs_key, (samples, 1)) * 4 - 2) * jnp.pi
    ys = amplitude * jnp.sin(xs + phase)
    return xs, ys
Here is the main MAML code:
class MAMLTrainer:

    def __init__(self, model, alpha, optimiser, inner_steps=1):
        self.model = model
        self.alpha = alpha
        self.optimiser = optimiser
        self.inner_steps = inner_steps
        self.jit_step = jit(self.step)

    def loss(self, params, x, y):
        preds = self.model.apply(params, x)
        return jnp.mean(jnp.inner(y - preds, y - preds) / 2.0)

    def update(self, params, x, y, inner_steps=None):
        if inner_steps is None:
            inner_steps = self.inner_steps

        loss_grad = grad(self.loss)

        def _update(i, params):
            grads = loss_grad(params, x, y)
            new_params = tree_map(lambda p, g: p - self.alpha * g, params, grads)
            return new_params

        return lax.fori_loop(0, inner_steps, _update, params)

    def meta_loss(self, params, x1, y1, x2, y2):
        return self.loss(self.update(params, x1, x2), x2, y2)

    def batch_meta_loss(self, params, x1, y1, x2, y2):
        return jnp.mean(vmap(partial(self.meta_loss, params))(x1, y1, x2, y2))

    def step(self, params, optimiser, x1, y1, x2, y2):
        loss, grads = value_and_grad(self.batch_meta_loss)(params, x1, y1, x2, y2)
        updates, opt_state = self.optimiser.update(grads, optimiser, params)
        params = optax.apply_updates(params, updates)
        return params, loss

    def train(self, dataloader, steps, key, params=None):
        if params is None:
            key, subkey = random.split(key)
            params = self.model.init(subkey, dataloader.dummy_input())

        optimiser = self.optimiser.init(params)

        pbar, losses = tqdm(range(steps), desc='Training'), []
        for epoch in pbar:
            key, subkey = random.split(key)
            params, loss = self.jit_step(params, optimiser, *dataloader.sample_tasks(subkey))
            losses.append(loss)

            if epoch % 100 == 0:
                avg_loss = jnp.mean(jnp.array(losses[-100:]))
                pbar.set_postfix_str(f'current_loss: {loss:.3f}, running_loss_100_epochs: {avg_loss:.3f}')

        return params, jnp.array(losses)

    def n_shot_learn(self, x_train, y_train, params, n):
        return self.update(params, x_train, y_train, n)
Training Code:
class SimpleMLP(nn.Module):
    features: Sequence[int]

    @nn.compact
    def __call__(self, inputs):
        x = inputs
        for i, feat in enumerate(self.features[:-1]):
            x = nn.Dense(feat)(x)
            x = nn.relu(x)
        return nn.Dense(self.features[-1])(x)


model = SimpleMLP([64, 64, 1])
optimiser = optax.adam(1e-3)
trainer = MAMLTrainer(model, 0.1, optimiser, 1)
dataloader = MAMLDataLoader(sample_sinusoidal_task, 2, 100)

key = random.PRNGKey(0)
params, losses = trainer.train(dataloader, 10000, key)
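For reference, a quick way to inspect what the trained parameters do on a fresh task (a rough sketch built on the classes above; it assumes matplotlib is imported as plt and that sample_sinusoidal_task, trainer, model and params are in scope):

import matplotlib.pyplot as plt

# sketch: sample one new sinusoidal task, adapt the trained params with a few
# inner gradient steps, and plot the adapted predictions against the support points
eval_key = random.PRNGKey(1)
xs, ys = sample_sinusoidal_task(20, eval_key)
adapted = trainer.n_shot_learn(xs, ys, params, 5)

x_grid = jnp.linspace(-2 * jnp.pi, 2 * jnp.pi, 200).reshape(-1, 1)
plt.plot(x_grid, model.apply(adapted, x_grid), label='adapted model')
plt.scatter(xs, ys, label='support points')
plt.legend()
plt.show()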
Long time lurking, first time posting.
I am working with a chemical system that is detected only for a certain period of time, so I will see both the reaction and the decay of the signal. The equations are given by:
Derivative(GL, t): (-k*GL) - GL/a,
Derivative(GM, t): (k*GL) - GM/b,
I have managed to fit my data using the symfit package (see the image "Chemical reaction and fitting using symfit" below for an idea of the system); however, since I will need to run a Monte Carlo simulation, I need to fit my data using scipy.
I have tried to define the equation in this way:
def f(C, xdata):
    GL = ydataScaled
    GM = ydataScaled2

    dGLdt = -k*GL - GL/a
    dGMdt = k*GL - GM/b

    return [dGLdt, dGMdt]
However, I am not able to fit it using either optimize.minimize or odeint. What would be the right approach in this case to fit two datasets in y that share some parameters?
Full code:
import nmrglue as ng
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy import integrate
from scipy.optimize import curve_fit
from scipy.integrate import odeint
from symfit import variables, parameters, Fit, ODEModel, Derivative, D, exp, sin, Model, cos, integrate
# read in the bruker formatted data
dic,data = ng.bruker.read_pdata('/opt/topspin4.1.0/NMR/2021_09_27_Glutamine/90/pdata/1')
#Bruker to NMRPipe data
C = ng.convert.converter()
C.from_bruker(dic, data)
pdic, ppdata = C.to_pipe()
#process the spectrum
ZF_Number = 16384
ppdata = ng.proc_base.di(ppdata) # discard the imaginaries
show = ppdata[2] #show the spectra number X
# determind the ppm scale
udic = ng.bruker.guess_udic(dic, data)
uc = ng.fileiobase.uc_from_udic(udic)
ppm_scale = uc.ppm_scale()
ppms = uc.ppm_scale()
#Plot the spectra
fig1 = plt.figure()
bx = fig1.add_subplot(111)
bx.plot(ppms, show)
plt.xlabel('Chemical Shift (ppm)')
plt.ylabel('Intensity')
First = 0
End = 80
#Integration for every i in the range
Area = []
Area2 = []
Area3 = [] #noise measurement, using the same chemical shift length as the product-peak.
#limits = [(176, 180), (180, 183)]
for i in range(First, End):
    Area.append(ng.analysis.integration.integrate(ppdata[i], uc, (177.15, 177.80), unit = "ppm", noise_limits = None, norm_to_range = None, calibrate = 1.0))
NP_Area = np.asarray(Area)

for i in range(First, End):
    Area2.append(ng.analysis.integration.integrate(ppdata[i], uc, (180.80, 181.10), unit = "ppm", noise_limits = None, norm_to_range = None, calibrate = 1.0))
NP_Area2 = np.asarray(Area2)

for i in range(First, End):
    Area3.append(ng.analysis.integration.integrate(ppdata[i], uc, (20.0, 20.3), unit = "ppm", noise_limits = None, norm_to_range = None, calibrate = 1.0))
NP_Area3 = np.asarray(Area3)
#Plot the buildUP
fig2 = plt.figure()
cx = fig2.add_subplot(111)
cx.plot(NP_Area)
cx.plot(NP_Area2)
plt.xlabel('Time (seconds)')
plt.ylabel('Intensity')
#Fitting
d1 = dic['acqus']['D'][1]
xdata = (np.arange(First, End) - First)*d1
ydata = NP_Area[:,0]
ydata2 = NP_Area2[:,0]
ydataScaled = ydata/max(ydata) #normalized to the initial value of the Glu signal to compensate for any variations in the polarization level
ydataScaled2 = ydata2/max(ydata) # same as above
#GL, GM, t = variables('GL, GM, t')
a, b, k = parameters('a, b, k')
# Define the equation considering the enzymatic reaction Gl -> Gm with the HP decay.
def f(C, xdata):
    GL = ydataScaled
    GM = ydataScaled2

    dGLdt = -k*GL - GL/a
    dGMdt = k*GL - GM/b

    return [dGLdt, dGMdt]

C0 = [1, 0]
popt, pcov = sp.optimize.minimize(f, xdata, args = (ydataScaled, ydataScaled2))
And the error:
runfile('/Users/karensantos/Desktop/Codes/Stack_question.py', wdir='/Users/karensantos/Desktop/Codes')
2
(512, 32768)
float64
/opt/anaconda3/lib/python3.8/site-packages/nmrglue/fileio/convert.py:68: UserWarning: Incompatible dtypes, conversion not recommended
warn("Incompatible dtypes, conversion not recommended")
Traceback (most recent call last):
File "/Users/karensantos/Desktop/Codes/Stack_question.py", line 112, in <module>
popt, pcov = sp.optimize.minimize(f, xdata, args = (ydataScaled, ydataScaled2))
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_minimize.py", line 612, in minimize
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py", line 1101, in _minimize_bfgs
sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps,
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py", line 261, in _prepare_scalar_function
sf = ScalarFunction(fun, x0, args, grad, hess,
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 76, in __init__
self._update_fun()
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 166, in _update_fun
self._update_fun_impl()
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 73, in update_fun
self.f = fun_wrapped(self.x)
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 70, in fun_wrapped
return fun(x, *args)
TypeError: f() takes 2 positional arguments but 3 were given
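For reference, one scipy-only pattern for fitting two signals that share parameters (a sketch based on the equations and array names above; not tested on this data) is to integrate the coupled ODEs with solve_ivp inside a residual function and hand the stacked residuals of both signals to scipy.optimize.least_squares:

import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import least_squares

def rhs(t, y, k, a, b):
    # the two coupled equations from above
    GL, GM = y
    dGLdt = -k*GL - GL/a
    dGMdt = k*GL - GM/b
    return [dGLdt, dGMdt]

def residuals(params, t, data_GL, data_GM):
    k, a, b = params
    sol = solve_ivp(rhs, (t[0], t[-1]), [1.0, 0.0], t_eval=t, args=(k, a, b))
    # stack both residual vectors so the two signals are fitted simultaneously
    return np.concatenate([sol.y[0] - data_GL, sol.y[1] - data_GM])

# placeholder initial guesses for k, a, b
fit = least_squares(residuals, x0=[0.1, 10.0, 10.0],
                    args=(xdata, ydataScaled, ydataScaled2))
print(fit.x)  # fitted k, a, b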
I have tried the curve_fit function for multiple variables and have encountered a problem with "sigma has incorrect shape". I tried the following code. Could anyone explain why I get this error?
Here x and y are my independent variables, and p, q, r are the parameters I want to fit.
xdata = [214.737191559, -5.64912101538e-36, 36.1372453686, 189.459700978, 233.562136902, 201.230228832, -5.59364882619e-36, -36.3232002416, -188.192199081, -212.837139143, -232.342545403, -200.699429716]
ydata = [-5.88273617837e-37, -211.536123799, -186.67108047, -35.9497006815, 200.282998159, 232.085860035, 213.44274878, 187.945919272, 35.7227474297, -6.00785257974e-37, -199.746844708, -230.856058666]
xdata = np.array(xdata)
ydata = np.array(ydata)
def func1(X, a, b, c):
    x, y = X
    n = 8
    # % A = ydata
    # % B = -xdata
    # % C = xdata. - ydata
    # % H = zdata
    g = np.subtract(x, y)
    I_0 = np.subtract(x, y)  # x-y = C
    I_1 = np.multiply(I_0, c)  # c(x-y) = cC
    I_2 = np.multiply(b, -x)  # b(-x) = bB
    I_3 = np.multiply(a, y)  # aA
    I3_0 = np.subtract(I_1, I_2)  # cC-bB
    I3_1 = np.subtract(I_3, I_1)  # aA-cC
    I3_2 = np.subtract(I_2, I_3)  # bB-aA
    I3_00 = np.multiply(I3_0, I3_1)  # (cC-bB)(aA-cC)
    I3_01 = np.multiply(I3_00, I3_2)  # (cC-bB)(aA-cC)(bB-aA)
    I3 = np.divide(I3_01, 54)  # (cC-bB)(aA-cC)(bB-aA)/54
    I2_0 = np.power((I3_1), 2)  # (aA-cC)^2
    I2_1 = np.power((I3_0), 2)  # (cC-bB)^2
    I2_2 = np.power((I3_2), 2)  # (bB-aA)^2
    I2_00 = np.add(I2_0, I2_1)  # (aA-cC)^2 + (cC-bB)^2
    I2_01 = np.add(I2_00, I2_2)  # (aA-cC)^2 + (cC-bB)^2 + (bB-aA)^2
    I2 = np.divide(I2_01, 54)  # ((aA-cC)^2 + (cC-bB)^2 + (bB-aA)^2)/54
    th_0 = np.divide(I3, (np.power(I2, (3/2))))  # I3/(I2^(3/2))
    th = np.arccos(np.clip((th_0), -1, 1))  # arccos(I3/(I2^(3/2)))
    ans_0 = np.divide(np.add((2*th), (np.pi)), 6)  # (2*th + pi)/6
    ans_1 = np.divide(np.add((2*th), (3*np.pi)), 6)  # (2*th + 3*pi)/6
    ans_2 = np.divide(np.add((2*th), (5*np.pi)), 6)  # (2*th + 5*pi)/6
    ans_00 = np.multiply(np.cos(ans_0), 2)  # 2*cos((2*th + pi)/6)
    ans_11 = np.multiply(np.cos(ans_1), 2)  # 2*cos((2*th + 3*pi)/6)
    ans_22 = np.multiply(np.cos(ans_2), 2)  # 2*cos((2*th + 5*pi)/6)
    ans_000 = np.power(np.absolute(ans_00), n)  # (abs(2*cos((2*th + pi)/6)))^n
    ans_111 = np.power(np.absolute(ans_11), n)  # (abs(2*cos((2*th + 3*pi)/6)))^n
    ans_222 = np.power(np.absolute(ans_22), n)  # (abs(2*cos((2*th + 5*pi)/6)))^n
    ans_0000 = np.add((np.power(np.absolute(ans_00), n)), (np.power(np.absolute(ans_11), n)))  # (abs(2*cos((2*th + pi)/6)))^n + (abs(2*cos((2*th + 3*pi)/6)))^n
    ans_1111 = np.add((ans_0000), (np.power(np.absolute(ans_22), n)))  # (abs(2*cos((2*th + pi)/6)))^n + (abs(2*cos((2*th + 3*pi)/6)))^n + (abs(2*cos((2*th + 5*pi)/6)))^n
    sna_0 = np.power(np.multiply(3, I2), (n/2))  # (3*I2)^(n/2) !!
    sna_1 = 2*(np.power(190, n))  # 2*(sigma^n) !!
    sna_00 = np.multiply(sna_0, ans_1111)
    sna_11 = np.subtract(sna_00, sna_1)
    return sna_11
a, b, c = 10., 4., 6.
z = func1((xdata,ydata), a, b, c) * 1 + np.random.random(12) / 100
# initial guesses for a,b,c:
a, b, c = 1, 1, 1
p0 = np.array([a, b, c])
# p0 = 8., 2., 7.
popt,pcov = (curve_fit(func1, (xdata,ydata),z, p0))
popt
When I run this, I get the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-13-484bc542850b> in <module>()
6 p0 = np.array([a, b, c])
7 # p0 = 8., 2., 7.
----> 8 popt,pcov = (curve_fit(func1, (xdata,ydata), p0))
9 popt
~/.conda/envs/ML/lib/python3.6/site-packages/scipy/optimize/minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
749 # Remove full_output from kwargs, otherwise we're passing it in twice.
750 return_full = kwargs.pop('full_output', False)
--> 751 res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
752 popt, pcov, infodict, errmsg, ier = res
753 cost = np.sum(infodict['fvec'] ** 2)
~/.conda/envs/ML/lib/python3.6/site-packages/scipy/optimize/minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
381 if not isinstance(args, tuple):
382 args = (args,)
--> 383 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
384 m = shape[0]
385 if n > m:
~/.conda/envs/ML/lib/python3.6/site-packages/scipy/optimize/minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
25 def _check_func(checker, argname, thefunc, x0, args, numinputs,
26 output_shape=None):
---> 27 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
28 if (output_shape is not None) and (shape(res) != output_shape):
29 if (output_shape[0] != 1):
~/.conda/envs/ML/lib/python3.6/site-packages/scipy/optimize/minpack.py in func_wrapped(params)
461 if transform is None:
462 def func_wrapped(params):
--> 463 return func(xdata, *params) - ydata
464 elif transform.ndim == 1:
465 def func_wrapped(params):
ValueError: operands could not be broadcast together with shapes (12,) (3,)
The error you are receiving (ValueError: ``sigma`` has incorrect shape.) comes from an incorrect call of curve_fit: there is a mismatch between what the function expects and what you are feeding it. Here is an example of a correct call:
p, q, r = 1, 1, 1
p0 = np.array([p, q, r])
cfit = curve_fit(func, xdata, ydata, p0)
print(cfit)
Unfortunately, that is not the only problematic thing in your code. Your func1 will require some editing from you. You can refer to this post on how to use curve_fit.
Update:
I shortened your code and optimized some lines. Also, as mentioned in the comments, you need an output variable, so I generated some custom zdata that you can later replace with your own data.
import numpy as np
from scipy.optimize import curve_fit
xdata = [214.737, -5.649e-36, 36.137, 189.459, 233.562, 201.230, -5.593e-36, -36.323, -188.192, -212.837, -232.342, -200.699]
ydata = [-5.882e-37, -211.536, -186.671, -35.949, 200.282, 232.085, 213.442, 187.945, 35.722, -6.007, -199.746, -230.856]
def func(X, p, q, r):
    x = np.array(X[0])
    y = np.array(X[1])
    n = 8
    a1 = (p * y) - (r * (x - y))
    b1 = (q * -1 * x) - (p * y)
    c1 = (r * (x - y)) - (q * -1 * x)
    I3 = (a1 * b1 * c1) / 54
    I2 = (a1**2 + b1**2 + c1**2) / 54
    th = np.arccos( I3 / (I2**(3/2)) )
    an1 = (np.abs(2 * np.cos((2 * th + 1 * np.pi) / 6)))**n
    an2 = (np.abs(2 * np.cos((2 * th + 3 * np.pi) / 6)))**n
    an3 = (np.abs(2 * np.cos((2 * th + 5 * np.pi) / 6)))**n
    res = ( (3 * I2)**(n/2) ) * (an1 + an2 + an3) - (2 * (189.32)**8)
    return res
# init
p, q, r = 1, 1, 1
p0 = np.array([p, q, r])
# artificial zdata
zdata = func((xdata, ydata), p, q, r) + np.random.random(np.array(xdata).shape)
cfit = curve_fit(func, (xdata, ydata), zdata, p0)
# print output
print(cfit)
I still don't see exactly what inside func is causing the RuntimeWarning: invalid value encountered in arccos, which is why I also edited the data you provided.
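If the warning comes from floating-point round-off pushing the argument of arccos slightly outside [-1, 1], the np.clip from the original func1 can simply be carried over into the shortened func as well, e.g.:

# keep arccos's argument inside its domain, as in the original func1
th = np.arccos(np.clip(I3 / (I2**(3/2)), -1, 1))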