I have two parameters for which I'd like to maximise the log likelihood and obtain the optimal parameter values. The code here solves an ODE system, from which I will use two trajectories as my x and y data:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import math
# Total population, N.
N = 1
# Initial number of infected and recovered individuals, I0 and R0.
I0, R0 = 0.001, 0
# Everyone else, S0, is susceptible to infection initially.
U0 = N - I0 - R0
J0 = I0
Lf0, Ls0 = 0, 0
# Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
beta, gamma = 8, 0.4
mu, muTB, sigma, rho = 1/80, 1/6, 1/6, 0.03
u, v, w = 0.88, 0.083, 0.0006
t = np.linspace(0, 500, 500+1)
# The SIR model differential equations.
def deriv(y, t, N, beta, gamma, mu, muTB, sigma, rho, u, v, w):
    U, Lf, Ls, I, R, cInc = y
    b = (mu * (U + Lf + Ls + R)) + (muTB * I)
    lamda = beta * I
    clamda = 0.2 * lamda
    dU = b - ((lamda + mu) * U)
    dLf = (lamda*U) + ((clamda)*(Ls + R)) - ((u + v + mu) * Lf)
    dLs = (u * Lf) - ((w + clamda + mu) * Ls)
    dI = w*Ls + v*Lf - ((gamma + muTB + sigma) * I) + (rho * R)
    dR = ((gamma + sigma) * I) - ((rho + clamda + mu) * R)
    cI = w*Ls + v*Lf + (rho * R)
    return dU, dLf, dLs, dI, dR, cI
# Integrate the SIR equations over the time grid, t.
solve = odeint(deriv, (U0, Lf0, Ls0, I0, R0, J0), t, args=(N, beta, gamma, mu, muTB, sigma, rho, u, v, w))
U, Lf, Ls, I, R, cInc = solve.T
My next function calculates the log likelihood when values for beta and gamma are passed in; these are the parameters I'm trying to optimise:
def loglik(beta, gamma):
    solve = odeint(deriv, (U0, Lf0, Ls0, I0, R0, J0), t, args=(N, beta, gamma, mu, muTB, sigma, rho, u, v, w))
    U, Lf, Ls, I, R, cInc = solve.T  # get trajectories
    muPrev, sigmaPrev = I[-1]*100000, 40  # I (prevalence)
    muInc, sigmaInc = (cInc[1:] - cInc[:-1])[-1]*100000, 30  # cInc (incidence)
    n = 10000
    # logPrev = np.random.lognormal(np.log((muPrev**2) / (muPrev**2 + sigmaPrev**2)**0.5), (np.log(1 + (sigmaPrev**2 / muPrev**2)))**0.5, n)  # lognormal
    # logInc = np.random.lognormal(np.log((muInc**2) / (muInc**2 + sigmaInc**2)**0.5), (np.log(1 + (sigmaInc**2 / muInc**2)))**0.5, n)  # lognormal
    xPrev = I[-1]*100000  # value of x in formula for log of pdf
    xInc = (cInc[1:] - cInc[:-1])[-1]*100000  # value of x in formula for log of pdf
    logmuPrev = np.log((muPrev**2) / (muPrev**2 + sigmaPrev**2)**0.5)  # lognormal params
    logsdPrev = (np.log(1 + (sigmaPrev**2 / muPrev**2)))**0.5
    logmuInc = np.log((muInc**2) / (muInc**2 + sigmaInc**2)**0.5)  # lognormal params
    logsdInc = (np.log(1 + (sigmaInc**2 / muInc**2)))**0.5
    L_prev = -0.5*((np.log(xPrev) - logmuPrev) / logsdPrev)**2 - np.log(xPrev * logsdPrev * (2*math.pi)**0.5)  # log of pdf for prev and inc
    L_inc = -0.5*((np.log(xInc) - logmuInc) / logsdInc)**2 - np.log(xInc * logsdInc * (2*math.pi)**0.5)
    logsum = L_prev + L_inc  # summing logs
    return np.exp(logsum)  # exp for likelihood
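For reference, the L_prev and L_inc lines evaluate the log of the lognormal pdf. Assuming the standard parameterisation, that is

$$\log f(x) = -\frac{(\ln x - \mu)^2}{2\sigma^2} - \ln\!\left(x\,\sigma\sqrt{2\pi}\right),$$

where the log-scale parameters come from the target mean $m$ and standard deviation $s$ via $\mu = \ln\!\big(m^2/\sqrt{m^2+s^2}\big)$ and $\sigma = \sqrt{\ln(1+s^2/m^2)}$, matching the logmu/logsd lines above.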
Finally, I try to optimise, but my arrays have different shapes, so the optimisation fails. How do I fix this so I can correctly maximise the log likelihood?
x = I[:-1]
y = cInc[1:] - cInc[:-1]
lik_model = minimize(loglik, 8, 0.4, method='L-BFGS-B')
----edit----
I have managed to get minimize to run, but it just returns the 8 and 0.4 values I fed into it:
results:
fun: 0.00013295432301190784
hess_inv: <2x2 LbfgsInvHessProduct with dtype=float64>
jac: array([-5.75331835e-08, 6.87479045e-07])
message: 'CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL'
nfev: 3
nit: 0
njev: 1
status: 0
success: True
x: array([8. , 0.4])
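For reference, scipy.optimize.minimize expects a single objective f(x, *args) where x is one array packing all free parameters (calling minimize(loglik, 8, 0.4, ...) makes 0.4 the args argument, not a second parameter), and it minimises, so a likelihood has to be negated. The nit: 0 in the result above means the optimizer never left the starting point: the gradient of the nearly flat likelihood was already below pgtol. A minimal sketch of the expected calling convention, assuming loglik from above (the bounds are hypothetical):

from scipy.optimize import minimize
import numpy as np

def negloglik(params):
    b, g = params                     # unpack the single parameter vector
    return -np.log(loglik(b, g))      # minimise the negative log-likelihood

res = minimize(negloglik, x0=np.array([8.0, 0.4]), method='L-BFGS-B',
               bounds=[(0.1, 30.0), (0.01, 2.0)])  # hypothetical bounds
print(res.x)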
My code solves an ODE system using pairs of randomly generated parameter values. I put this in a for loop to generate some 50 outputs from those parameter values, and use an if statement within the for loop so that if the calculated values fall within a range, an acceptance message is printed, else a rejection message. What I want is for this information (the output values and the corresponding parameter values used to generate them) to be stored in memory, for example in a list. However, my method either saves only the last value or saves nothing at all, and depending on how I position the code within the loops, it ends up printing various elements 50 times in a row, which I do not want. My code is this:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
beta_samples = np.random.uniform(0, 30, 50)
gamma_samples = np.random.uniform(0, 2, 50)
for i, j in zip(beta_samples, gamma_samples):
    # Total population, N.
    N = 1
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 0.001, 0
    # Everyone else, S0, is susceptible to infection initially.
    U0 = N - I0 - R0
    J0 = I0
    Lf0, Ls0 = 0, 0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    beta, gamma = i, j
    mu, muTB, sigma, rho = 1/80, 1/6, 1/6, 0.03
    u, v, w = 0.88, 0.083, 0.0006
    t = np.linspace(0, 500, 500+1)
    # The SIR model differential equations.
    def deriv(y, t, N, beta, gamma, mu, muTB, sigma, rho, u, v, w):
        U, Lf, Ls, I, R, cInc = y
        b = (mu * (U + Lf + Ls + R)) + (muTB * I)
        lamda = beta * I
        clamda = 0.2 * lamda
        dU = b - ((lamda + mu) * U)
        dLf = (lamda*U) + ((clamda)*(Ls + R)) - ((u + v + mu) * Lf)
        dLs = (u * Lf) - ((w + clamda + mu) * Ls)
        dI = w*Ls + v*Lf - ((gamma + muTB + sigma) * I) + (rho * R)
        dR = ((gamma + sigma) * I) - ((rho + clamda + mu) * R)
        cI = w*Ls + v*Lf + (rho * R)
        return dU, dLf, dLs, dI, dR, cI
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (U0, Lf0, Ls0, I0, R0, J0), t, args=(N, beta, gamma, mu, muTB, sigma, rho, u, v, w))
    U, Lf, Ls, I, R, cInc = solve.T
    if 320 < I[-1]*100000 < 480 and 240 < (cInc[1:] - cInc[:-1])[-1]*100000 < 360:
        acc = [320 < I[-1]*100000 < 480]
        acc.append(320 < I[-1]*100000 < 480)
        print('for beta of', beta, 'and gamma of', gamma, 'prevalence is ', I[-1]*100000, 'incidence is ', (cInc[1:] - cInc[:-1])[-1]*100000)
    else:
        rejected.append(beta_samples)
        print('values of', beta, 'and gamma of', gamma, 'rejected')
Where is my code going wrong? I simply want one list of the values that meet the 'if' conditions and another of those falling under the 'else' condition.
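For what it's worth, the usual pattern is to create both lists once before the loop and append one entry per iteration (defining deriv and the constants a single time outside the loop); a minimal sketch, assuming the definitions from the code above, with the names accepted/rejected being illustrative:

accepted = []  # (beta, gamma, prevalence, incidence) tuples that pass the test
rejected = []  # (beta, gamma) pairs that fail it
for i, j in zip(beta_samples, gamma_samples):
    # Solve the ODE system with beta=i, gamma=j, reusing deriv and the
    # initial conditions defined above
    solve = odeint(deriv, (U0, Lf0, Ls0, I0, R0, J0), t,
                   args=(N, i, j, mu, muTB, sigma, rho, u, v, w))
    U, Lf, Ls, I, R, cInc = solve.T
    prev = I[-1]*100000
    inc = (cInc[1:] - cInc[:-1])[-1]*100000
    if 320 < prev < 480 and 240 < inc < 360:
        accepted.append((i, j, prev, inc))
    else:
        rejected.append((i, j))
print(len(accepted), 'accepted /', len(rejected), 'rejected')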
I have code which estimates a parameter beta in an ODE system, given that all parameters other than beta are known and that the peak of the 'epidemic' simulation is 10% of the starting population. However, I realise that solving for the root might not always succeed in finding the value. Is there a way to use scipy.optimize to estimate beta instead, by taking the difference between the simulated peak and the 10% target, squaring it, and minimising that? This is the current code:
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import root
def peak_infections(beta, days=100):
    # Total population, N.
    N = 1000
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 10, 0
    # Everyone else, S0, is susceptible to infection initially.
    S0 = N - I0 - R0
    J0 = I0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    gamma = 1/7
    # A grid of time points (in days)
    t = np.linspace(0, days, days + 1)
    # The SIR model differential equations.
    def deriv(y, t, N, beta, gamma):
        S, I, R, J = y
        dS = ((-beta * S * I) / N)
        dI = ((beta * S * I) / N) - (gamma * I)
        dR = (gamma * I)
        dJ = ((beta * S * I) / N)
        return dS, dI, dR, dJ
    # Initial conditions are S0, I0, R0
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (S0, I0, R0, J0), t, args=(N, beta, gamma))
    S, I, R, J = solve.T
    return np.max(I)/N

root(lambda b: peak_infections(b) - 0.1, x0=0.5).x
Using scipy.optimize with root(lambda b: peak_infections(b) - 0.1, x0=0.5).x only returns a misuse-of-function error.
EDIT ---------------------------------------------------
I am wondering how this approach could be applied if, instead of having the 10% peak as the key piece of information, I had a dataframe of weekly new case numbers. How could a similar method take that data into account when estimating beta? Say we have
import pandas as pd
d = {'Week': [1, 2,3,4,5,6,7,8,9,10,11], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
df = pd.DataFrame(data=d)
Now this is our data, rather than knowing that the peak of the simulation is 10% of the starting population N. How can this be used in the minimisation to find an estimate for beta?
-----EDIT 2-------
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import pandas as pd
from scipy.optimize import leastsq
###############################################################################
########## WITH WEEKLY DATA
###############################################################################
#t = np.arange(0,84,7)
t = np.linspace(0, 77, 77+1)
d = {'Week': [t[7],t[14],t[21],t[28],t[35],t[42],t[49],t[56],t[63],t[70],t[77]], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
df = pd.DataFrame(data=d)
#d = {'Week': t, 'incidence': [0,206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
#df = pd.DataFrame(data=d)
def peak_infections(beta, df):
    # Weeks for which the ODE system will be solved
    #weeks = df.Week.to_numpy()
    # Total population, N.
    N = 100000
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 10, 0
    # Everyone else, S0, is susceptible to infection initially.
    S0 = N - I0 - R0
    J0 = I0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    # reproductive no. R zero is beta/gamma
    gamma = 1/6  # rate should be in weeks now
    # A grid of time points
    t7 = np.arange(7, 84, 7)
    # The SIR model differential equations.
    def deriv(y, t7, N, beta, gamma):
        S, I, R, J = y
        dS = ((-beta * S * I) / N)
        dI = ((beta * S * I) / N) - (gamma * I)
        dR = (gamma * I)
        dJ = ((beta * S * I) / N)
        return dS, dI, dR, dJ
    # Initial conditions are S0, I0, R0
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (S0, I0, R0, J0), t7, args=(N, beta, gamma))
    S, I, R, J = solve.T
    return np.max(I)/N

def residual(x, df):
    # Total population, N.
    N = 100000
    incidence = df.incidence.to_numpy()/N
    return np.sum((peak_infections(x, df) - incidence) ** 2)

x0 = 0.5
res = minimize(residual, x0, args=(df,), method="Nelder-Mead").x
print(res)
Yes, you can do this using scipy.optimize.minimize.
One approach would be as follows:
from scipy.optimize import minimize
def residual(x):
    return (peak_infections(x) - 0.1) ** 2
x0 = 0.5
res = minimize(residual, x0, method="Nelder-Mead", options={'fatol':1e-04})
print(res)
Right now this gives almost the same answer as the root method you posted, but it works as an alternative.
Edit
As per the discussion in the comment section of this answer, and according to the edit to your question, I propose the following solution:
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import minimize
import pandas as pd
d = {'Week': [1, 2,3,4,5,6,7,8,9,10,11], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
df = pd.DataFrame(data=d)
def peak_infections(beta, df):
    # Weeks for which the ODE system will be solved
    weeks = df.Week.to_numpy()
    # Total population, N.
    N = 1000
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 10, 0
    # Everyone else, S0, is susceptible to infection initially.
    S0 = N - I0 - R0
    J0 = I0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    gamma = 1/7 * 7  # rate should be in weeks now
    # A grid of time points (in days)
    t = np.linspace(0, weeks[-1], weeks[-1] + 1)
    # The SIR model differential equations.
    def deriv(y, t, N, beta, gamma):
        S, I, R, J = y
        dS = ((-beta * S * I) / N)
        dI = ((beta * S * I) / N) - (gamma * I)
        dR = (gamma * I)
        dJ = ((beta * S * I) / N)
        return dS, dI, dR, dJ
    # Initial conditions are S0, I0, R0
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (S0, I0, R0, J0), t, args=(N, beta, gamma))
    S, I, R, J = solve.T
    return I/N

def residual(x, df):
    # Total population, N.
    N = 1000
    incidence = df.incidence.to_numpy()/N
    return np.sum((peak_infections(x, df)[1:] - incidence) ** 2)

x0 = 0.5
res = minimize(residual, x0, args=(df,), method="Nelder-Mead", options={'fatol': 1e-04})
print(res)
Here, I solve the ODE system over 11 weeks and compare the result directly with the 11 incidence values from the provided dataframe. The squared differences are taken element by element, summed, and that sum is minimized. The result, however, is not very promising.
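To see how far off it is, one can plot the model trajectory against the (scaled) data; a minimal sketch, assuming df, peak_infections, and res from the code above:

import matplotlib.pyplot as plt

beta_hat = res.x[0]                     # fitted beta from the Nelder-Mead result
model = peak_infections(beta_hat, df)   # I/N on the daily grid, 12 points
plt.plot(df.Week, df.incidence / 1000, 'o', label='data (incidence / N)')
plt.plot(range(len(model)), model, label='model I/N')
plt.xlabel('week')
plt.legend()
plt.show()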
I have been attempting to calibrate my model, but I am running into issues with the scipy.optimize module. I have tried various scipy optimizers, but they all return the error "TypeError: can only concatenate tuple (not "list") to tuple". Does anyone know how to resolve this issue? Thank you for your time.
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from numba import jit, njit,float64
from scipy.optimize import fmin_slsqp
i=complex(0,1)
sigma, kappa, theta, volvol, rho = 0.1, 0.1, 0.1, 0.1, 0.1
params=[sigma, kappa, theta, volvol, rho]
strikes=[4650,4655,4660,4665,4670]
maturities=[1/48,2/48,3/48,1/12,5/48]
marketPrices=[70.00,66.70,63.50,60.35,57.30,82.50,79.20,76.0,72.80,69.70,92.65,89.35,86.10,82.90,79.75,101.60,98.30,95.10,91.90,88.75,109.85,106.60,103.35,100.20,97.00]
marketPrices=np.array(marketPrices)
rates=[0.05,0.05,0.05,0.05,0.05]
St=4662
np.shape(marketPrices)
@jit
def fHeston(s, St, K, r, T, sigma, kappa, theta, volvol, rho):
    # To be used a lot
    prod = rho * sigma * i * s
    # Calculate d
    d1 = (prod - kappa)**2
    d2 = (sigma**2) * (i*s + s**2)
    d = np.sqrt(d1 + d2)
    # Calculate g
    g1 = kappa - prod - d
    g2 = kappa - prod + d
    g = g1/g2
    # Calculate first exponential
    exp1 = np.exp(np.log(St) * i * s) * np.exp(i * s * r * T)
    exp2 = 1 - g * np.exp(-d * T)
    exp3 = 1 - g
    mainExp1 = exp1 * np.power(exp2/exp3, -2 * theta * kappa/(sigma**2))
    # Calculate second exponential
    exp4 = theta * kappa * T/(sigma**2)
    exp5 = volvol/(sigma**2)
    exp6 = (1 - np.exp(-d * T))/(1 - g * np.exp(-d * T))
    mainExp2 = np.exp((exp4 * g1) + (exp5 * g1 * exp6))
    return (mainExp1 * mainExp2)
@jit(forceobj=True)
def priceHestonMid(St, K, r, T, sigma, kappa, theta, volvol, rho):
    P, iterations, maxNumber = 0, 1000, 100
    ds = maxNumber/iterations
    element1 = 0.5 * (St - K * np.exp(-r * T))
    # Calculate the complex integral
    # Using j instead of i to avoid confusion
    for j in range(1, iterations):
        s1 = ds * (2*j + 1)/2
        s2 = s1 - i
        numerator1 = fHeston(s2, St, K, r, T, sigma, kappa, theta, volvol, rho)
        numerator2 = K * fHeston(s1, St, K, r, T, sigma, kappa, theta, volvol, rho)
        denominator = np.exp(np.log(K) * i * s1) * i * s1
        P = P + ds * (numerator1 - numerator2)/denominator
    element2 = P/np.pi
    return np.real((element1 + element2))
# vectorify
def strikematurePriceHestonMid(St, W, r, Q, sigma, kappa, theta, volvol, rho):
    stuff = []
    volsur = []
    for p in range(5):
        for e in range(5):
            stuff.append(priceHestonMid(St, W[e], r, Q[p], sigma, kappa, theta, volvol, rho))
            #volsur[e][p]=stuff[4*p::4*p+4]
            #print(priceHestonMid(St, W[e], r, Q[p], sigma, kappa, theta, volvol, rho))
    volsur = np.reshape(stuff, (5, 5))
    stuff = np.array(stuff)
    return stuff
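As a quick sanity check, this surface function can be evaluated directly with the placeholder parameter values defined above before attempting any calibration:

# Evaluate the 5x5 model price surface at the placeholder parameters
modelPrices = strikematurePriceHestonMid(St, strikes, rates[0], maturities,
                                         sigma, kappa, theta, volvol, rho)
print(modelPrices.shape)  # (25,), matching marketPrices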
def calibratorHeston(St, initialValues=[0.5, 0.5, 0.5, 0.5, -0.5],
                     lowerBounds=[1e-2, 1e-2, 1e-2, 1e-2, -1],
                     upperBounds=[10, 10, 10, 10, 0]):
    objectiveFunctionHeston = ((marketPrices) - (strikematurePriceHestonMid(St, strikes,
                                                                            rates[0],
                                                                            maturities,
                                                                            sigma,
                                                                            kappa,
                                                                            theta,
                                                                            volvol,
                                                                            rho))).sum()
    result = fmin_slsqp(objectiveFunctionHeston, initialValues, args=params)
    return result

calibratorHeston(4662)
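For reference, two things in this call clash with the fmin_slsqp API: the first argument must be a callable taking the parameter vector (objectiveFunctionHeston here is already an evaluated number), and args must be a tuple, not a list, since SciPy concatenates it to an internal tuple when evaluating the objective, which is exactly the reported TypeError. A minimal sketch of the expected calling convention with a hypothetical toy objective:

from scipy.optimize import fmin_slsqp
import numpy as np

def toy_objective(x, target):
    # fmin_slsqp evaluates this as f(x, *args)
    return np.sum((x - target) ** 2)

x_opt = fmin_slsqp(toy_objective, x0=[0.5, 0.5], args=(np.array([1.0, 2.0]),))
print(x_opt)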
UPDATE:
I was able to figure out how to get it done. I am still not sure why it was not working before; nonetheless, I got it working with SciPy. Thank you all.
from scipy.optimize import minimize
def objectiveFunctionHeston(x, St, strikes, rates, maturities):
    objective = ((marketPrices) - (strikematurePriceHestonMid(St, strikes,
                                                              rates,
                                                              maturities,
                                                              sigma=x[0],
                                                              kappa=x[1],
                                                              theta=x[2],
                                                              volvol=x[3],
                                                              rho=x[4])))/marketPrices
    objective = np.square(np.dot(objective, objective))
    return objective

bounds = ((1e-2, 5), (1e-2, 8), (1e-2, 10), (1e-2, 10), (-1, 1))
res = minimize(objectiveFunctionHeston, method='SLSQP',
               x0=[sigma, kappa, theta, volvol, rho],
               args=(St, strikes, rates[0], maturities),
               bounds=bounds, tol=1e-20,
               options={"maxiter": 1000})
print(res)
I'm translating some code from MATLAB to Python. This code simulates the behaviour of a model, and I want to estimate its parameters. The problem is that the results obtained with Python and with MATLAB are very different. I thought it was related to the difference between MATLAB's fmincon and Python's scipy.optimize.minimize function, but according to this tutorial that I found on YouTube (https://www.youtube.com/watch?v=SwogAa1719M) the results are almost the same, so the problem must be in my code, but I can't find it.
I report a minimal example of my code:
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from scipy.optimize import minimize, Bounds

g = 0.07  # fixed recovery rate (g=0.07 in the MATLAB code); read as a global by SIR_model

def cost_function(parameters, measured, t, x0, total_active):
    Tp = simulate(parameters, t, x0, total_active)
    measured = np.squeeze(measured)
    Tp = Tp[:, 2]
    return (np.sum(np.power((np.divide(np.diff(measured), measured[1:]) - np.divide(np.diff(Tp), Tp[1:])), 2)))

def SIR_model(x, t, params, total_active):
    S0, _, R0 = x
    v, tau, I0 = params
    dSdt = -v * S0 * I0 / total_active(t)
    dIdt = v * S0 * I0 / total_active(t) - g * I0 - tau * I0
    dCdt = tau * I0
    return [dSdt, dIdt, dCdt]

def simulate(p, t, x0, total_active):
    T = np.zeros((len(t), 3))
    T[0, :] = x0
    for i in range(len(t) - 1):
        ts = [t[i], t[i + 1]]
        y = odeint(SIR_model, x0, ts, args=(p, total_active))
        x0 = y[-1]
        T[i + 1, :] = x0
    return T

def identify_model(data, initial_guess, t, N, total_active):
    # Set initial condition of the model
    x0 = []
    Q0 = data[0]
    x0.append(N - initial_guess['It0'] - Q0)
    x0.append(initial_guess['It0'])
    x0.append(Q0)
    It0 = initial_guess['It0']
    v = initial_guess['v']
    tau = initial_guess['tau']
    lim_sup = [v * 10, tau * 1.5, It0 * 1.3]
    lim_inf = [v * 0, tau * 0.9, It0 * 0.7]
    bounds = Bounds(lim_inf, lim_sup)
    options = {"maxiter": 1000, "ftol": 1e-08}
    return minimize(cost_function,
                    np.asarray([initial_guess['v'], initial_guess['tau'], initial_guess['It0']]),
                    args=(data, t, x0, total_active), bounds=bounds,
                    options=options, tol=1e-08, method="SLSQP")['x']
data=[275.5,317.,457.33333333,646.,888.66666667,1236.66666667,1619.33333333,2077.33333333,2542.33333333]
times = [i for i in range(len(data))]
total_active_data=[59999725.,59999684.33333334,59999558.66666666,59999385.33333334,59999158.33333333,59998823.,59998474.66666666,59998053.33333333,59997652.66666666]
total_active = interp1d([i for i in range(len(total_active_data))], total_active_data, fill_value="extrapolate")
initial_guess = {"v": 0.97, "tau": 0.066, "It0": 100}
print(identify_model(data,initial_guess,times,60e6,total_active))
This snippet gives, as result, [0.97099097,0.099,130.].
The (I hope) equivalent MATLAB code is:
function [pars] = Identify_Model(data,initial_guess,lim_inf,lim_sup,times,N,total_active)
    scalefac=100;
    %Initialize the initial guess for each parameter
    v=initial_guess.v;
    It0=initial_guess.It0;
    tau=initial_guess.tau;
    g=0.07;
    lim_inf(3)=lim_inf(3)/scalefac;
    lim_sup(3)=lim_sup(3)/scalefac;
    %Identify the model parameters
    options=optimset('MaxIter',100,'TolFun',1e-6,'TolX',1e-6);
    parmin=fmincon(@(pars) error_SIR(data,[pars(1),pars(2),g,scalefac*pars(3)],times,N,total_active),[v,tau,It0],[],[],[],[],lim_inf,lim_sup,[],options);
    pars=num2cell([parmin(1:2),scalefac*parmin(3)]);
    pars=[pars(1),pars(2),g,pars(3)];
end

function [costo]=error_SIR(data,pars,tempi,N,totale_attivi)
    %Assign the parameters
    pars=num2cell(pars);
    [v,tau,g,I0]=pars{:};
    %Simulate the model
    Q0=data(1,1);
    S0=N-I0-Q0;
    [~,y]=ode45(@(t,x) SIR(t,x,v,tau,g,totale_attivi),tempi,[S0;I0;Q0]);
    if length(tempi)==2
        y=[y(1,:);y(end,:)];
    end
    %Identify on the normalized data (Data/mean data)
    costo=sum(((diff(sum(data,1))./sum(data(:,2:end),1))-(diff(sum(y(:,3),2))./sum(y(2:end,3),2))').^2);
end

function y=SIR(t,x,v,tau,g,total_active)
    y=zeros(3,1);
    y(1)=-v*x(1)*x(2)/total_active(t); % S
    y(2)=v*x(1)*x(2)/total_active(t)-(tau+g)*x(2); % I
    y(3)=tau*x(2); % C
end

total_active_data=[59999725.,59999684.333,59999558.666,59999385.333,59999158.33,59998823.,59998474.66,59998053.333,59997652.66666666];
total_active = @(t) interp1(1:9,total_active_data,t);
initial_guess.It0=100;
initial_guess.v=0.97;
initial_guess.g=0.07;
initial_guess.tau=0.066;
g=initial_guess.g;
It0=initial_guess.It0;
v=initial_guess.v;
tau=initial_guess.tau;
N=60e6
%Define the constrints for the identification
lim_sup=[v*10, tau*1.5, It0*1.3];
lim_inf=[v*0, tau*0.9, It0*0.7];
data=[275.5,317.,457.33333333,646.,888.66666667,1236.66666667,1619.33333333,2077.33333333,2542.33333333]
times=1:9;
%identify the model parameters
pars=Identify_Model(data,initial_guess,lim_inf,lim_sup,times,N,total_active)
And the result is {[0.643004812025865]} {[0.0989999761533351]} {[0.07]} {[129.9999687237]} (don't consider the value 0.07; it's fixed). I thought the problem might be linked to the fact that I'm minimising a non-convex function, and that maybe fmincon is more powerful than scipy.optimize.minimize?
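Since the cost surface is likely non-convex, one way to check whether SLSQP is simply stuck in a local minimum (rather than being weaker than fmincon) is a multi-start search from random points inside the bounds; a minimal sketch, assuming cost_function, data, times, x0, total_active, lim_inf and lim_sup from the Python code above are in scope:

import numpy as np
from scipy.optimize import minimize, Bounds

rng = np.random.default_rng(0)
best = None
for _ in range(20):
    # Draw a random starting point inside the box constraints
    start = [rng.uniform(lo, hi) for lo, hi in zip(lim_inf, lim_sup)]
    res = minimize(cost_function, start, args=(data, times, x0, total_active),
                   bounds=Bounds(lim_inf, lim_sup), method='SLSQP')
    if best is None or res.fun < best.fun:
        best = res
print(best.x, best.fun)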