Curve fitting with determination of phonon number associated with each motional state - python

I have to write a Python program that performs curve fitting with at least 20 different occupation-probability parameters, as explained below. I have added the model used for fitting as well.
Later, once the fit gives us the values of the fitted occupation probabilities, we have to determine the mean phonon (vibrational quantum) number from the thermal population distribution for Pn.
I am attaching code below for just one parameter, P0.
import numpy as np
import pandas as pd
from lmfit import Minimizer, Parameters, report_fit
df = pd.read_csv('Fock0_1st BSB.csv')
x = pd.DataFrame(df["Untitled"]).to_numpy()
data = pd.DataFrame(df["Untitled 1"]).to_numpy()
x = [i[0] for i in x]
data = [i[0] for i in data]
x = np.asarray(x)
data = np.asarray(data)
x = x/1000
data = abs(data-100)/100
n=0 #Ground State Measurements for n=0
def function(params, x, data):
    v = params.valuesdict()
    model = 0.5*(1 + v['P0']*np.cos(np.sqrt(n+1)*v['omega0']*v['eta']*x + v['phase'])*np.exp(-(v['gamma']*((n+1)**0.7))*x)) - v['decay']*x
    return model - data
params=Parameters()
params.add('P0',value=0.97,min=0.01,max=0.999)
params.add('omega0',value=0.1967,min=0.156,max=0.23,vary=True)
params.add('eta',value=0.0629,min=0.01,max=0.11,vary=True)
params.add('gamma',value=5.6E-4)
params.add('phase',value=0.143)
params.add('decay',value=0.1E-6)
minner = Minimizer(function, params, fcn_args=(x, data))
result = minner.minimize()
final = data + result.residual
report_fit(result)
try:
    import matplotlib.pyplot as plt
    plt.plot(x, data, '+')
    plt.plot(x, final)
    plt.show()
except ImportError:
    pass
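A minimal sketch of the second step (not part of the original post): once the 20 occupation probabilities have been fitted, the mean phonon number nbar can be estimated by fitting the thermal distribution P_n = nbar**n/(nbar + 1)**(n + 1) to them. The P_fitted values below are synthetic stand-ins for the fitted probabilities:

import numpy as np
from scipy.optimize import curve_fit

def thermal(n, nbar):
    # thermal population of Fock state n for mean phonon number nbar
    return nbar**n / (nbar + 1.0)**(n + 1)

n_levels = np.arange(20)
rng = np.random.default_rng(0)
P_fitted = thermal(n_levels, 2.0) + rng.normal(0.0, 0.005, size=20)  # synthetic stand-in

popt, pcov = curve_fit(thermal, n_levels, P_fitted, p0=[1.0])
print('nbar = %.3f +/- %.3f' % (popt[0], np.sqrt(pcov[0, 0])))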

Related

Using scipy.optimize.curve_fit with more than one input data and p0= two variables

I am using scipy.optimize.curve_fit to fit measured (test in the code) data to theoretical (run in the code) data. Attached are two codes. In the first one I have one measured and one theoretical dataset; when I use scipy.optimize.curve_fit I get approximately the correct temperature. The problem comes when I need to extend scipy.optimize.curve_fit to more than one measured and theoretical dataset. The second code is my progress so far. How do I deal with two input datasets, i.e., what do I replace the x- and y-data with? For example, do I need to combine the data in some manner? I have tried a few ways without success. Any help would be appreciated.
import pandas as pd
import numpy as np
import scipy.optimize
from scipy import interpolate
# run data: wave, counts, temp
run_1 = pd.read_excel("run_1.xlsx")
run_1_temp = np.array(run_1['temp'])
run_1_counts = np.array(run_1['count'])
# test data: wave, counts, temp = 30
test_1 = pd.read_excel("test_1.xlsx")
xdata = test_1['wave']
ydata = test_1['counts']
# Interpolate
inter_run_1 = interpolate.interp1d(run_1_temp, run_1_counts, kind='linear', fill_value='extrapolate')
run_1_temp_new = np.arange(20, 50, 0.1)
run_1_count_new = inter_run_1(run_1_temp_new)
# Curve-fit
def f(wave, temp):
    signal = inter_run_1(temp)
    return signal
popt, pcov = scipy.optimize.curve_fit(f,xdata,ydata,p0=[30])
print(popt, pcov)
import pandas as pd
import numpy as np
import scipy.optimize
from scipy import interpolate
# run data: wave, counts, temp
run_1 = pd.read_excel("run_1.xlsx")
run_2 = pd.read_excel("run_2.xlsx")
# test data 1: wave, counts, temp = 30
test_1 = pd.read_excel("test_1.xlsx")
xdata = test_1['wave']
ydata = test_1['counts']
# test data 2: wave, counts, temp = 40
test_2 = pd.read_excel("test_2.xlsx")
x1data = test_2['wave']
y1data = test_2['counts']
run_1_temp = np.array(run_1['temp'])
run_1_counts = np.array(run_1['count'])
run_2_temp = np.array(run_2['temp'])
run_2_counts = np.array(run_2['count'])
# Interpolate
inter_run_1 = interpolate.interp1d(run_1_temp, run_1_counts, kind='linear', fill_value='extrapolate')
run_1_temp_new = np.arange(20, 50, 0.1)
run_1_count_new = inter_run_1(run_1_temp_new)
inter_run_2 = interpolate.interp1d(run_2_temp, run_2_counts, kind='linear', fill_value='extrapolate')
run_2_temp_new = np.arange(20, 50, 0.1)
run_2_count_new = inter_run_2(run_2_temp_new)
def f(wave, temp1, temp2):
    signal_1 = inter_run_1(temp1)
    signal_2 = inter_run_2(temp2)
    signal = signal_1 + signal_2
    return signal
popt, pcov = scipy.optimize.curve_fit(f,xdata,ydata,p0=[30,50])
print(popt, pcov)
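One common way to handle this (a hedged sketch reusing the interpolators and test arrays defined above; the details are assumptions, not a tested solution) is to concatenate the two measured spectra into a single 1-D array and have the model return the matching concatenation, so that curve_fit sees one combined residual vector:

import numpy as np
import scipy.optimize

n1 = len(xdata)  # length of the first test spectrum

def f_combined(wave_combined, temp1, temp2):
    # as in the question, each run's interpolator maps a temperature to a
    # count level; np.full broadcasts it across the matching wavelength axis
    signal_1 = np.full(n1, float(inter_run_1(temp1)))
    signal_2 = np.full(len(wave_combined) - n1, float(inter_run_2(temp2)))
    return np.concatenate((signal_1, signal_2))

x_all = np.concatenate((xdata, x1data))
y_all = np.concatenate((ydata, y1data))
popt, pcov = scipy.optimize.curve_fit(f_combined, x_all, y_all, p0=[30, 40])
print(popt, pcov)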

Regression problem with MLPRegressor (scikit)

I need to develop a neural network able to produce as output the values of a 2D map (for example a Gaussian distribution) starting from a few input parameters (offset, limit, sigma). In the code below I tried to start, probably in the wrong way, with a simpler case study: the 1D map of a Gaussian distribution.
The output is not as expected; I don't know whether I am missing something in the data formatting or in the instantiation of the neural network. Any suggestions?
from sklearn.neural_network import MLPRegressor
import numpy as np
import matplotlib.pyplot as plt
import math
def gaussian(x, alpha, r):
    return 1./(math.sqrt(alpha**math.pi))*np.exp(-alpha*np.power((x - r), 2.))
features = 20000
output = 1000
w = []
j = []
for iii in range(0, features):
    mu, sigma = 0., (iii+1)
    x = np.linspace(-(iii+1), (iii+1), output)
    t = gaussian(x, sigma, iii)
    t = t.tolist()
    dummy = np.zeros(3)
    dummy[0] = sigma
    dummy[1] = (iii+1)
    dummy[2] = (iii)
    dummy = dummy.tolist()
    w.append(t)
    j.append(dummy)
nn = MLPRegressor(hidden_layer_sizes=(5000,10), activation='tanh', solver='lbfgs')
model = nn.fit(j,w)
test_i = [[1.0,1.0,0.0]]
test_o = nn.predict(test_i)
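A frequent first fix when MLPRegressor produces unexpected output is to standardize the inputs and start with a much smaller network; a 5000-unit hidden layer trained with lbfgs on raw inputs spanning thousands of units is unlikely to converge. A minimal sketch under those assumptions (the sizes and ranges are illustrative, not a tuned solution):

import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler

def gaussian(x, alpha, r):
    # normalized form: sqrt(alpha/pi) * exp(-alpha * (x - r)^2)
    return np.sqrt(alpha/np.pi)*np.exp(-alpha*np.power(x - r, 2.))

rng = np.random.default_rng(0)
n_samples, n_points = 500, 100
params = rng.uniform(0.5, 3.0, size=(n_samples, 3))   # (alpha, unused, center) - illustrative
x = np.linspace(-5., 5., n_points)
maps = np.array([gaussian(x, p[0], p[2]) for p in params])

scaler = StandardScaler()
X = scaler.fit_transform(params)                      # standardized inputs

nn = MLPRegressor(hidden_layer_sizes=(64, 64), activation='tanh',
                  solver='adam', max_iter=2000, random_state=0)
nn.fit(X, maps)                                       # multi-output regression
test_o = nn.predict(scaler.transform([[1.0, 1.0, 0.0]]))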

Python Polynomial Regression with Gradient Descent

I am trying to implement polynomial regression with gradient descent. I want to fit the following function (the cubic generated in create_data below):
f(x) = (1/3)x^3 - 2x^2 + 2x + 2
The code I use is:
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(seed=42)
def create_data():
    x = PolynomialFeatures(degree=5).fit_transform(np.linspace(-10,10,100).reshape(100,-1))
    l = lambda x_i: (1/3)*x_i**3 - 2*x_i**2 + 2*x_i + 2
    data = l(x[:,1])
    noise = np.random.normal(0, 0.1, size=np.shape(data))
    y = data + noise
    y = y.reshape(100,1)
    return {'x': x, 'y': y}
def plot_function(x, y):
    fig = plt.figure(figsize=(10,10))
    plt.plot(x[:,1], [(1/3)*x_i**3 - 2*x_i**2 + 2*x_i + 2 for x_i in x[:,1]], c='lightgreen', linewidth=3, zorder=0)
    plt.scatter(x[:,1], y)
    plt.show()
def w_update(y, x, batch, w_old, eta):
    derivative = np.sum([(y[i]-np.dot(w_old.T,x[i,:]))*x[i,:] for i in range(np.shape(x)[0])])
    print(derivative)
    return w_old + eta*(1/batch)*derivative
# initialize variables
w = np.random.normal(size=(6,1))
data = create_data()
x = data['x']
y = data['y']
plot_function(x,y)
# Update w
w_s = []
Error = []
for i in range(500):
    error = (1/2)*np.sum([(y[i]-np.dot(w.T,x[i,:]))**2 for i in range(len(x))])
    Error.append(error)
    w_prime = w_update(y, x, np.shape(x)[0], w, 0.001)
    w = w_prime
    w_s.append(w)
# Plot the predicted function
plt.plot(x[:,1],np.dot(x,w))
plt.show()
# Plot the error
fig3 = plt.figure()
plt.scatter(range(len(Error[10:])),Error[10:])
plt.show()
But as a result I receive something strange which is completely out of bounds... I have also tried to alter the number of iterations as well as the parameter eta, but it did not help. I assume I have made a mistake in the update of w.
I have found the solution. The problem is indeed in the part where I calculate the weights, specifically in:
np.sum([(y[d]-np.dot(w_old.T,x[d,:]))*x[d,:] for d in range(np.shape(x)[0])])
which should be like:
np.sum([-(y[d]-np.dot(w.T.copy(),x[d,:]))*x[d,:].reshape(np.shape(w)) for d in range(len(x))],axis=0)
We have to add np.sum(axis=0) to get the dimensionality we want; the dimensionality must be equal to that of w. The numpy sum documentation says:
The default, axis=None, will sum all of the elements of the input
array.
This is not what we want to achieve. Adding axis=0 sums over the first axis of our array, which has shape (100, 7, 1): the 100 elements of shape (7, 1) are summed up and the resulting array has shape (7, 1), which is exactly what we want. A small illustrative check of the difference (not from the original post):
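import numpy as np

terms = np.ones((100, 7, 1))         # 100 per-sample gradient terms of shape (7, 1)
print(np.sum(terms).shape)           # () -- axis=None collapses everything to a scalar
print(np.sum(terms, axis=0).shape)   # (7, 1) -- matches the shape of w

Implementing this and cleaning up the code yields: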
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
np.random.seed(seed=42)
def create_data():
    x = PolynomialFeatures(degree=6).fit_transform(np.linspace(-2,2,100).reshape(100,-1))
    # Normalize Data
    x[:,1:] = MinMaxScaler(feature_range=(-2,2), copy=False).fit_transform(x[:,1:])
    l = lambda x_i: np.cos(0.8*np.pi*x_i)
    data = l(x[:,1])
    noise = np.random.normal(0, 0.1, size=np.shape(data))
    y = data + noise
    y = y.reshape(100,1)
    return {'x': x, 'y': y}
def plot_function(x, y, w, Error, w_s):
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(40,10))
    ax[0].plot(x[:,1], [np.cos(0.8*np.pi*x_i) for x_i in x[:,1]], c='lightgreen', linewidth=3, zorder=0)
    ax[0].scatter(x[:,1], y)
    ax[0].plot(x[:,1], np.dot(x,w))
    ax[0].set_title('Function')
    ax[1].scatter(range(iterations), Error)
    ax[1].set_title('Error')
    plt.show()
# initialize variables
data = create_data()
x = data['x']
y = data['y']
w = np.random.normal(size=(np.shape(x)[1],1))
eta = 0.1
iterations = 10000
batch = 10
def stochastic_gradient_descent(x, y, w, eta):
    derivative = -(y - np.dot(w.T, x))*x.reshape(np.shape(w))
    return eta*derivative
def batch_gradient_descent(x, y, w, eta):
    derivative = np.sum([-(y[d]-np.dot(w.T.copy(),x[d,:]))*x[d,:].reshape(np.shape(w)) for d in range(len(x))], axis=0)
    return eta*(1/len(x))*derivative
def mini_batch_gradient_descent(x, y, w, eta, batch):
    gradient_sum = np.zeros(shape=np.shape(w))
    for b in range(batch):
        choice = np.random.choice(list(range(len(x))))
        gradient_sum += -(y[choice]-np.dot(w.T,x[choice,:]))*x[choice,:].reshape(np.shape(w))
    return eta*(1/batch)*gradient_sum
# Update w
w_s = []
Error = []
for i in range(iterations):
    # Calculate error
    error = (1/2)*np.sum([(y[i]-np.dot(w.T,x[i,:]))**2 for i in range(len(x))])
    Error.append(error)
    # Stochastic Gradient Descent
    """
    for d in range(len(x)):
        w -= stochastic_gradient_descent(x[d,:], y[d], w, eta)
        w_s.append(w.copy())
    """
    # Minibatch Gradient Descent
    """
    w -= mini_batch_gradient_descent(x, y, w, eta, batch)
    """
    # Batch Gradient Descent
    w -= batch_gradient_descent(x, y, w, eta)
# Show predicted weights
print(w_s)
# Plot the predicted function and the Error
plot_function(x,y,w,Error,w_s)
As a result we receive a fit of the function and the error curve (plots omitted here), which surely can be improved by altering eta and the number of iterations, as well as by switching to stochastic or mini-batch gradient descent or more sophisticated optimization algorithms.

lmfit minimize (or scipy.optimize leastsq) on complex equation/data

Edit: modeling and fitting with this approach work fine; the data in here is just not good.
I want to do a curve fit on a complex dataset. After thorough reading and searching, I found that I can use a couple of methods (e.g. lmfit minimize, scipy leastsq), but none gives me a good fit at all.
Here is the fit equation (a Cole-Cole model, as implemented in ColeCole below):
sigma(fr) = sig0*(1 + (m/(1-m))*(1 - 1/(1 + (1j*2*pi*fr*tau)**c)))
Here is the data to be fitted (list of y values):
[(0.00011342104914066835+8.448890220616275e-07j),
(0.00011340386404065371+7.379293582429708e-07j),
(0.0001133540327309949+6.389834505824625e-07j),
(0.00011332170913939336+5.244566142401774e-07j),
(0.00011331311156154074+4.3841061618015007e-07j),
(0.00011329383047059048+3.6163513508002877e-07j),
(0.00011328700094846502+3.0542249453666894e-07j),
(0.00011327650033983806+2.548725558622188e-07j),
(0.00011327702539337786+2.2508174567697671e-07j),
(0.00011327342238146558+1.9607648998100523e-07j),
(0.0001132710747364799+1.721721661949941e-07j),
(0.00011326933241850936+1.5246061350710235e-07j),
(0.00011326798040984542+1.3614817802178457e-07j),
(0.00011326752037650585+1.233483784504962e-07j),
(0.00011326758290166552+1.1258801448459512e-07j),
(0.00011326813100914905+1.0284749122099354e-07j),
(0.0001132684076390416+9.45791423595816e-08j),
(0.00011326982474882009+8.733105218572698e-08j),
(0.00011327158639135678+8.212191452217794e-08j),
(0.00011327366823516856+7.747920115589205e-08j),
(0.00011327694366034208+7.227069986108343e-08j),
(0.00011327915327873038+6.819405851172907e-08j),
(0.00011328181165961218+6.468392148750885e-08j),
(0.00011328531688122571+6.151393311227958e-08j),
(0.00011328857849500441+5.811704586613896e-08j),
(0.00011329241716561626+5.596645863242474e-08j),
(0.0001132970129528527+5.4722461511610696e-08j),
(0.0001133002881788021+5.064523218904898e-08j),
(0.00011330507671740223+5.0307457368330284e-08j),
(0.00011331106068787993+4.7703959367963307e-08j),
(0.00011331577350707601+4.634615394867111e-08j),
(0.00011332064001939156+4.6914747648361504e-08j),
(0.00011333034985824086+4.4992151257444304e-08j),
(0.00011334188526870483+4.363662798446445e-08j),
(0.00011335491299924776+4.364164366097129e-08j),
(0.00011337451201475147+4.262881852644385e-08j),
(0.00011339778209066752+4.275096587356569e-08j),
(0.00011342832992628646+4.4463907608604945e-08j),
(0.00011346526768580432+4.35706649329342e-08j),
(0.00011351108008292451+4.4155812379491554e-08j),
(0.00011356967192325835+4.327004709646922e-08j),
(0.00011364164970635006+4.420660396556604e-08j),
(0.00011373150199883139+4.3672898914161596e-08j),
(0.00011384660942003356+4.326171366194325e-08j),
(0.00011399193321804955+4.1493065523925126e-08j),
(0.00011418043916260295+4.0762418512759096e-08j),
(0.00011443271767970721+3.91359909722939e-08j),
(0.00011479600563688605+3.845666332695652e-08j),
(0.0001153652105925112+3.6224677316584614e-08j),
(0.00011638635682516399+3.386843079212692e-08j),
(0.00011836223959714231+3.6692295450490655e-08j)]
Here is the list of x values:
[999.9999960000001,
794.328231,
630.957342,
501.18723099999994,
398.107168,
316.22776400000004,
251.188642,
199.52623,
158.489318,
125.89254,
99.999999,
79.432823,
63.095734,
50.118722999999996,
39.810717,
31.622776,
25.118864000000002,
19.952623000000003,
15.848932000000001,
12.589253999999999,
10.0,
7.943282000000001,
6.309573,
5.011872,
3.981072,
3.1622779999999997,
2.511886,
1.9952619999999999,
1.584893,
1.258925,
1.0,
0.7943279999999999,
0.630957,
0.5011869999999999,
0.398107,
0.316228,
0.251189,
0.199526,
0.15848900000000002,
0.125893,
0.1,
0.079433,
0.063096,
0.050119,
0.039811,
0.031623000000000005,
0.025119,
0.019953,
0.015849000000000002,
0.012589,
0.01]
And here is the code, which works but not the way I want:
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters
#%% the equation
def ColeCole(params, fr):  # fr is the array of x values; params holds the fitting parameters
    sig0 = params['sig0']
    m = params['m']
    tau = params['tau']
    c = params['c']
    w = fr*2*np.pi
    num = 1
    denom = 1 + (1j*w*tau)**c
    sigComplex = sig0*(1.0 + (m/(1-m))*(1 - num/denom))
    return sigComplex
def res(params, fr, data):  # residuals of the fit
    residual = ColeCole(params, fr) - data
    return residual.view(float)  # view complex residuals as interleaved real floats
#%% Adding model parameters and fitting
params = Parameters()
params.add('sig0', value=0.00166)
params.add('m', value=0.19,)
params.add('tau', value=0.05386)
params.add('c', value=0.80)
params['tau'].min = 0 # these conditions must be met but even if I remove them the fit is ugly!!
params['m'].min = 0
out = minimize(res, params, args=(np.array(fr2), np.array(data)))  # fr2 and data hold the x and y values listed above
#%%plotting Imaginary part
fig, ax = plt.subplots()
plotX = fr2
plotY = data.imag
fitplot = ColeCole(out.params, fr2)
ax.semilogx(plotX,plotY,'o',label='imc')
ax.semilogx(plotX,fitplot.imag,label='fit')
#%%plotting real part
fig2, ax2 = plt.subplots()
plotX2 = fr2
plotY2 = data.real
fitplot2 = ColeCole(out.params, fr2)
ax2.semilogx(plotX2,plotY2,'o',label='imc')
ax2.semilogx(plotX2,fitplot2.real,label='fit')
I might be doing it completely wrong, please help me if you know the proper solution to do a curve fitting on complex data.
I would suggest first converting the complex data to numpy arrays and getting real, imag pairs separately, and then using lmfit Model to model that same sort of data. Perhaps something like this:
import numpy as np
import matplotlib.pyplot as plt
from lmfit import Model

cdata = np.array((0.00011342104914066835+8.448890220616275e-07j,
0.00011340386404065371+7.379293582429708e-07j,
0.0001133540327309949+6.389834505824625e-07j,
0.00011332170913939336+5.244566142401774e-07j,
0.00011331311156154074+4.3841061618015007e-07j,
0.00011329383047059048+3.6163513508002877e-07j,
0.00011328700094846502+3.0542249453666894e-07j,
0.00011327650033983806+2.548725558622188e-07j,
0.00011327702539337786+2.2508174567697671e-07j,
0.00011327342238146558+1.9607648998100523e-07j,
0.0001132710747364799+1.721721661949941e-07j,
0.00011326933241850936+1.5246061350710235e-07j,
0.00011326798040984542+1.3614817802178457e-07j,
0.00011326752037650585+1.233483784504962e-07j,
0.00011326758290166552+1.1258801448459512e-07j,
0.00011326813100914905+1.0284749122099354e-07j,
0.0001132684076390416+9.45791423595816e-08j,
0.00011326982474882009+8.733105218572698e-08j,
0.00011327158639135678+8.212191452217794e-08j,
0.00011327366823516856+7.747920115589205e-08j,
0.00011327694366034208+7.227069986108343e-08j,
0.00011327915327873038+6.819405851172907e-08j,
0.00011328181165961218+6.468392148750885e-08j,
0.00011328531688122571+6.151393311227958e-08j,
0.00011328857849500441+5.811704586613896e-08j,
0.00011329241716561626+5.596645863242474e-08j,
0.0001132970129528527+5.4722461511610696e-08j,
0.0001133002881788021+5.064523218904898e-08j,
0.00011330507671740223+5.0307457368330284e-08j,
0.00011331106068787993+4.7703959367963307e-08j,
0.00011331577350707601+4.634615394867111e-08j,
0.00011332064001939156+4.6914747648361504e-08j,
0.00011333034985824086+4.4992151257444304e-08j,
0.00011334188526870483+4.363662798446445e-08j,
0.00011335491299924776+4.364164366097129e-08j,
0.00011337451201475147+4.262881852644385e-08j,
0.00011339778209066752+4.275096587356569e-08j,
0.00011342832992628646+4.4463907608604945e-08j,
0.00011346526768580432+4.35706649329342e-08j,
0.00011351108008292451+4.4155812379491554e-08j,
0.00011356967192325835+4.327004709646922e-08j,
0.00011364164970635006+4.420660396556604e-08j,
0.00011373150199883139+4.3672898914161596e-08j,
0.00011384660942003356+4.326171366194325e-08j,
0.00011399193321804955+4.1493065523925126e-08j,
0.00011418043916260295+4.0762418512759096e-08j,
0.00011443271767970721+3.91359909722939e-08j,
0.00011479600563688605+3.845666332695652e-08j,
0.0001153652105925112+3.6224677316584614e-08j,
0.00011638635682516399+3.386843079212692e-08j,
0.00011836223959714231+3.6692295450490655e-08j))
fr = np.array((999.9999960000001, 794.328231, 630.957342,
501.18723099999994, 398.107168, 316.22776400000004,
251.188642, 199.52623, 158.489318, 125.89254, 99.999999,
79.432823, 63.095734, 50.118722999999996, 39.810717,
31.622776, 25.118864000000002, 19.952623000000003,
15.848932000000001, 12.589253999999999, 10.0,
7.943282000000001, 6.309573, 5.011872, 3.981072,
3.1622779999999997, 2.511886, 1.9952619999999999, 1.584893,
1.258925, 1.0, 0.7943279999999999, 0.630957,
0.5011869999999999, 0.398107, 0.316228, 0.251189, 0.199526,
0.15848900000000002, 0.125893, 0.1, 0.079433, 0.063096,
0.050119, 0.039811, 0.031623000000000005, 0.025119, 0.019953,
0.015849000000000002, 0.012589, 0.01))
data = np.concatenate((cdata.real, cdata.imag))
# model function for lmfit
def colecole_function(x, sig0, m, tau, c):
    w = x*2*np.pi
    denom = 1 + (1j*w*tau)**c
    sig = sig0*(1.0 + (m/(1.0-m))*(1 - 1.0/denom))
    return np.concatenate((sig.real, sig.imag))
mod = Model(colecole_function)
params = mod.make_params(sig0=0.002, m=-0.19, tau=0.05, c=0.8)
params['tau'].min = 0
result = mod.fit(data, params, x=fr)
print(result.fit_report())
You would then want to plot the results like
nf = len(fr)
plt.plot(fr, data[:nf], label='data(real)')
plt.plot(fr, result.best_fit[:nf], label='fit(real)')
and similarly
plt.plot(fr, data[nf:], label='data(imag)')
plt.plot(fr, result.best_fit[nf:], label='fit(imag)')
Note that I think you're going to want to allow m to be negative (or maybe I misunderstand your model). I did not work carefully on getting a great fit, but I think this should get you started.
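For instance, parameter bounds could express a negative m (the values here are illustrative assumptions, not from the fit above):

params['m'].min = -1.0  # hypothetical bounds if m is expected to be negative
params['m'].max = 0.0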

python: setting width to fit parameters

I have been trying to fit a data file with unknown fit parameters ga and MA. What I want to do is set a range within which the value of MA will reside and then fit the data; for example, I want the fitted value of MA to lie in the range [0.5, 0.8], while keeping ga as an arbitrary fit parameter. I am not sure how to do it. I am copying the Python code here:
#!/usr/bin/env python3
# Fit the model to the data in fname; each line of the file contains the data for one point: x_i, y_i, sigma_i.
import sys
import numpy as np
from pylab import *
from scipy.optimize import curve_fit
from scipy.stats import chi2
fname = sys.argv[1] if len(sys.argv) > 1 else 'data.txt'
x, y, err = np.loadtxt(fname, unpack = True)
n = len(x)
p0 = [-1,1]
f = lambda x, ga, MA: ga/((1+x/(MA*MA))*(1+x/(MA*MA)))
p, covm = curve_fit(f, x, y, p0, err)
ga, MA = p
chisq = sum(((f(x, ga, MA) -y)/err)**2)
ndf = n -len(p)
Q = 1. -chi2.cdf(chisq, ndf)
chisq = chisq / ndf
gaerr, MAerr = sqrt(diag(covm)/chisq) # correct the error bars
print('ga = %10.4f +/- %7.4f' % (ga, gaerr))
print('MA = %10.4f +/- %7.4f' % (MA, MAerr))
print('chi squared / NDF = %7.4f' % chisq)
print(covm)
You might consider using lmfit (https://lmfit.github.io/lmfit-py) for this problem. Lmfit provides a higher-level interface to optimization and curve fitting, including treating Parameters as python objects that have bounds.
Your script might be translated to use lmfit as
import sys
import numpy as np
from lmfit import Model
fname = sys.argv[1] if len(sys.argv) > 1 else 'data.txt'
x, y, err = np.loadtxt(fname, unpack=True)
# define the fitting model function, similar to your `f`:
def f(x, ga, ma):
    return ga/((1 + x/(ma*ma))*(1 + x/(ma*ma)))
# turn this model function into a Model:
mymodel = Model(f)
# now create parameters for this model, giving initial values
# note that the parameters will be *named* from the arguments of your model function:
params = mymodel.make_params(ga=-1, ma=1)
# params is now an ordered dict with parameter names ('ga', 'ma') as keys.
# you can set min/max values for any parameter:
params['ma'].min = 0.5
params['ma'].max = 2.0
# you can fix the value to not be varied in the fit:
# params['ga'].vary = False
# you can also constrain it to be a simple mathematical expression of other parameters
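#   for example (illustrative; lmfit evaluates `expr` in terms of other parameter names):
#       params.add('ma_squared', expr='ma*ma')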
# now do the fit to your `y` data with `params` and your `x` data
# note that you pass in weights for the residual, so 1/err:
result = mymodel.fit(y, params, x=x, weights=1./err)
# print out fit report with fit statistics and best fit values
# and uncertainties and correlations for variables:
print(result.fit_report())
You can get access to the best-fit parameters as result.params; the initial params will not be changed by the fit. There are also routines to plot the best-fit result and/or residual.
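For example, a quick visual check of the result (a minimal sketch; requires matplotlib):

import matplotlib.pyplot as plt
result.plot_fit()  # plots the data together with the best-fit curve
plt.show()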
