Scaling error and information criteria in lmfit - Python

I am using lmfit to solve a non-linear optimization problem. It works fine up to the point where I try to include the measurement error as the standard deviation of the dependent variable y (sigma_y). My problem is that I cannot interpret the information criteria properly. When I weight the residual, i.e. return (model - y)/(sigma_y), they jump from really low to very high values.
i.e. (left: return (model - y)  ->  right: return (model - y)/(sigma_y)):
chi-square            0.00159805   ->   47.3184972
reduced chi-square    1.7756e-04   ->   5.25761080   (expectation value is 1; see the linked SO discussion)
Akaike info crit     -93.2055413   ->   20.0490661   (the more negative, the better)
Bayesian info crit   -92.4097507   ->   20.8448566   (the more negative, the better)
My guess is that this is somehow connected to bad usage of lmfit (wrong calculation of the information criteria, bad error scaling) or to a general lack of understanding of these criteria (to me a reduced chi-square of 5.258 (under-estimated errors) or 1.776e-4 (over-estimated errors) sounds like a really poor fit, but the plot of the residuals etc. looks okay to me...).
Here is my example code that reproduces the problem:
import lmfit
import numpy as np
import matplotlib.pyplot as plt
y = np.array([1.42774324, 1.45919099, 1.58891696, 1.78432768, 1.97403125,
2.17091161, 2.02065766, 1.83449279, 1.64412613, 1.47265405,
1.4507 ])
sigma_y = np.array([0.00312633, 0.00391546, 0.00873894, 0.01252675, 0.00639111,
0.00452488, 0.0050566 , 0.01127185, 0.01081748, 0.00227587,
0.00190191])
x = np.array([0.02372331, 0.07251188, 0.50685822, 1.30761963, 2.10535442,
2.90597497, 2.30733552, 1.50906939, 0.7098041 , 0.09580686,
0.04777082])
offset = np.array([1.18151707, 1.1815602 , 1.18202847, 1.18187962, 1.18047493,
1.17903493, 1.17962602, 1.18141625, 1.18216907, 1.18222051,
1.18238949])
parameter = lmfit.Parameters()
parameter.add('par_1', value = 1e-5)
parameter.add('par_2', value = 0.18)
def U_reg(parameter, x, offset):
    par_1 = parameter['par_1']
    par_2 = parameter['par_2']
    model = offset + 0.03043211217 * np.arcsinh(x / (2 * par_1)) + par_2 * x
    return (model - y) / sigma_y
mini = lmfit.Minimizer(U_reg, parameter, fcn_args=(x, offset), nan_policy='omit', scale_covar=False)
regr1 = mini.minimize(method='leastsq')  # Levenberg-Marquardt
print("Levenberg-Marquardt:")
print(lmfit.fit_report(regr1, min_correl=0.9))
def U_plot(parameter, x, offset):
    par_1 = parameter['par_1']
    par_2 = parameter['par_2']
    model = offset + 0.03043211217 * np.arcsinh(x / (2 * par_1)) + par_2 * x
    return model - y
plt.title("Model vs Data")
plt.plot(x, y, 'b')
plt.plot(x, U_plot(regr1.params, x, offset) + y, 'r--', label='Levenberg-Marquardt')
plt.ylabel("y")
plt.xlabel("x")
plt.legend(loc='best')
plt.show()
I know that it might be more convenient to implement the weighting via lmfit.Model, but due to multiple regressors I want to keep the lmfit.Minimizer approach. My Python version is 3.8.5, lmfit is 1.0.2.

Well, in order for the magnitude of chi-square to be meaningful (for example, for it to be around (N_data - N_varys)), the scale of the uncertainties has to be correct -- they have to give the 1-sigma standard deviation for each observation.
It's not really possible for lmfit to detect whether the sigma you use has the right scale.
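A quick way to check whether the stated sigma_y is consistent with the scatter of the data is to look at the reduced chi-square and see what rescaling the uncertainties would imply. A minimal sketch, reusing y, x, offset, sigma_y, parameter and regr1 from the question, and assuming the model itself is adequate so that all excess scatter is attributed to mis-scaled uncertainties:
import numpy as np
import lmfit

print(regr1.redchi)   # ~5.26 here; ~1 would mean sigma_y matches the observed scatter

def U_reg_scaled(parameter, x, offset, sigma):
    par_1 = parameter['par_1']
    par_2 = parameter['par_2']
    model = offset + 0.03043211217 * np.arcsinh(x / (2 * par_1)) + par_2 * x
    return (model - y) / sigma   # y taken from the enclosing scope, as in the question

# uncertainties rescaled so that reduced chi-square becomes ~1 by construction
sigma_scaled = sigma_y * np.sqrt(regr1.redchi)
mini2 = lmfit.Minimizer(U_reg_scaled, parameter, fcn_args=(x, offset, sigma_scaled))
regr2 = mini2.minimize(method='leastsq')
print(regr2.redchi)   # ~1; the best-fit parameter values do not change, because every
                      # residual was divided by the same constant
This is essentially what scale_covar=True does for the reported parameter uncertainties (it multiplies the covariance matrix by the reduced chi-square) without touching sigma_y itself; the information criteria, by contrast, always reflect the weighting you actually return from the residual function.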

Related

Fitting two peaks with gauss in python

curve_fit is not fitting properly. I'm trying to fit experimental data with curve_fit. The data is imported from a .txt file into an array:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

d = np.loadtxt("data.txt")
data_x = np.array(d[:, 0])
data_y = np.array(d[:, 2])
data_y_err = np.array(d[:, 3])
Since I know there must be two peaks, my model is a sum of two Gaussian curves:
def model_dGauss(x, xc, A, y0, w, dx):
    P = A / (w * np.sqrt(2 * np.pi))
    mu1 = (x - (xc - dx/3)) / (2 * w**2)
    mu2 = (x - (xc + 2*dx/3)) / (2 * w**2)
    return y0 + P * (np.exp(-mu1**2) + 0.5 * np.exp(-mu2**2))
The fit is very sensitive to my guess values. What is the point of fitting data if only near-perfect parameter guesses produce a result? Or am I doing something completely wrong?
t = np.linspace(8.4, 10, 300)
guess_dG = [32, 1, 10, 0.1, 0.2]
popt, pcov = curve_fit(model_dGauss, data_x, data_y, p0=guess_dG, sigma=data_y_err, absolute_sigma=True)
xc, A, y0, w, dx = popt
Plotting the data
plt.scatter(data_x, data_y)
plt.plot(t, model_dGauss(t, *popt))
plt.errorbar(data_x, data_y, yerr=data_y_err)
yields a plot where the result is just a straight line at the bottom of my graph, while the evaluated parameters are not that bad. How can that be?
A complete example of code is always appreciated (and, ahem, usually expected here on SO). To remove much of the confusion about using curve_fit here, allow me to suggest that you will have an easier time using lmfit (https://lmfit.github.io/lmfit-py) and especially its builtin model functions and its use of named parameters. With lmfit, your code for two Gaussians plus a constant offset might look like this:
import matplotlib.pyplot as plt
from lmfit.models import GaussianModel, ConstantModel
# start with 1 Gaussian + Constant offset:
model = GaussianModel(prefix='p1_') + ConstantModel()
# this model will have parameters named:
# p1_amplitude, p1_center, p1_sigma, and c.
# here we give initial values to these parameters
params = model.make_params(p1_amplitude=10, p1_center=32, p1_sigma=0.5, c=10)
# optionally place bounds on parameters (probably not needed here):
params['p1_amplitude'].min = 0.
## params['p1_center'].vary = False # fix a parameter from varying in fit
# now do the fit (including weighting residual by 1/y_err):
result = model.fit(data_y, params, x=data_x, weights=1.0/data_y_err)
# print out param values, uncertainties, and fit statistics, or get best-fit
# parameters from `result.params`
print(result.fit_report())
# plot results
plt.errorbar(data_x, data_y, yerr=data_y_err, label='data')
plt.plot(data_x, result.best_fit, label='best fit')
plt.legend()
plt.show()
To add a second Gaussian, you could just do
model = GaussianModel(prefix='p1_') + GaussianModel(prefix='p2_') + ConstantModel()
# and then:
params = model.make_params(p1_amplitude=10, p1_center=32, p1_sigma=0.5, c=10,
                           p2_amplitude=2, p2_center=31.75, p2_sigma=0.5)
and so on.
Your model has the two Gaussians sharing, or at least having "linked", values - the sigma values should be the same for the two peaks and the amplitude of the 2nd should be half that of the 1st. As defined so far, the 2-Gaussian model has all the parameters being independent. But lmfit has a mechanism for setting constraints on any parameter by giving an algebraic expression in terms of other parameters. So, for example, you could say
params['p2_sigma'].expr = 'p1_sigma'
params['p2_amplitude'].expr = 'p1_amplitude / 2.0'
Now, p2_amplitude and p2_sigma will not be independently varied in the fit but will be constrained to have those values.
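Putting those pieces together, a minimal end-to-end sketch of the constrained two-peak fit might look like this (it assumes data_x, data_y and data_y_err are loaded as in the question; the initial values are the illustrative ones used above):
import matplotlib.pyplot as plt
from lmfit.models import GaussianModel, ConstantModel

# two Gaussians plus a constant background
model = GaussianModel(prefix='p1_') + GaussianModel(prefix='p2_') + ConstantModel()
params = model.make_params(p1_amplitude=10, p1_center=32, p1_sigma=0.5,
                           p2_amplitude=2, p2_center=31.75, p2_sigma=0.5, c=10)

# tie the second peak to the first, as described above
params['p2_sigma'].expr = 'p1_sigma'
params['p2_amplitude'].expr = 'p1_amplitude / 2.0'

result = model.fit(data_y, params, x=data_x, weights=1.0/data_y_err)
print(result.fit_report())

plt.errorbar(data_x, data_y, yerr=data_y_err, label='data')
plt.plot(data_x, result.best_fit, label='constrained 2-Gaussian fit')
plt.legend()
plt.show()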

Trying to fit a trig function to data with scipy

I am trying to fit some data using scipy.optimize.curve_fit. I have read the documentation and also this StackOverflow post, but neither seems to answer my question.
I have some simple 2D data that looks approximately like a trig function, and I want to fit it with a general trig function using scipy.
My approach is as follows:
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
#Load the data
data = np.loadtxt('example_data.txt')
t = data[:,0]
y = data[:,1]
#define the function to fit
def func_cos(t, A, omega, dphi, C):
    # A is the amplitude, omega the frequency, dphi and C the horizontal/vertical shifts
    return A * np.cos(omega * t + dphi) + C
#do a scipy fit
popt, pcov = curve_fit(func_cos, t,y)
#Plot fit data and original data
fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot2grid((1,1), (0,0))
ax1.plot(t,y)
ax1.plot(t,func_cos(t,*popt))
This outputs:
where blue is the data and orange is the fit. Clearly I am doing something wrong. Any pointers?
If no values are provided for the initial guess of the parameters p0, then a value of 1 is assumed for each of them. From the docs:
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the initial values will all be 1 (if the number of parameters for the function can be determined using introspection, otherwise a ValueError is raised).
Since your data has very large x-values and very small y-values, an initial guess of 1 is far from the actual solution and hence the optimizer does not converge. You can help the optimizer by providing suitable initial parameter values that can be guessed / approximated from the data:
Amplitude: A = (y.max() - y.min()) / 2
Offset: C = (y.max() + y.min()) / 2
Frequency: here we can estimate the number of zero crossings by multiplying consecutive y-values and checking which products are smaller than zero. This number divided by the total x-range gives the frequency, and to get it in units of pi we multiply by pi: y_shifted = y - offset; omega = np.pi * np.sum(y_shifted[:-1] * y_shifted[1:] < 0) / (t.max() - t.min())
Phase shift: can be set to zero, dphi = 0
So in summary, the following initial parameter guess can be used:
offset = (y.max() + y.min()) / 2
y_shifted = y - offset
p0 = (
(y.max() - y.min()) / 2,
np.pi * np.sum(y_shifted[:-1] * y_shifted[1:] < 0) / (t.max() - t.min()),
0,
offset
)
popt, pcov = curve_fit(func_cos, t, y, p0=p0)
Which gives me the following fit function (plot not reproduced here).
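If this comes up repeatedly, the same heuristics could be wrapped in a small helper; a sketch (the function name estimate_cos_p0 is just an illustration, not part of scipy):
import numpy as np
from scipy.optimize import curve_fit

def estimate_cos_p0(t, y):
    # heuristic starting values for A*cos(omega*t + dphi) + C
    offset = (y.max() + y.min()) / 2
    amplitude = (y.max() - y.min()) / 2
    y_shifted = y - offset
    # each sign change of the centred signal is one zero crossing
    n_crossings = np.sum(y_shifted[:-1] * y_shifted[1:] < 0)
    omega = np.pi * n_crossings / (t.max() - t.min())
    return amplitude, omega, 0.0, offset   # phase guessed as 0

# usage with the data and func_cos from the question:
popt, pcov = curve_fit(func_cos, t, y, p0=estimate_cos_p0(t, y))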

Fitting Voigt function to data in Python

I recently got a script running to fit a Gaussian to my absorption profile with help of SO. My hope was that things would work fine if I simply replaced the Gauss function with a Voigt one, but this seems not to be the case, I think mainly because it is a shifted Voigt.
Edit: The profiles are absorption lines that vary in optical thickness. In practice they will be a mix between optically thick and thin features. Like the bottom part in this diagram. The current data will be more like the top image, but maybe the bottom is already flattened a bit. (And we only see the left side of the profile, a bit beyond the center)
For a Gaussian it looks like this, and as predicted the bottom seems to be less deep than the fit wants it to be, but still quite close. The profile itself should still be a Voigt though. But now I realize that the central points might throw off the fit, so maybe a weight should be added based on wing position?
I'm mostly wondering if the shifted function could be mis-defined or if it's my starting values.
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
from scipy.special import wofz
x = np.arange(13)
xx = np.linspace(0, 13, 100)
y = np.array([19699.959 , 21679.445 , 21143.195 , 20602.875 , 16246.769 ,
11635.25 , 8602.465 , 7035.493 , 6697.0337, 6510.092 ,
7717.772 , 12270.446 , 16807.81 ])
# weighted arithmetic mean (corrected - check the section below)
#mean = 2.4
sigma = 2.4
gamma = 2.4
def Gauss(x, y0, a, x0, sigma):
    return y0 + a * np.exp(-(x - x0)**2 / (2 * sigma**2))

def Voigt(x, x0, y0, a, sigma, gamma):
    #sigma = alpha / np.sqrt(2 * np.log(2))
    return y0 + a * np.real(wofz((x - x0 + 1j*gamma) / sigma / np.sqrt(2))) / sigma / np.sqrt(2*np.pi)
popt, pcov = curve_fit(Voigt, x, y, p0=[8, np.max(y), -(np.max(y)-np.min(y)), sigma, gamma])
#p0=[8, np.max(y), -(np.max(y)-np.min(y)), mean, sigma])
plt.plot(x, y, 'b+:', label='data')
plt.plot(xx, Voigt(xx, *popt), 'r-', label='fit')
plt.legend()
plt.show()
I may be misunderstanding the model you're using, but I think you would need to include some sort of constant or linear background.
To do that with lmfit (which has Voigt, Gaussian, and many other models built in, and tries very hard to make these interchangeable), I would suggest starting with something like this:
import numpy as np
import matplotlib.pyplot as plt
from lmfit.models import GaussianModel, VoigtModel, LinearModel, ConstantModel
x = np.arange(13)
xx = np.linspace(0, 13, 100)
y = np.array([19699.959 , 21679.445 , 21143.195 , 20602.875 , 16246.769 ,
11635.25 , 8602.465 , 7035.493 , 6697.0337, 6510.092 ,
7717.772 , 12270.446 , 16807.81 ])
# build model as Voigt + Constant
## model = GaussianModel() + ConstantModel()
model = VoigtModel() + ConstantModel()
# create parameters with initial values
params = model.make_params(amplitude=-1e5, center=8,
                           sigma=2, gamma=2, c=25000)
# maybe place bounds on some parameters
params['center'].min = 2
params['center'].max = 12
params['amplitude'].max = 0.
# do the fit, print out report with results
result = model.fit(y, params, x=x)
print(result.fit_report())
# plot data, best fit, fit interpolated to `xx`
plt.plot(x, y, 'b+:', label='data')
plt.plot(x, result.best_fit, 'ko', label='fitted points')
plt.plot(xx, result.eval(x=xx), 'r-', label='interpolated fit')
plt.legend()
plt.show()
And, yes, you can simply replace VoigtModel() with GaussianModel() or LorentzianModel() and redo the fit and compare the fit statistics to see which model is better.
For the Voigt model fit, the printed report would be
[[Model]]
    (Model(voigt) + Model(constant))
[[Fit Statistics]]
    # fitting method   = leastsq
    # function evals   = 41
    # data points      = 13
    # variables        = 4
    chi-square         = 17548672.8
    reduced chi-square = 1949852.54
    Akaike info crit   = 191.502014
    Bayesian info crit = 193.761811
[[Variables]]
    amplitude: -173004.338 +/- 30031.4068 (17.36%) (init = -100000)
    center:     8.06574198 +/- 0.16209266 (2.01%) (init = 8)
    sigma:      1.96247322 +/- 0.23522096 (11.99%) (init = 2)
    c:          23800.6655 +/- 1474.58991 (6.20%) (init = 25000)
    gamma:      1.96247322 +/- 0.23522096 (11.99%) == 'sigma'
    fwhm:       7.06743644 +/- 0.51511574 (7.29%) == '1.0692*gamma+sqrt(0.8664*gamma**2+5.545083*sigma**2)'
    height:    -18399.0337 +/- 2273.61672 (12.36%) == '(amplitude/(max(2.220446049250313e-16, sigma*sqrt(2*pi))))*wofz((1j*gamma)/(max(2.220446049250313e-16, sigma*sqrt(2)))).real'
[[Correlations]] (unreported correlations are < 0.100)
    C(amplitude, c)     = -0.957
    C(amplitude, sigma) = -0.916
    C(sigma, c)         =  0.831
    C(center, c)        = -0.151
Note that by default gamma is constrained to be the same value as sigma. This constraint can be lifted and gamma made to vary independently with params['gamma'].set(expr=None, vary=True, min=1.e-9). I think that you may not have enough data points in this data set to robustly and independently determine gamma.
(The plot for that fit is not reproduced here.)
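To make the remark about gamma concrete, here is a short sketch of lifting the constraint and comparing the two fits by their information criteria (lower AIC is better); it continues from the model, params and result objects defined in the code above:
# let gamma float independently of sigma
params['gamma'].set(expr=None, vary=True, min=1.e-9)
result_free_gamma = model.fit(y, params, x=x)
print(result_free_gamma.fit_report())

# compare the tied-gamma and free-gamma fits
print('AIC, gamma tied to sigma:', result.aic)
print('AIC, gamma varied freely:', result_free_gamma.aic)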
I managed to get something, but it is not very satisfying. If you remove the offset as a parameter and add 20000 directly in the Voigt function, with starting values [8, 126000, 0.71, 2] (the particular values don't matter much), you'll get something like the fit in my plot (not reproduced here).
Now, the fit produces a negative value for gamma, which I cannot really justify. I would expect gamma to always be positive, but maybe I'm wrong and it's completely fine.
One thing you could try is to mirror your data so that it's a "positive" peak (and while at it remove the background) and/or normalize the values. That might help the convergence.
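A small sketch of that mirroring/normalisation idea, using the x and y arrays from the question (taking the maximum of the data as a rough background level is just an assumption for illustration):
import numpy as np

y_mirrored = np.max(y) - y                 # flip the absorption dip into a positive peak,
                                           # with the approximate background removed
y_norm = y_mirrored / np.max(y_mirrored)   # optionally rescale to order unity

# fit y_norm (or y_mirrored) with a positive-amplitude Voigt, then map the fitted
# amplitude and offset back to the original scale afterwards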
I have no idea why the solver has problems finding an optimum when the offset is used as a parameter. Maybe you need a different optimizer routine.
Maybe it would be a better option to use the lmfit package, which is a wrapper over scipy for fitting nonlinear functions with many prebuilt lineshapes. There is even an example of fitting a Voigt profile.

Linear Regression - mean square error coming too large

I have a house-sales dataset and I am applying linear regression to it. After getting the slope and y-intercept, I plot the graph and compute the cost, and the result I get is a little odd to me, because:
Line from parameters is fitting the data well
But the cost value from the same parameter is huge
Here's the code for plotting the straight line
def plotLine(slope, yIntercept, X, y):
    abline_values = [slope * i + yIntercept for i in X]
    plt.scatter(X, y)
    plt.plot(X, abline_values, 'black')
    plt.title(slope)
    plt.show()
Following is the function for computing cost
def computeCost(m, parameters, x, y):
    [yIntercept, slope] = parameters
    hypothesis = yIntercept - np.dot(x, slope)
    loss = hypothesis - y
    cost = np.sum(loss ** 2) / (2 * m)
    return cost
And following lines of code gives me the x vs y plot with the line from computed parameters (for the sake of simplicity of this question, I've manually set the parameters) and cost value.
yIntercept = -70000
slope = 0.85
print("Starting gradient descent at b = %d, m = %f, error = %f" % (yIntercept, slope, computeCost(m, parameters, X, y)))
plotLine(slope, yIntercept, X, y)
And the output of above snippet is
So, my questions are:
1. Is this the right way to plot the straight line over the x vs y plot?
2. Why is the cost value so big, and is it possible for the cost to be this big even when the parameters fit the data well?
Edit 1
The m in the print statement is the slope value and not the size of X, i.e. len(X).
1. Your way to plot seems right, you can probably simplify
abline_values = [slope * i + yIntercept for i in X]
to
abline_values = slope * X + yIntercept
2. Did you set m=0.85 in your example? It seems so, but I cannot tell since you did not provide the call to the cost function. Shouldn't it be the size of the sample? If you add up all the squared errors and divide them by 2*0.85, the size of the error depends on your sample size. And since it is not a relative error and the values are rather big, it is possible that all these errors add up to that huge number. Try to set m to the size of your sample.
In addition, there is an error in the sign in the computation of the hypothesis value; it should be a +. Otherwise you would have a negative slope, which explains large errors as well.
def computeCost(parameters, x, y):
    [yIntercept, slope] = parameters
    hypothesis = yIntercept + np.dot(x, slope)
    loss = hypothesis - y
    cost = np.sum(loss ** 2) / (2 * len(x))
    return cost
The error value is large due to the unnormalized input data. According to your code, x varies from 0 to 250k. In this case, I would suggest that you normalize x to be in [0, 1]. With that, I would expect that the loss is small, and so are the learnt parameters (slope and intercept).
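A minimal sketch of that normalisation, reusing X, y and the corrected computeCost from above (normalising y as well is optional, but it is what keeps the squared-error cost itself of order 1; the parameter values passed here are just placeholders for the refitted intercept and slope):
import numpy as np

X_norm = (X - X.min()) / (X.max() - X.min())   # regressor rescaled to [0, 1]
y_norm = (y - y.min()) / (y.max() - y.min())   # optional: target rescaled to [0, 1]

# re-run the fit / gradient descent against the normalized data, then for example:
print(computeCost([0.0, 1.0], X_norm, y_norm))   # placeholder [intercept, slope]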

Including probability density of poly-disperse ensembles in a fitting of a Langevin-Derivative function

I am aware that the following will require patience and I do appreciate the effort you will be giving.
I have measured data which represent the derivative of the magnetic moment, dM/dH. A good mathematical model of the M(H) curve is the Langevin function:
M(H) ∝ L(xi) = coth(xi) - 1/xi,   with xi = cte * H * Vi³ (Vi being the particle diameter)
so the derivative of the magnetic moment can be obtained from the derivative of the Langevin function:
dM/dH ∝ dL/dxi = 1/xi² - 1/sinh²(xi)
For the fitting I used this as the fit function:
def langevinDeriv(xx):
    if not hasattr(xx, '__iter__'):
        xx = [xx]
    res = np.zeros(len(xx))
    eps = 1e-1
    for i in range(len(xx)):
        x = xx[i]
        if np.fabs(x) < eps:
            res[i] = 1./3. - x**2/15. + 2.*x**4/189. - x**6/675. + 2.*x**8/10395. - 1382.*x**10/58046625. + 4.*x**12/1403325.
        else:
            res[i] = (1./x**2 - 1./np.sinh(x)**2)
    return res
and minimized the error with a simple least-squares function.
Here is what I got (comparison of fit and data, plot not reproduced here):
I would say that the fit is not good, because I actually don't have a single particle diameter but a polydisperse ensemble with different diameters, and therefore a superposition of different Langevin-derivative functions.
My question is: how can I integrate this probability density over the diameter into my fitting function, so that the program fits a probability distribution and not a single diameter Vi? The function for the probability density is given here:
http://www.originlab.de/doc/LabTalk/ref/Lognpdf-func
So I fiddled around a bit. As mentioned in the comments, the fit will never give super results, as the model does not capture the drop in signal at the ends (nor the step-like behaviour in the graph). The result, however, looks much better than a simple Langevin derivative. I basically sum up functions with different particle volumes up to a maximum diameter. You can control the maximum diameter and the number of diameters used in the range from 0 to that maximum. The only two fit parameters are the standard deviation and the overall amplitude. In detail you have to be careful with the scaling to get physically meaningful results. I already played a little with n and d_max, finding that in my scaling (n, d_max) = (15, 3) is OK. I guess d_max should be sufficiently larger than s, and n reasonably large, so that several values fall near the maximum of the log-normal distribution.
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import curve_fit, leastsq

def log_gauss(x, s):
    # log-normal probability density (median 1) in the diameter x, with width s
    if x == 0 or s == 0:
        out = 0
    else:
        exponent = -np.log(x)**2 / (2 * s**2)
        if abs(exponent) > 100:
            out = 0
        else:
            out = np.exp(exponent) / np.sqrt(2 * np.pi * x**2 * s**2)
    return out

def langevin(x, epsilon=1e-4):
    # Langevin function L(x) = coth(x) - 1/x, with a series expansion near zero
    if abs(x) < epsilon:
        out = x/3. - x**3/45. + 2*x**5/945.
    else:
        out = 1./np.tanh(x) - 1./x
    return out

def langevin_d(x, epsilon=1e-4):
    # derivative of the Langevin function: 1/x**2 - 1/sinh(x)**2
    if abs(x) < epsilon:
        out = 1/3. - x**2/15. + 2*x**4/189.
    elif abs(x) > 100.:
        out = 1./x**2
    else:
        out = -1./np.sinh(x)**2 + 1./x**2
    return out

def langevin_d_distributed(h, s, n=25, dMax=10):
    # sum of Langevin derivatives for n diameters up to dMax, weighted by the log-normal pdf
    diaList = np.linspace(.01, dMax, n)
    pdiaList = [log_gauss(d, s) for d in diaList]
    volList = [d**3 for d in diaList]
    dm = 0
    for v, p in zip(volList, pdiaList):
        dm += p * langevin_d(h * v)
    return dm

def residuals(parameters, dataPoint, n=25, dMax=10):
    # residuals for leastsq: amplitude a and log-normal width s are the fit parameters
    a, s = abs(parameters)
    dist = [y - a * langevin_d_distributed(x, s, n=n, dMax=dMax) for x, y in dataPoint]
    return dist
meas_x, meas_y = np.loadtxt('OBaPH.txt', delimiter=',', unpack=True)
meas_x = meas_x * 300
meas_y = meas_y - min(meas_y)

hList = np.linspace(0, 8, 155)
langevinDList = [langevin_d(h) for h in hList]
langevinDList = np.array(langevinDList) / langevinDList[0]
distList_01 = [langevin_d_distributed(h, .29) for h in hList]
distList_01 = np.array(distList_01) / distList_01[0]

dataTupel = list(zip(meas_x, meas_y))   # materialize as a list so it can be reused in every fit
estimate = [1, 0.29]
bestFitValues = dict()
myFit = dict()
for nnn, ddd in [(15, 3), (15, 1.5), (15, 10), (5, 3), (25, 3)]:
    bestFitValues[(nnn, ddd)], ier = leastsq(residuals, estimate, args=(dataTupel, nnn, ddd))
    print(bestFitValues[(nnn, ddd)])
    myFit[(nnn, ddd)] = [bestFitValues[(nnn, ddd)][0] * langevin_d_distributed(h, bestFitValues[(nnn, ddd)][1], n=nnn, dMax=ddd) for h in hList]

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(meas_x, meas_y, linestyle='', marker='o', label='rescaled data')
ax.plot(hList, langevinDList, label='Langevin')
ax.plot(hList, distList_01, label='log_norm test')
for key, val in myFit.items():
    ax.plot(hList, val, label=key)
ax.legend(loc=0)
plt.show()
