Python curve_fit with bad results

Here is the link to the data on Dropbox: badfitting. I tried to use curve_fit to fit the data with my predefined function in Python, but the result was far from perfect. The code is simple and shown below; I have no idea what's wrong.
Since I am new to Python, are there any other optimization or fitting methods that are suitable for my case with a predefined function?
Thanks in advance!
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def func(x, r1, r2, r3, l, c):
    w = 2 * math.pi * x
    m = r1 + (r2*l*w)/(r2**2 + l**2*w**2) + r3/(1 + r3*c**2*w**2)
    n = (r2**2*l*w)/(r2**2 + l**2*w**2) - r3**3*c*w/(1 + r3*c**2*w**2)
    y = (m**2 + n**2)**.5
    return y
def readdata(filename):
    x = filename.readlines()
    x = list(map(lambda s: s.strip(), x))
    x = list(map(float, x))
    return x
# test data
f_x= open(r'C:\Users\adm\Desktop\simpletry\fre.txt')
xdata = readdata(f_x)
f_y= open(r'C:\Users\adm\Desktop\simpletry\impedance.txt')
ydata = readdata(f_y)
xdata = np.array(xdata)
ydata = np.array(ydata)
plt.semilogx(xdata, ydata, 'b-', label='data')
popt, pcov = curve_fit(func, xdata, ydata, bounds=((0, 0, 0, 0, 0), (np.inf, np.inf, np.inf, np.inf, np.inf)))
plt.semilogx(xdata, func(xdata, *popt), 'r-', label='fitted curve')
print(popt)
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
As you guessed, this is an LCR circuit model. Now I am trying to fit two curves with the same parameters, like:
def func1(x, r1, r2, r3, l, c):
    w = 2 * math.pi * x
    m = r1 + (r2*l*w)/(r2**2 + l**2*w**2) + r3/(1 + r3*c**2*w**2)
    return m
def func2(x, r1, r2, r3, l, c):
    w = 2 * math.pi * x
    n = (r2**2*l*w)/(r2**2 + l**2*w**2) - r3**3*c*w/(1 + r3*c**2*w**2)
    return n
Is it possible to use curve_fit to optimize the parameters for both curves simultaneously?
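One common trick (a sketch of my own, not from the original post) is to stack both measured curves into a single array and fit a combined model that returns func1 and func2 concatenated, so curve_fit optimizes one shared parameter set. Here mdata and ndata are hypothetical measured arrays corresponding to the two curves:
def func_combined(x, r1, r2, r3, l, c):
    # evaluate both model components on the same frequencies and stack them
    return np.concatenate([func1(x, r1, r2, r3, l, c),
                           func2(x, r1, r2, r3, l, c)])
stacked_y = np.concatenate([mdata, ndata])  # measured data for both curves
popt, pcov = curve_fit(func_combined, xdata, stacked_y,
                       p0=[1, 1e5, 1e3, 1e3, 1e-3],  # rough initial guess
                       bounds=(0, np.inf))
curve_fit only requires that the model output match the shape of the stacked data, so one shared parameter set is fitted against both curves at once.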

Here are my results using scipy's differential_evolution genetic algorithm module to generate the initial parameter estimates for curve_fit, along with a simple "brick wall" in the function to ensure all parameters are positive. Scipy's implementation of Differential Evolution uses the Latin Hypercube algorithm to ensure a thorough search of parameter space, which requires bounds within which to search - in this example, those bounds are taken from the data maximum and minimum values. My results:
RMSE: 7.415
R-squared: 0.999995
r1 = 1.16614005e+00
r2 = 2.00000664e+05
r3 = 1.54718886e+01
l = 1.94473531e+04
c = 4.32515535e+05
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import warnings
def func(x, r1, r2, r3, l, c):
    # "brick wall" ensuring all parameters are positive
    if r1 < 0.0 or r2 < 0.0 or r3 < 0.0 or l < 0.0 or c < 0.0:
        return 1.0E10  # large value gives large error, curve_fit hits a brick wall
    w = 2 * numpy.pi * x
    m = r1 + (r2*l*w)/(r2**2 + l**2*w**2) + r3/(1 + r3*c**2*w**2)
    n = (r2**2*l*w)/(r2**2 + l**2*w**2) - r3**3*c*w/(1 + r3*c**2*w**2)
    y = (m**2 + n**2)**.5
    return y
def readdata(filename):
    x = filename.readlines()
    x = list(map(lambda s: s.strip(), x))
    x = list(map(float, x))
    return x
# test data
f_x= open('/home/zunzun/temp/data/fre.txt')
xData = readdata(f_x)
f_y= open('/home/zunzun/temp/data/impedance.txt')
yData = readdata(f_y)
xData = numpy.array(xData)
yData = numpy.array(yData)
# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    val = func(xData, *parameterTuple)
    return numpy.sum((yData - val) ** 2.0)
def generate_Initial_Parameters():
    # min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)
    minBound = min(minX, minY)
    maxBound = max(maxX, maxY)
    parameterBounds = []
    parameterBounds.append([minBound, maxBound])  # search bounds for r1
    parameterBounds.append([minBound, maxBound])  # search bounds for r2
    parameterBounds.append([minBound, maxBound])  # search bounds for r3
    parameterBounds.append([minBound, maxBound])  # search bounds for l
    parameterBounds.append([minBound, maxBound])  # search bounds for c
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# by default, differential_evolution polishes its best result with a local
# minimizer (polish=True) within the given parameter bounds
geneticParameters = generate_Initial_Parameters()
# now call curve_fit without bounds, using the genetic algorithm estimates,
# just in case the best fit parameters are outside those bounds
fittedParameters, pcov = curve_fit(func, xData, yData, geneticParameters)
print('Fitted parameters:', fittedParameters)
print()
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print()
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    plt.semilogx(xData, yData, 'D')
    # create data for the fitted equation plot
    yModel = func(xData, *fittedParameters)
    # now the model as a line plot
    plt.semilogx(xData, yModel)
    axes.set_xlabel('X Data')  # X axis data label
    axes.set_ylabel('Y Data')  # Y axis data label
    plt.show()
    plt.close('all')  # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)

To have your least squares regression make sense, you'll have to supply initial parameters that at least make sense.
Since all parameters are initialized to the value 1 by default, the biggest influence on the initial regression is resistor r1, which adds a constant to the mix.
Most probably you'll end up in something like the following configuration:
popt
Out[241]:
array([1.66581563e+03, 2.43663552e+02, 1.13019744e+00, 1.20233767e+00,
5.04984535e-04])
This outputs a neat-looking flat line, because m = (something big) + ~0 + ~0 and n = ~0 - ~0, so y ≈ r1.
However, if you initialize your parameters somewhat differently,
popt, pcov = curve_fit(func, xdata.flatten(), ydata.flatten(), p0=[0.1, 1e5, 1000, 1000, 0.2],
                       bounds=((0, 0, 0, 0, 0), (np.inf, np.inf, np.inf, np.inf, np.inf)))
You will get a better looking fit,
popt
Out[244]:
array([1.14947146e+00, 4.12512324e+05, 1.36182466e+02, 8.29771756e+04,
1.77593448e+03])
((fitted-ydata.flatten())**2).mean()
Out[257]: 0.6099524982664816
#RMSE hence 0.78
P.S. My data starts at the second data point, due to a conversion error with pd.read_clipboard where the first row became headers instead of data. This shouldn't change the overall picture, though.
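As an aside, that clipboard issue can usually be avoided, since pd.read_clipboard forwards its keyword arguments to pd.read_csv (a side note of mine, not from the original answer):
import pandas as pd
data = pd.read_clipboard(header=None)  # treat the first row as data, not column headers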

Related

Scipy curve_fit gives wrong answer

I have oscillating data, as shown in the figure below, and want to fit a sine curve to it. However, my result is not correct.
The function that I want to fit to this curve is:
def radius(z, phi, a0, k0):
    Z = z.reshape(z.shape[0], 1)
    k = np.array([k0,])
    a = np.array([a0,])
    r0 = 110
    rs = r0 + np.sum(a * np.sin(k*Z + phi), axis=1)
    return rs
A correct solution could look like this:
r_fit = radius(z, phi=np.pi/.8, a0=10,k0=0.017)
plt.plot(z, r, label='data')
plt.plot(z, r_fit, label='fitted curve')
plt.legend()
My result from fitting the curve, however, looks like this:
from scipy.optimize import curve_fit
popt, pcov = curve_fit(radius, xdata=z, ydata=r)
r_fit = radius(z, *popt)
plt.plot(z, r, label='data')
plt.plot(z, r_fit, label='fitted curve')
plt.legend()
My data is as follows:
r = np.array([100.09061214, 100.17932773, 100.45526772, 102.27891728,
113.12440802, 119.30644014, 119.86570527, 119.75184665,
117.12160143, 101.55081608, 100.07280857, 100.12880236,
100.39251753, 103.05404178, 117.15257288, 119.74048706,
119.86955437, 119.37452005, 112.83384329, 101.0507198 ,
100.05521567])
z = np.array([-407.90074345, -360.38004677, -312.99221012, -266.36934609,
-224.36240585, -188.55933945, -155.21242348, -122.02778866,
-87.84335638, -47.0274899 , 0. , 47.54559191,
94.97469981, 141.33801462, 181.59490575, 215.77219256,
248.95956379, 282.28027286, 318.16440024, 360.7246922 ,
407.940799 ])
Since my function simply represents a Fourier series, I also tried scipy.fftpack.fft(r), but I couldn't reconstruct a signal close to the original from the FFT I calculated.
Here is a graphical Python fitter with a sine equation and your data using the scipy.optimize Differential Evolution genetic algorithm module to determine initial parameter estimates for curve_fit's non-linear solver. That scipy module uses the Latin Hypercube algorithm to ensure a thorough search of parameter space requiring bounds within which to search. In this example those bounds are taken from the data maximum and minimum values.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import warnings
r = numpy.array([100.09061214, 100.17932773, 100.45526772, 102.27891728,
113.12440802, 119.30644014, 119.86570527, 119.75184665,
117.12160143, 101.55081608, 100.07280857, 100.12880236,
100.39251753, 103.05404178, 117.15257288, 119.74048706,
119.86955437, 119.37452005, 112.83384329, 101.0507198 ,
100.05521567])
z = numpy.array([-407.90074345, -360.38004677, -312.99221012, -266.36934609,
-224.36240585, -188.55933945, -155.21242348, -122.02778866,
-87.84335638, -47.0274899 , 0. , 47.54559191,
94.97469981, 141.33801462, 181.59490575, 215.77219256,
248.95956379, 282.28027286, 318.16440024, 360.7246922 ,
407.940799 ])
# rename data to match previous example code
xData = z
yData = r
def func(x, amplitude, center, width, offset):  # equation sine [radians] + offset, from zunzun.com
    return amplitude * numpy.sin(numpy.pi * (x - center) / width) + offset
# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    val = func(xData, *parameterTuple)
    return numpy.sum((yData - val) ** 2.0)
def generate_Initial_Parameters():
    # min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)
    diffY = maxY - minY
    diffX = maxX - minX
    parameterBounds = []
    parameterBounds.append([0.0, diffY])  # search bounds for amplitude
    parameterBounds.append([minX, maxX])  # search bounds for center
    parameterBounds.append([0.0, diffX])  # search bounds for width
    parameterBounds.append([minY, maxY])  # search bounds for offset
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# by default, differential_evolution polishes its best result with a local
# minimizer (polish=True) within the given parameter bounds
geneticParameters = generate_Initial_Parameters()
# now call curve_fit without bounds, using the genetic algorithm estimates,
# just in case the best fit parameters are outside those bounds
fittedParameters, pcov = curve_fit(func, xData, yData, geneticParameters)
print('Fitted parameters:', fittedParameters)
print()
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print()
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data')  # X axis data label
    axes.set_ylabel('Y Data')  # Y axis data label
    plt.show()
    plt.close('all')  # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)
The problem is that without providing an initial guess, the solution is not able to converge. Try adding a sensible initial guess:
p0 = [np.pi/.8, 10, 0.017]
popt, pcov = curve_fit(radius, xdata=z, ydata=r, p0=p0)
Note that if you were to use one of the other methods, such as trf or dogbox, then without the initial guess this would be more likely to raise a RuntimeError because the parameters fail to converge.
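For example, selecting the solver is done through curve_fit's method keyword, which accepts 'lm', 'trf' and 'dogbox' (a one-line illustration of mine):
popt, pcov = curve_fit(radius, xdata=z, ydata=r, p0=p0, method='trf')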

Curve fitting with nth order polynomial having sine ripples

I'm modeling measurement errors in a certain measuring device. This is how the data looks: high frequency sine ripples on a low frequency polynomial. My model should capture the ripples too.
The curve that fits the error should be of the form: error(x) = a0 + a1*x + a2*x^2 + ... + an*x^n + A*sin(x/lambda). The order n of the polynomial is not known. My plan is to iterate n from 1 to 9 and select the order that has the highest F-value.
I've played with numpy.polyfit and scipy.optimize.curve_fit so far. numpy.polyfit is only for polynomials, so while I can generate the "best fit" polynomial, there's no way to determine the parameters A and lambda for the sine term. scipy.optimize.curve_fit would have worked great if I already knew the order of the polynomial for the polynomial part of error(x).
Is there a clever way to use both numpy.polyfit and scipy.optimize.curve_fit to get this done? Or another library-function perhaps?
Here's the code for how I'm using numpy.polyfit to select the best polynomial:
def GetErrorPolynomial(X, Y):
    maxFval = 0.0
    for i in range(1, 10):  # i is the order of the polynomial (max order = 9)
        error_func = np.polyfit(X, Y, i)
        error_func = np.poly1d(error_func)
        # F-test (looking for the largest F value)
        numerator = np.sum(np.square(error_func(X) - np.mean(Y))) / i
        denominator = np.sum(np.square(Y - error_func(X))) / (Y.size - i - 1)
        Fval = numerator / denominator
        if Fval > maxFval:
            maxFval = Fval
            maxFvalPolynomial = error_func
    return maxFvalPolynomial
And here's the code for how I'm using curve_fit:
def poly_sine_fit(x, a, b, c, d, l):
    return a*np.square(x) + b*x + c + d*np.sin(x/l)
param, _ = curve_fit(poly_sine_fit, x_data, y_data)
It's "hardcoded" to a quadratic function, but I want to select the "best" order as I'm doing above with np.polyfit
I finally found a way to model the ripples and can answer my own question. This 2006 paper does curve-fitting on ripples that resemble my dataset.
First off, I did a least squares polynomial fit and then subtracted this polynomial curve from the original data. This left me with only the ripples. Applying the Fourier transform, I picked out the dominant frequencies which let me reconstruct the sine ripples. Then I simply added these ripples to the polynomial curve I had obtained in the beginning. That did it.
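A minimal sketch of that procedure (my own reconstruction, not the paper's code), assuming roughly uniform x spacing:
import numpy as np
def poly_plus_ripple(x, y, order=3):
    # 1) least squares polynomial fit, then subtract it to isolate the ripples
    poly = np.poly1d(np.polyfit(x, y, order))
    ripple = y - poly(x)
    # 2) FFT of the residual; pick the dominant (non-DC) frequency bin
    freqs = np.fft.rfftfreq(len(x), d=x[1] - x[0])
    spec = np.fft.rfft(ripple)
    k = np.argmax(np.abs(spec[1:])) + 1
    amp = 2.0 * np.abs(spec[k]) / len(x)
    phase = np.angle(spec[k])
    # 3) reconstruct the dominant sine ripple and add it back to the polynomial
    return lambda t: poly(t) + amp * np.cos(2*np.pi*freqs[k]*(t - x[0]) + phase)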
Use Scikit-learn Linear Regression
Here is a code sample I used to perform a linear regression with a polynomial of degree 3 that passes through the point (0, 1) with zero derivative. You just have to adapt the function create_vector to the function you want.
from sklearn import linear_model
import numpy as np
def create_vector(x):
    # currently representing a polynomial Y = a*X^3 + b*X^2
    x3 = np.power(x, 3)
    x2 = np.power(x, 2)
    X = np.append(x3, x2, axis=1)
    return X
data_x = [some_data_input]
data_y = [some_data_output]
x = np.array(data_x).reshape(-1, 1)
y_data = np.array(data_y).reshape(-1, 1)-1 # -1 to pass by the point (0,1)
X = create_vector(x)
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(X, y_data)
I extracted data from the scatterplot for analysis and found that a polynomial + sine did not seem to be an optimal model, because lower order polynomials were not following the shape of the data very well and higher order polynomials were exhibiting Runge's phenomenon of high curvature at the data extremes. I performed an equation search to find what the high-frequency sine wave might be imposed upon, and a good candidate seemed to be the Extreme Value peak equation "a * exp(-1.0 * exp(-1.0 * ((x-b)/c))-((x-b)/c) + 1.0) + offset" as shown below.
Here is a graphical Python curve fitter for this equation, at the top of the file I load the data I had extracted so you would need to replace this with the actual data. This fitter uses scipy's differential_evolution genetic algorithm module to estimate initial parameter values for the non-linear fitter, which uses the Latin Hypercube algorithm to ensure a thorough search of parameter space and requires bounds within which to search. Here those bounds are taken from the data maximum and minimum values.
Subtracting the model predictions from this fitted curve should leave you with only the sine component to be modeled. I noted that there seems to be an additional narrow, low-amplitude peak at approximately x = 275.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import warnings
##########################################################
# load data section
f = open('/home/zunzun/temp/temp.dat')
textData = f.read()
f.close()
xData = []
yData = []
for line in textData.split('\n'):
    if line:  # ignore blank lines
        spl = line.split()
        xData.append(float(spl[0]))
        yData.append(float(spl[1]))
xData = numpy.array(xData)
yData = numpy.array(yData)
##########################################################
# model to be fitted
def func(x, a, b, c, offset):  # Extreme Value Peak equation from zunzun.com
    return a * numpy.exp(-1.0 * numpy.exp(-1.0 * ((x-b)/c)) - ((x-b)/c) + 1.0) + offset
##########################################################
# fitting section
# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    val = func(xData, *parameterTuple)
    return numpy.sum((yData - val) ** 2.0)
def generate_Initial_Parameters():
    # min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)
    minData = min(minX, minY)
    maxData = max(maxX, maxY)
    parameterBounds = []
    parameterBounds.append([minData, maxData])  # search bounds for a
    parameterBounds.append([minData, maxData])  # search bounds for b
    parameterBounds.append([minData, maxData])  # search bounds for c
    parameterBounds.append([minY, maxY])  # search bounds for offset
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# by default, differential_evolution polishes its best result with a local
# minimizer (polish=True) within the given parameter bounds
geneticParameters = generate_Initial_Parameters()
# now call curve_fit without bounds, using the genetic algorithm estimates,
# just in case the best fit parameters are outside those bounds
fittedParameters, pcov = curve_fit(func, xData, yData, geneticParameters)
print('Fitted parameters:', fittedParameters)
print()
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print()
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data')  # X axis data label
    axes.set_ylabel('Y Data')  # Y axis data label
    plt.show()
    plt.close('all')  # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)
UPDATE -------
If the high-frequency sine component is constant (which I do not know), then modeling a small portion of the data containing only a few cycles will be sufficient to determine the equation and initial parameter estimates for fitting the sine wave portion of the model. Here I have done this, obtaining the following result from the equation below:
amplitude = -1.0362957093184177E+00
center = 3.6632754608370377E+01
width = 5.0813421718648293E+00
Offset = 5.1940843481496088E+00
pi = 3.14159265358979323846 # constant not fitted
y = amplitude * sin(pi * (x - center) / width) + Offset
Combining these two models using the actual data, rather than my scatterplot-extracted data, should be close to what you need.
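A sketch of that windowing step (my own illustration; the window limits and the residual-based approach are assumptions, since the extracted data are not reproduced here), reusing func and fittedParameters from the fitter above:
def sineFunc(x, amplitude, center, width, offset):
    return amplitude * numpy.sin(numpy.pi * (x - center) / width) + offset
# keep a narrow slice containing only a few ripple cycles, e.g. 20 <= x <= 60
mask = (xData >= 20.0) & (xData <= 60.0)
ripple = yData[mask] - func(xData[mask], *fittedParameters)  # peak model removed
sineParameters, _ = curve_fit(sineFunc, xData[mask], ripple,
                              p0=[1.0, 35.0, 5.0, 0.0])  # rough initial guesses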

Why does Python's curve_fit not finish the optimization?

I need to find two parameters of an equation that best fit the given values of x and y.
I'm using Python 3, with Numpy and Scipy.
from scipy.optimize import curve_fit
def func(dx, d50, p):
    return 1 / (1 + (d50 / dx) ** p)
xdata = [280, 150, 75, 45, 38, 20, 10, 5.1, 2.6]
ydata = [99.57592773, 95.53773499, 81.14313507, 67.08183289, 62.93716431, 49.961483, 37.80876923, 24.53152657, 13.2219696]
# curve fit:
popt, pcov = curve_fit(func, xdata, ydata)
print(popt)
I expect a d50 ~ 20 and a p > 0.
But Python gives me:
[0.00221498 1.60291553]
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in power
  after removing the cwd from sys.path.
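One observation of my own before looking at alternative models (not from the answers below): ydata looks like percentages, while func returns a fraction between 0 and 1, so the model can never reach the data values. Scaling the model by 100 and supplying a starting guess near the expected d50, with positivity bounds to avoid the invalid-power warning, may already give a sensible fit:
import numpy as np
from scipy.optimize import curve_fit
def func_pct(dx, d50, p):
    # same sigmoid, scaled to percent so its range matches ydata
    return 100.0 / (1.0 + (d50 / dx) ** p)
popt, pcov = curve_fit(func_pct, xdata, ydata, p0=[20.0, 1.0],
                       bounds=([0.0, 0.0], [np.inf, np.inf]))  # keep d50 and p positive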
I was unable to obtain a good fit to your data using the equation in your post. My equation search found that a standard Weibull peak equation, "a * exp(-0.5 * pow(log(x/b) / c, 2.0))", gave RMSE = 1.619 and R-squared = 0.997 for parameters a = 103.1533969, b = 498.93546398 and c = 2.67321918, as shown below. I have included a Python graphical fitter using this equation and the standard scipy differential_evolution genetic algorithm module to find initial parameter estimates for curve_fit(). That scipy module uses the Latin Hypercube algorithm to ensure a thorough search of parameter space, and that algorithm requires bounds within which to search; in this example, the search bounds are derived from the data. It is much easier to determine ranges for the initial parameter estimates than to find specific values.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import warnings
xData = numpy.array([280, 150, 75, 45, 38, 20, 10, 5.1, 2.6])
yData = numpy.array([99.57592773, 95.53773499, 81.14313507, 67.08183289, 62.93716431, 49.961483, 37.80876923, 24.53152657, 13.2219696])
def func(x, a, b, c):  # Peak_WeibullPeak_model from zunzun.com
    return a * numpy.exp(-0.5 * numpy.power(numpy.log(x/b) / c, 2.0))
# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    val = func(xData, *parameterTuple)
    return numpy.sum((yData - val) ** 2.0)
def generate_Initial_Parameters():
    # min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)
    minData = min(minX, minY)
    maxData = max(maxY, maxX)
    parameterBounds = []
    parameterBounds.append([minData, maxData])  # search bounds for a
    parameterBounds.append([minData, maxData])  # search bounds for b
    parameterBounds.append([minData, maxData])  # search bounds for c
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# by default, differential_evolution polishes its best result with a local
# minimizer (polish=True) within the given parameter bounds
geneticParameters = generate_Initial_Parameters()
# now call curve_fit without bounds, using the genetic algorithm estimates,
# just in case the best fit parameters are outside those bounds
fittedParameters, pcov = curve_fit(func, xData, yData, geneticParameters)
print('Fitted parameters:', fittedParameters)
print()
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print()
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data')  # X axis data label
    axes.set_ylabel('Y Data')  # Y axis data label
    plt.show()
    plt.close('all')  # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)

Use UnivariateSpline to fit data tightly

I have a bunch of x, y points that represent a sigmoidal function:
x=[ 1.00094909 1.08787635 1.17481363 1.2617564 1.34867881 1.43562284
1.52259341 1.609522 1.69631283 1.78276102 1.86426648 1.92896789
1.9464453 1.94941586 2.00062852 2.073691 2.14982808 2.22808316
2.30634034 2.38456905 2.46280126 2.54106611 2.6193345 2.69748825]
y=[-0.10057627 -0.10172142 -0.10320428 -0.10378959 -0.10348456 -0.10312503
-0.10276956 -0.10170055 -0.09778279 -0.08608644 -0.05797392 0.00063599
0.08732999 0.16429878 0.2223306 0.25368884 0.26830932 0.27313931
0.27308756 0.27048902 0.26626313 0.26139534 0.25634544 0.2509893 ]
I use scipy.interpolate.UnivariateSpline() to fit a cubic spline, as follows:
from scipy.interpolate import UnivariateSpline
import numpy as np
import matplotlib.pyplot as plt
s = UnivariateSpline(x, y, k=3, s=0)
xfit = np.linspace(x.min(), x.max(), 200)
plt.scatter(x,y)
plt.plot(xfit, s(xfit))
plt.show()
This is what I get:
Since I specify s=0, the spline adheres completely to the data, but there are too many wiggles. Using a higher k value leads to even more wiggles.
So my questions are --
How should I correctly use scipy.interpolate.UnivariateSpline() to fit my data? More precisely, how do I make the spline minimise its wiggling?
Is this even the correct choice for this kind of a sigmoidal function? Should I be using something like scipy.optimize.curve_fit() with a trial tanh(x) function instead?
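For the second question, a tanh-based fit is straightforward (a sketch of my own, assuming x and y are NumPy arrays of the data above):
from scipy.optimize import curve_fit
def tanh_step(x, a, b, x0, c):
    # a: half the step height, b: steepness, x0: transition center, c: baseline offset
    return a * np.tanh(b * (x - x0)) + c
p0 = [0.19, 15.0, 1.93, 0.085]  # rough guesses read off the two plateaus
popt, pcov = curve_fit(tanh_step, x, y, p0=p0)
plt.scatter(x, y)
plt.plot(xfit, tanh_step(xfit, *popt))
plt.show()
Note, though, that a plain tanh is monotonic, so like the Hill function in the answer below it cannot capture the slight decrease at the right end of the data.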
There are several options, I list a few below. The last one seems to give the best output. Whether you should use a spline or an actual function depends on what you want to do with the output; I list two analytical functions below that could be used but I don't know in which context the data were derived so it is hard to find the best one for you.
You can play with s, e.g. for s=0.005, the plot looks like this (still not extremely pretty but you could further adjust):
But I would indeed use a "proper" function and fit using e.g. curve_fit. The function below is still not ideal as it is monotonically increasing, so we miss the decrease at the end; the plot looks as follows:
This is the entire code, for both the spline and the actual fit:
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
def func(x, ymax, n, k, c):
    return ymax * x**n / (k**n + x**n) + c
x=np.array([ 1.00094909, 1.08787635, 1.17481363, 1.2617564, 1.34867881, 1.43562284,
1.52259341, 1.609522, 1.69631283, 1.78276102, 1.86426648, 1.92896789,
1.9464453, 1.94941586, 2.00062852, 2.073691, 2.14982808, 2.22808316,
2.30634034, 2.38456905, 2.46280126, 2.54106611, 2.6193345, 2.69748825])
y=np.array([-0.10057627, -0.10172142, -0.10320428, -0.10378959, -0.10348456, -0.10312503,
-0.10276956, -0.10170055, -0.09778279, -0.08608644, -0.05797392, 0.00063599,
0.08732999, 0.16429878, 0.2223306, 0.25368884, 0.26830932, 0.27313931,
0.27308756, 0.27048902, 0.26626313, 0.26139534, 0.25634544, 0.2509893 ])
popt, pcov = curve_fit(func, x, y, p0=[y.max(), 2, 2, -0.1], bounds=([0, 0, 0, -0.2], [0.4, 45, 2000, 10]))
xfit = np.linspace(x.min(), x.max(), 200)
plt.scatter(x, y)
plt.plot(xfit, func(xfit, *popt))
plt.show()
s = UnivariateSpline(x, y, k=3, s=0.005)
xfit = np.linspace(x.min(), x.max(), 200)
plt.scatter(x, y)
plt.plot(xfit, s(xfit))
plt.show()
A third option is to use a more advanced function that can also reproduce the decrease at the end and differential_evolution for the fit; that seems to give the best fit:
The code is as follows (using the same data as above):
from scipy.optimize import curve_fit, differential_evolution
def sigmoid_with_decay(x, a, b, c, d, e, f):
    return a * (1. / (1. + np.exp(-b * (x - c)))) * (1. / (1. + np.exp(d * (x - e)))) + f
def error_sigmoid_with_decay(parameters, x_data, y_data):
    return np.sum((y_data - sigmoid_with_decay(x_data, *parameters)) ** 2)
res = differential_evolution(error_sigmoid_with_decay,
bounds=[(0, 10), (0, 25), (0, 10), (0, 10), (0, 10), (-1, 0.1)],
args=(x, y),
seed=42)
xfit = np.linspace(x.min(), x.max(), 200)
plt.scatter(x, y)
plt.plot(xfit, sigmoid_with_decay(xfit, *res.x))
plt.show()
The fit is quite sensitive regarding the bounds, so be careful when you play with that...
This illustrates the result of fitting two halves of the data to different functions, the lower half to all data with X < 2.0 and the upper half to all data with X >= 1.9, so that there is overlap in the data for the fitted curves. The code switches from one equation to another at the center of the overlap region, X = 1.95.
import numpy, matplotlib
import matplotlib.pyplot as plt
xData=numpy.array([ 1.00094909, 1.08787635, 1.17481363, 1.2617564, 1.34867881, 1.43562284,
1.52259341, 1.609522, 1.69631283, 1.78276102, 1.86426648, 1.92896789,
1.9464453, 1.94941586, 2.00062852, 2.073691, 2.14982808, 2.22808316,
2.30634034, 2.38456905, 2.46280126, 2.54106611, 2.6193345, 2.69748825])
yData=numpy.array([-0.10057627, -0.10172142, -0.10320428, -0.10378959, -0.10348456, -0.10312503,
-0.10276956, -0.10170055, -0.09778279, -0.08608644, -0.05797392, 0.00063599,
0.08732999, 0.16429878, 0.2223306, 0.25368884, 0.26830932, 0.27313931,
0.27308756, 0.27048902, 0.26626313, 0.26139534, 0.25634544, 0.2509893 ])
# function for x < 1.95 (fitted up to 2.0 for overlap)
def lowerFunc(x_in):  # Bleasdale-Nelder Power With Offset
    # coefficients
    a = -1.1431476643503597E+03
    b = 3.3819340844164983E+21
    c = -6.3633178925040745E+01
    d = 3.1481973843740194E+00
    Offset = -1.0300724909782859E-01
    temp = numpy.power(a + b * numpy.power(x_in, c), -1.0 / d)
    temp += Offset
    return temp
# function for x >= 1.95 (fitted down to 1.9 for overlap)
def upperFunc(x_in):  # rational equation with Offset
    # coefficients
    a = -2.5294212380048242E-01
    b = 1.4262697377369586E+00
    c = -2.6141935706529118E-01
    d = -8.8730045918252121E-02
    Offset = -4.8283287597672708E-01
    temp = (a * numpy.power(x_in, 2) + b * numpy.log(x_in))  # numerator
    temp /= (1.0 + c * numpy.power(numpy.log(x_in), -1) + d * numpy.exp(x_in))  # denominator
    temp += Offset
    return temp
def combinedFunc(x_in):
    returnVal = []
    for x in x_in:
        if x < 1.95:
            returnVal.append(lowerFunc(x))
        else:
            returnVal.append(upperFunc(x))
    return returnVal
modelPredictions = combinedFunc(xData)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = combinedFunc(xModel)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data')  # X axis data label
    axes.set_ylabel('Y Data')  # Y axis data label
    plt.show()
    plt.close('all')  # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)

Issue fitting curves

I'm trying to fit my data to a certain function, but when I try to plot it, I always get double lines, as shown in the figure below. This is the code I'm using:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import warnings
from scipy.optimize import differential_evolution
# bounds on parameters are set in generate_Initial_Parameters() below
def func_original(x, a, b, c):
    return a/(x**2) + b/x + c
# bounds on parameters are set in generate_Initial_Parameters() below
def func_recommended(x, a, b, c):
    return 1/(a*x**2 + b*x + c)
# select peak function here
#func = func_original
func = func_recommended
# function for genetic algorithm to minimize (sum of squared error)
# bounds on parameters are set in generate_Initial_Parameters() below
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    return np.sum((yData - func(xData, *parameterTuple)) ** 2)
def generate_Initial_Parameters():
    # data min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)
    minSearch = min([minX, minY])
    maxSearch = max([maxX, maxY])
    parameterBounds = []
    parameterBounds.append([minSearch, maxSearch])  # parameter bounds for a
    parameterBounds.append([minSearch, maxSearch])  # parameter bounds for b
    parameterBounds.append([minSearch, maxSearch])  # parameter bounds for c
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# load data from text file
data=np.loadtxt('gammaoh.txt')
use=np.transpose(data)
yData=use[0]
xData=use[2]
# generate initial parameter values
initialParameters = generate_Initial_Parameters()
# curve fit the data
fittedParameters, niepewnosci = curve_fit(func, xData, yData, initialParameters)
# create values for display of fitted peak function
a, b, c = fittedParameters
y_fit = func(xData, a, b, c)
plt.plot(xData, yData, 'bo', label=r'Puntos experimentais $\gamma_{OH}$', markersize=5)
plt.plot(xData, 1/(xData**2*0.5998 - 2.29255*xData + 1.7988), 'b-', label=r'Axuste $\gamma_{OH}$')
plt.title('Axustes coeficientes de actividade')
plt.xlabel(r'$\chi_{H_2O}$')
plt.ylabel(r'$\gamma$')
plt.grid(True)
plt.legend(loc=2)
plt.savefig('gammaoh.png')
I would be very grateful if someone could tell me how to fix this; thank you in advance. Also, if anyone knows a better way of fitting data to a given function, I'd appreciate hearing it.
I have no deeper knowledge of the problem you are solving, but the extra line in the plot disappears if the data are sorted by x. I did this:
xData.sort()  # sort in place; the fitted curve depends only on x, so this is safe here
tt = 1/(xData**2*0.5998 - 2.29255*xData + 1.7988)
plt.plot(xData, tt, 'b-', label=r'Axuste $\gamma_{OH}$')
