predicting values given a sinusoidal fit - python

I'm using Python to fit a time series with a sinusoidal function. I found quite a good match and now I want to be able to predict future values. I'm at a loss here.
Here's what I've got:
timeSeries = [0.01146, 0.00724, 0.00460, 0.00192, 0.00145, 0.01559, 0.02585, 0.04118, 0.05073, 0.01966, 0.01486, 0.02784]
import numpy as np
from scipy.optimize import curve_fit
def createSinFromFit(x, freq, amplitude, phase, offset):
    return np.sin(x * freq + phase) * amplitude + offset

def sinRegr(series):
    t = np.linspace(0, 4*np.pi, len(series))
    guess_freq = 1
    guess_amplitude = 3*np.std(series)/(2**0.5)
    guess_phase = 0
    guess_offset = np.mean(series)
    p0 = [guess_freq, guess_amplitude, guess_phase, guess_offset]
    fit = curve_fit(createSinFromFit, t, series, p0=p0)
    results = createSinFromFit(t, *fit[0])
    return results
plotThis = sinRegr(timeSeries)
This code produces the fit shown in the attached picture.
How can I extend the sine function so that it predicts future points of the series? I.e., how can I have the sine plot continue to the right, beyond the region covered by the 'known' data points?

You need to distinguish a data timeline (input) and a fit timeline (output). Once you do that, the approach is fairly clear. Below I called them tdata and tfit:
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
tdata = np.linspace(0, 10)
timeSeries = np.sin(tdata) + .4*np.random.random(tdata.shape)
def createSinFromFit(x, freq, amplitude, phase, offset):
    return np.sin(x * freq + phase) * amplitude + offset

def sinRegr(tdata, series):
    tfit = np.linspace(0, 6*np.pi, len(series))
    guess_freq = .2
    guess_amplitude = 3*np.std(series)/(2**0.5)
    guess_phase = 0
    guess_offset = np.mean(series)
    p0 = [guess_freq, guess_amplitude, guess_phase, guess_offset]
    fit = curve_fit(createSinFromFit, tdata, series, p0=p0)  # use tdata to create the fit
    results = createSinFromFit(tfit, *fit[0])  # use tfit to generate a new curve
    return tfit, results

tfit, plotThis = sinRegr(tdata, timeSeries)
plt.plot(tfit, plotThis)
plt.plot(tdata, timeSeries, "ro")
plt.show()
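Because tfit runs out to 6π while the data only reach t = 10, the fitted curve already extends past the last data point. To forecast even further, just evaluate the fitted parameters on a longer timeline. A minimal sketch reusing the names from the example above (sinRegr does not return the parameters, so this refits directly with the same initial guesses):

params, _ = curve_fit(createSinFromFit, tdata, timeSeries,
                      p0=[.2, 3*np.std(timeSeries)/(2**0.5), 0, np.mean(timeSeries)])
tforecast = np.linspace(0, 12*np.pi, 300)  # extends well past tdata.max() == 10
plt.plot(tforecast, createSinFromFit(tforecast, *params))  # predicted values
plt.plot(tdata, timeSeries, "ro")  # known data
plt.show()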

Related

scipy Fast fourier transform doesn't recognize the signal

I'm trying to get the frequency of a signal via Fourier transform, but it fails to recognize it (it sets the peak at f=0). Maybe something is wrong in my code (full reproducible code at the end of the post):
PF = fft.fft(Y[0,:])/Npoints #/Npoints to get the true amplitudes
ZF = fft.fft(Y[1,:])/Npoints
freq = fft.fftfreq(Npoints,deltaT)
PF = fft.fftshift(PF) #change of ordering so that the frequencies are increasing
ZF = fft.fftshift(ZF)
freq = fft.fftshift(freq)
plt.plot(freq, np.abs(PF))
plt.show()
plt.plot(T,Y[0,:])
plt.show()
where Npoints is the number of intervals (points) and deltaT is the time spacing of the intervals. You can see that the peak is at f=0.
I also show a plot of Y[0,:] (my signal) over time, where it's clear that the signal has a characteristic frequency.
FULL REPRODUCIBLE CODE
import numpy as np
import matplotlib.pyplot as plt
#numerical integration
from scipy.integrate import solve_ivp
import scipy.fft as fft
r=0.5
g=0.4
e=0.6
H=0.6
m=0.15
#define a vector of K between 0 and 4 with 50 components
K=np.arange(0.1,4,0.4)
tsteps=np.arange(7200,10000,5)
Npoints=len(tsteps)
deltaT=2800/Npoints #sample spacing
for k in K:
    i = 0
    def RmAmodel(t, y):
        return [r*y[0]*(1-y[0]/k) - g*y[0]/(y[0]+H)*y[1],
                e*g*y[0]/(y[1]+H)*y[1] - m*y[1]]
    sol = solve_ivp(RmAmodel, [0, 10000], [3, 3], t_eval=tsteps)  # t_eval specifies the points where the solution is desired
    T = sol.t
    Y = sol.y
    vk = []
    for i in range(Npoints):
        vk.append(k)
    XYZ = [vk, Y[0,:], Y[1,:]]

#check periodicity over P and Z with fourier transform
#try Fourier analysis just for the last value of K
PF = fft.fft(Y[0,:])/Npoints  #/Npoints to get the true amplitudes
ZF = fft.fft(Y[1,:])/Npoints
freq = fft.fftfreq(Npoints, deltaT)
PF = fft.fftshift(PF)  #change of ordering so that the frequencies are increasing
ZF = fft.fftshift(ZF)
freq = fft.fftshift(freq)
plt.plot(T, Y[0,:])
plt.show()
plt.plot(freq, np.abs(PF))
plt.show()
I can't pinpoint where the problem is.

It looks like there is some problem in the fft code. Anyway, I have little time, so I will just post a sample code I made before. You can use it as a reference or copy-paste it. It should work.
import numpy as np
import matplotlib.pyplot as plt
from scipy.fft import fft, fftfreq
fs = 1000 #sampling frequency
T = 1/fs #sampling period
N = int((1 / T) + 1) #number of sample points for 1 second
t = np.linspace(0, 1, N) #time array
pi = np.pi
sig1 = 1 * np.sin(2*pi*10*t)
sig2 = 2 * np.sin(2*pi*30*t)
sig3 = 3 * np.sin(2*pi*50*t)
#generate signal
signal = sig1 + sig2 + sig3
#plot signal
plt.plot(t, signal)
plt.show()
signal_fft = fft(signal) #getting fft
f2 = np.abs(signal_fft / N) #full spectrum
f1 = f2[:N//2] #half spectrum
f1[1:] = 2*f1[1:] #actual amplitude
freq = fs * np.linspace(0,N/2,int(N/2)) / N #frequency array
#plot fft result
plt.plot(freq, f1)
plt.xlim(0,100)
plt.show()
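One property of the FFT worth keeping in mind here (and a plausible cause of the f=0 peak in the question's plot, though I haven't verified it against that code): the f=0 bin simply holds the mean of the signal, so any signal with a sizable offset will peak there unless you subtract the mean first. A small sketch, reusing signal and N from the sample above:

offset_signal = signal + 5  # simulate a signal with a DC offset
raw = np.abs(fft(offset_signal) / N)
print(np.argmax(raw))  # 0: the DC bin dominates
detrended = offset_signal - np.mean(offset_signal)
print(np.argmax(np.abs(fft(detrended) / N)))  # index of the true dominant frequency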

Exponential Fit to Data Favours Smaller Values?

I am trying to apply an exponential fit to my data to determine the point at which the value drops by 1/e. When plotted, the fit seems to favor smaller values and does not portray the true relationship.
import numpy as np
import matplotlib
matplotlib.use("TkAgg") # need to set the TkAgg backend explicitly, otherwise it introduces a low-level error
from matplotlib import pyplot as plt
import scipy as sc
from scipy.optimize import curve_fit  # used below; missing from the original snippet

def autoCorrelation(sample, longTime, temp, plotTau=False):
    # compute empirical autocovariance with lag tau averaged over time longTime
    sample.takeTimeStep(timesteps=1500) # 1500 timesteps to let sample reach equilibrium
    M = np.zeros(longTime)
    for tau in range(longTime):
        M[tau] = sample.calcMagnetisation()
        sample.takeTimeStep()
    M_ave = np.average(M) # time-average
    M = (M - M_ave)
    autocorrelation = np.correlate(M, M, mode='full')
    autocorrelation /= autocorrelation.max() # normalise such that max autocorrelation is 1
    autocorrelationArray = autocorrelation[int(len(autocorrelation)/2):]
    x = np.arange(0, len(autocorrelationArray), 1)

    # apply exponential fit
    def exponenial(x, a, b):
        return a * np.exp(-b * x)

    popt, pcov = curve_fit(exponenial, x, np.absolute(autocorrelationArray)) # array, 2d array
    yy = exponenial(x, *popt)
    plt.plot(x, np.absolute(autocorrelationArray), 'o', x, yy)
    plt.title('Exponential Fit of Magnetisation Autocorrelation against Time for Temperature = ' + str(T) + ' J/k')
    plt.xlabel('Time / Number of Iterations')
    plt.ylabel('Magnetisation Autocorrelation')
    plt.show()

    # prints tau_e value b from exponential a * np.exp(-b * x)
    print('tau_e is ' + str(1/popt[1])) # units converted to time steps by taking reciprocal
if __name__ == '__main__':
    # plot autocorrelation against time
    longTime = 100
    temp = [1, 2, 2.3, 2.6, 3, 4]
    for T in temp:
        magnet = Ising(30, T) # (N, temp)
        autoCorrelation(magnet, longTime, temp)
Note: Ising is a class in another .py file containing the functions takeTimeStep and calcMagnetisation.
I expected greater values of tau_e.
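For readers without the Ising class, the fitting step itself can be tried in isolation. A minimal, self-contained sketch on synthetic data (the noisy exponential below is made up and stands in for the autocorrelation array):

import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt

def exponential(x, a, b):
    return a * np.exp(-b * x)

rng = np.random.default_rng(0)
x = np.arange(100)
y = np.abs(exponential(x, 1.0, 0.05) + rng.normal(0, 0.03, x.size))  # stand-in for |autocorrelationArray|
popt, pcov = curve_fit(exponential, x, y, p0=(1, 0.1))
plt.plot(x, y, 'o', x, exponential(x, *popt), '-')
plt.show()
print('tau_e is', 1/popt[1])  # should recover roughly 1/0.05 = 20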

Scipy.optimize.curve_fit does not fit

Say I want to fit a sine function using scipy.optimize.curve_fit. I don't know any parameters of the function. To get the frequency, I do a Fourier transform and guess all the other parameters - amplitude, phase, and offset. When I run my program, I do get a fit, but it does not make sense. What is the problem? Any help will be appreciated.
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
ampl = 1
freq = 24.5
phase = np.pi/2
offset = 0.05
t = np.arange(0,10,0.001)
func = np.sin(2*np.pi*t*freq + phase) + offset
fastfft = np.fft.fft(func)
freq_array = np.fft.fftfreq(len(t),t[0]-t[1])
max_value_index = np.argmax(abs(fastfft))
frequency = abs(freq_array[max_value_index])
def fit(a, f, p, o, t):
    return a * np.sin(2*np.pi*t*f + p) + o
guess = (0.9, frequency, np.pi/4, 0.1)
params, fit = sp.optimize.curve_fit(fit, t, func, p0=guess)
a, f, p, o = params
fitfunc = lambda t: a * np.sin(2*np.pi*t*f + p) + o
plt.plot(t, func, 'r-', t, fitfunc(t), 'b-')
The main problem in your program was a misunderstanding of how scipy.optimize.curve_fit is designed and what it assumes about the fit function:
ydata = f(xdata, *params) + eps
This means that the fit function has to take the array of x values as its first parameter, followed by the function parameters in no particular order, and must return an array of y values. Here is an example of how to do this:
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
#t has to be the first parameter of the fit function
def fit(t, a, f, p, o):
    return a * np.sin(2*np.pi*t*f + p) + o
ampl = 1
freq = 2
phase = np.pi/2
offset = 0.5
t = np.arange(0,10,0.01)
#is the same as fit(t, ampl, freq, phase, offset)
func = np.sin(2*np.pi*t*freq + phase) + offset
fastfft = np.fft.fft(func)
freq_array = np.fft.fftfreq(len(t),t[0]-t[1])
max_value_index = np.argmax(abs(fastfft))
frequency = abs(freq_array[max_value_index])
guess = (0.9, frequency, np.pi/4, 0.1)
#renamed the covariance matrix
params, pcov = scipy.optimize.curve_fit(fit, t, func, p0=guess)
a, f, p, o = params
#calculate the fit plot using the fit function
plt.plot(t, func, 'r-', t, fit(t, *params), 'b-')
plt.show()
As you can see, I have also changed the way the fitted curve for the plot is calculated. You don't need another function - just use the fit function with the parameter list that the fit procedure gives back.
The other problem was that you called the covariance array fit, overwriting the previously defined function fit. I fixed that as well.
P.S.: Of course now you only see one curve, because the perfect fit covers your data points.
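As a quick sanity check on the example above, the recovered parameters can be compared against the true values that built func:

print(params)  # should be close to (a, f, p, o) = (1, 2, pi/2, 0.5)
print(np.sqrt(np.diag(pcov)))  # one-sigma parameter uncertainties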

Generate random numbers from exponential distribution and model using python

My goal is to create a dataset of random points whose histogram looks like an exponential decay function and then plot an exponential decay function through those points.
First I tried to create a series of random numbers from an exponential distribution (but did not do so successfully, since these should be points, not numbers).
from pylab import *
from scipy.optimize import curve_fit
import random
import numpy as np
import pandas as pd
testx = pd.DataFrame(range(10)).astype(float)
testx = testx[0]
for i in range(1,11):
    x = random.expovariate(15) # rate = 15 arrivals per second
    data[i] = [x]
testy = pd.DataFrame(data).T.astype(float)
testy = testy[0]; testy
plot(testx, testy, 'ko')
The result could look something like this.
And then I define a function to draw a line through my points:
def func(x, a, e):
    return a*np.exp(-a*x)+e
popt, pcov = curve_fit(f=func, xdata=testx, ydata=testy, p0 = None, sigma = None)
print(popt) # parameters
print(pcov) # covariance
plot(testx, testy, 'ko')
xx = np.linspace(0, 15, 1000)
plot(xx, func(xx,*popt))
plt.show()
What I'm looking for is: (1) a more elegant way to create an array of random numbers from an exponential (decay) distribution and (2) how to test that my function is indeed going through the data points.
I would guess that the following is close to what you want. You can generate some random numbers drawn from an exponential distribution with numpy,
data = numpy.random.exponential(5, size=1000)
You can then create a histogram of them using numpy.histogram and draw the histogram values into a plot. You may take the middle of each bin as the position of the point (this assumption is of course wrong, but becomes more valid the more bins you use).
Fitting works as in the code from the question. You will then find that the fit roughly recovers the parameter used for the data generation (~5 in the case below).
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
data = np.random.exponential(5, size=1000)
hist,edges = np.histogram(data,bins="auto",density=True )
x = edges[:-1]+np.diff(edges)/2.
plt.scatter(x,hist)
func = lambda x,beta: 1./beta*np.exp(-x/beta)
popt, pcov = curve_fit(f=func, xdata=x, ydata=hist)
print(popt)
xx = np.linspace(0, x.max(), 101)
plt.plot(xx, func(xx,*popt), ls="--", color="k",
         label=r"fit, $\beta = {:.2f}$".format(popt[0]))
plt.legend()
plt.show()
I think you are actually asking about a regression problem, which is what Praveen was suggesting.
You have a bog-standard exponential decay that arrives at the y-axis at about y=0.27. Its equation is therefore y = 0.27*exp(-0.27*x). I can model Gaussian error around the values of this function and plot the result using the following code.
import matplotlib.pyplot as plt
from math import exp
from scipy.stats import norm
x = range(0, 16)
Y = [0.27*exp(-0.27*_) for _ in x]
error = norm.rvs(0, scale=0.05, size=9)
simulated_data = [max(0, y+e) for (y,e) in zip(Y[:9],error)]
plt.plot(x, Y, 'b-')
plt.plot(x[:9], simulated_data, 'r.')
plt.show()
print (x[:9])
print (simulated_data)
Here's the plot. Notice that I save the output values for subsequent use.
Now I can calculate the nonlinear regression of the exponential decay values, contaminated with noise, on the independent variable, which is what curve_fit does.
from math import exp
from scipy.optimize import curve_fit
import numpy as np
def model(x, p):
    return p*np.exp(-p*x)
x = list(range(9))
Y = [0.22219001972988275, 0.15537454187341937, 0.15864069451825827, 0.056411162886672819, 0.037398831058143338, 0.10278251869912845, 0.03984605649260467, 0.0035360087611421981, 0.075855255999424692]
popt, pcov = curve_fit(model, x, Y)
print (popt[0])
print (pcov)
The bonus is that, not only does curve_fit calculate an estimate for the parameter — 0.207962159793 — it also offers an estimate for this estimate's variance — 0.00086071 — as an element of pcov. This would appear to be a fairly small value, given the small sample size.
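Taking the square root of that diagonal element converts the variance into a standard error for the estimate:

perr = np.sqrt(np.diag(pcov))  # standard errors of the parameter estimates
print(perr[0])  # roughly 0.029 for a variance of ~0.00086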
Here's how to calculate the residuals. Notice that each residual is the difference between the data value and the value estimated from x using the parameter estimate.
residuals = [y-model(_, popt[0]) for (y, _) in zip(Y, x)]
print (residuals)
If you wanted to further 'test that my function is indeed going through the data points' then I would suggest looking for patterns in the residuals. But discussions like this might be beyond what's welcomed on stackoverflow: Q-Q and P-P plots, plots of residuals vs y or x, and so on.
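A quick visual version of that check, using x, Y, popt, and model from the code above: plot the residuals against x and look for structure; roughly random scatter around zero supports the fit.

import matplotlib.pyplot as plt
residuals = [y - model(_, popt[0]) for (y, _) in zip(Y, x)]
plt.axhline(0, color='gray')
plt.plot(x, residuals, 'ko')
plt.show()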
I agree with the solution of @ImportanceOfBeingErnest, but I'd like to add a (well-known?) general solution for distributions. If you have a distribution function f with integral F (i.e. f = dF / dx), then you get the required distribution by mapping uniform random numbers through inv F, i.e. the inverse function of the integral. In the case of the exponential function, the integral is again an exponential, and its inverse is the logarithm. So it can be done like this:
import matplotlib.pyplot as plt
import numpy as np
from random import random
def gen(a):
    y = random()
    return -np.log(y) / a

def dist_func(x, a):
    return a * np.exp(-a * x)
data = [ gen(3.14) for x in range(20000) ]
fig = plt.figure()
ax = fig.add_subplot( 1, 1, 1 )
ax.hist(data, bins=80, density=True, histtype="step")  # density=True replaces the removed 'normed' argument
ax.plot(np.linspace(0,5,150), dist_func( np.linspace(0,5,150), 3.14 ) )
plt.show()

python nonlinear least squares fitting

I am a little out of my depth in terms of the math involved in my problem, so I apologise for any incorrect nomenclature.
I was looking at using the scipy function leastsq, but am not sure if it is the correct function.
I have the following equation:
eq = lambda PLP,p0,l0,kd : 0.5*(-1-((p0+l0)/kd) + np.sqrt(4*(l0/kd)+(((l0-p0)/kd)-1)**2))
I have data (8 sets) for all the terms except for kd (PLP,p0,l0). I need to find the value of kd by non-linear regression of the above equation.
From the examples I have read, leastsq seems to not allow for the inputting of the data, to get the output I need.
Thank you for your help
This is a bare-bones example of how to use scipy.optimize.leastsq:
import numpy as np
import scipy.optimize as optimize
import matplotlib.pylab as plt
def func(kd, p0, l0):
    return 0.5*(-1 - ((p0+l0)/kd) + np.sqrt(4*(l0/kd) + (((l0-p0)/kd) - 1)**2))
The sum of the squares of the residuals is the function of kd we're trying to minimize:
def residuals(kd, p0, l0, PLP):
    return PLP - func(kd, p0, l0)
Here I generate some random data. You'd want to load your real data here instead.
N=1000
kd_guess=3.5 # <-- You have to supply a guess for kd
p0 = np.linspace(0,10,N)
l0 = np.linspace(0,10,N)
PLP = func(kd_guess,p0,l0)+(np.random.random(N)-0.5)*0.1
kd, cov, infodict, mesg, ier = optimize.leastsq(
    residuals, kd_guess, args=(p0, l0, PLP), full_output=True)  # the old 'warning' kwarg is no longer accepted by scipy
print(kd)
yields something like
3.49914274899
This is the best fit value for kd found by optimize.leastsq.
Here we generate the value of PLP using the value for kd we just found:
PLP_fit=func(kd,p0,l0)
Below is a plot of PLP versus p0. The blue line is from data, the red line is the best fit curve.
plt.plot(p0,PLP,'-b',p0,PLP_fit,'-r')
plt.show()
Another option is to use lmfit.
They provide a great example to get you started:
#!/usr/bin/env python
#<examples/doc_basic.py>
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
import numpy as np

# create data to be fitted
x = np.linspace(0, 15, 301)
data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
        np.random.normal(size=len(x), scale=0.2))

# define objective function: returns the array to be minimized
def fcn2min(params, x, data):
    """model decaying sine wave, subtract data"""
    amp = params['amp']
    shift = params['shift']
    omega = params['omega']
    decay = params['decay']
    model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
    return model - data

# create a set of Parameters
params = Parameters()
params.add('amp', value=10, min=0)
params.add('decay', value=0.1)
params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
params.add('omega', value=3.0)

# do fit, here with leastsq model
minner = Minimizer(fcn2min, params, fcn_args=(x, data))
kws = {'options': {'maxiter': 10}}
result = minner.minimize()

# calculate final result
final = data + result.residual

# write error report
report_fit(result)

# try to plot results
try:
    import pylab
    pylab.plot(x, data, 'k+')
    pylab.plot(x, final, 'r')
    pylab.show()
except:
    pass
#<end of examples/doc_basic.py>
