Runge-Kutta 4 for solving systems of ODEs in Python

I wrote code for Runge-Kutta 4 for solving a system of ODEs.
It works fine for a 1-D ODE, but when I try to solve x'' + kx = 0 I have a problem defining the vector-valued function:
Let u1 = x and u2 = x' = u1', then the system looks like:
u1' = u2
u2' = -k*u1
If u = (u1,u2) and f(u, t) = (u2, -k*u1), then we need to solve:
u' = f(u, t)
def f(u, t, omega=2):
    u, v = u
    return np.asarray([v, -omega**2*u])
My entire code is:
import numpy as np

def ode_RK4(f, X_0, dt, T):
    N_t = int(round(T/dt))
    # Create an array for the functions ui
    u = np.zeros((len(X_0), N_t+1))  # Array u[j,:] corresponds to the j-solution
    t = np.linspace(0, N_t*dt, N_t + 1)
    # Initial conditions
    for j in range(len(X_0)):
        u[j, 0] = X_0[j]
    # RK4
    for j in range(len(X_0)):
        for n in range(N_t):
            u1 = f(u[j, n] + 0.5*dt*f(u[j, n], t[n])[j], t[n] + 0.5*dt)[j]
            u2 = f(u[j, n] + 0.5*dt*u1, t[n] + 0.5*dt)[j]
            u3 = f(u[j, n] + dt*u2, t[n] + dt)[j]
            u[j, n+1] = u[j, n] + (1/6)*dt*(f(u[j, n], t[n])[j] + 2*u1 + 2*u2 + u3)
    return u, t
def demo_exp():
    import matplotlib.pyplot as plt

    def f(u, t):
        return np.asarray([u])

    u, t = ode_RK4(f, [1], 0.1, 1.5)
    plt.plot(t, u[0,:], "b*", t, np.exp(t), "r-")
    plt.show()

def demo_osci():
    import matplotlib.pyplot as plt

    def f(u, t, omega=2):
        # u, v = u   Here I've got a problem
        return np.asarray([v, -omega**2*u])

    u, t = ode_RK4(f, [2, 0], 0.1, 2)
    for i in [1]:
        plt.plot(t, u[i,:], "b*")
    plt.show()
Thank you in advance.

You are on the right path, but when applying time-integration methods such as RK to vector-valued ODEs, one essentially does exactly the same thing as in the scalar case, just with vectors.
Thus, you skip the for j in range(len(X_0)) loop and the associated indexing, and you make sure that you pass the initial values as vectors (NumPy arrays).
I also cleaned up the indexing for t a little and stored the solution in a list.
import numpy as np

def ode_RK4(f, X_0, dt, T):
    N_t = int(round(T/dt))
    # Initial conditions
    usol = [X_0]
    u = np.copy(X_0)
    tt = np.linspace(0, N_t*dt, N_t + 1)
    # RK4
    for t in tt[:-1]:
        u1 = f(u + 0.5*dt*f(u, t), t + 0.5*dt)
        u2 = f(u + 0.5*dt*u1, t + 0.5*dt)
        u3 = f(u + dt*u2, t + dt)
        u = u + (1/6)*dt*(f(u, t) + 2*u1 + 2*u2 + u3)
        usol.append(u)
    return usol, tt
def demo_exp():
    import matplotlib.pyplot as plt

    def f(u, t):
        return np.asarray([u])

    u, t = ode_RK4(f, np.array([1]), 0.1, 1.5)
    plt.plot(t, u, "b*", t, np.exp(t), "r-")
    plt.show()

def demo_osci():
    import matplotlib.pyplot as plt

    def f(u, t, omega=2):
        u, v = u
        return np.asarray([v, -omega**2*u])

    u, t = ode_RK4(f, np.array([2, 0]), 0.1, 2)
    u1 = [a[0] for a in u]
    for i in [1]:
        plt.plot(t, u1, "b*")
    plt.show()
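As a quick sanity check (not part of the original answer): for omega = 2 and initial conditions x(0) = 2, x'(0) = 0, the exact solution is x(t) = 2*cos(2*t), so you can overlay it on the numerical result. A minimal sketch, assuming the corrected ode_RK4 above (demo_osci_check is just an illustrative name):

def demo_osci_check():
    import matplotlib.pyplot as plt

    def f(u, t, omega=2):
        x, v = u
        return np.asarray([v, -omega**2*x])

    u, t = ode_RK4(f, np.array([2.0, 0.0]), 0.1, 2)
    x_num = [a[0] for a in u]   # numerical position
    x_exact = 2*np.cos(2*t)     # exact solution for these initial conditions
    plt.plot(t, x_num, "b*", t, x_exact, "r-")
    plt.show()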

The model is the standard RK4 scheme (shown as an image in the original post), from Langtangen's book Programming for Computations - Python.
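For reference, a sketch of the standard RK4 update that the figure presumably shows (my reconstruction from the code above, not a copy of the book's figure):

\begin{aligned}
k_1 &= f(u_n, t_n), \\
k_2 &= f\left(u_n + \tfrac{\Delta t}{2}\,k_1,\; t_n + \tfrac{\Delta t}{2}\right), \\
k_3 &= f\left(u_n + \tfrac{\Delta t}{2}\,k_2,\; t_n + \tfrac{\Delta t}{2}\right), \\
k_4 &= f\left(u_n + \Delta t\,k_3,\; t_n + \Delta t\right), \\
u_{n+1} &= u_n + \tfrac{\Delta t}{6}\left(k_1 + 2k_2 + 2k_3 + k_4\right).
\end{aligned}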

Related

Fitting sinusoidal data in Python

I am trying to fit experimental data with a function of the form:
A * np.sin(w*t + p) * np.exp(-g*t) + c
However, the fitted curve (the line in the following image) is not accurate:
If I leave out the exponential decay part, it works and I get a sine function that is not decaying:
The function that I use is from this thread:
def fit_sin(tt, yy):
    '''Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"'''
    tt = np.array(tt)
    yy = np.array(yy)
    ff = np.fft.fftfreq(len(tt), (tt[1]-tt[0]))  # assume uniform spacing
    Fyy = abs(np.fft.fft(yy))
    guess_freq = abs(ff[np.argmax(Fyy[1:])+1])   # excluding the zero frequency "peak", which is related to offset
    guess_amp = np.std(yy) * 2.**0.5
    guess_offset = np.mean(yy)
    guess_phase = 0.
    guess_damping = 0.5
    guess = np.array([guess_amp, 2.*np.pi*guess_freq, guess_phase, guess_offset, guess_damping])

    def sinfunc(t, A, w, p, g, c): return A * np.sin(w*t + p) * np.exp(-g*t) + c

    popt, pcov = scipy.optimize.curve_fit(sinfunc, tt, yy, p0=guess)
    A, w, p, g, c = popt
    f = w/(2.*np.pi)
    fitfunc = lambda t: A * np.sin(w*t + p) * np.exp(-g*t) + c
    return {"amp": A, "omega": w, "phase": p, "offset": c, "damping": g, "freq": f, "period": 1./f, "fitfunc": fitfunc, "maxcov": np.max(pcov), "rawres": (guess, popt, pcov)}
res = fit_sin(x, y)
x_fit = np.linspace(np.min(x), np.max(x), len(x))
plt.plot(x, y, label='Data', linewidth=line_width)
plt.plot(x_fit, res["fitfunc"](x_fit), label='Fit Curve', linewidth=line_width)
plt.show()
I am not sure if I implemented the code incorrectly or if the function is not able to describe my data correctly. I appreciate your help!
You can load the txt file from here:
GitHub
and manipulate the data like this to compare it with the post:
file = 'A2320_Data.txt'
column = 17

data = np.loadtxt(file, float)

start = 270
end = 36000
time_scale = 3600

x = []
y = []
for i in range(len(data)):
    if start < data[i][0] < end:
        x.append(data[i][0]/time_scale)
        y.append(data[i][column])
x = np.array(x)
y = np.array(y)

plt.plot(x, y, label='Pyro Oscillations', linewidth=line_width)
Your fitted curve will look like this
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
def sinfunc(t, A, w, p, g, c): return A * np.sin(w*t + p) * np.exp(-g*t) + c
tt = np.linspace(0, 10, 1000)
yy = sinfunc(tt, -1, 10, 2, 0.3, 2)
plt.plot(tt, yy)
g stretches the envelope horizontally, c moves the center vertically, w determines the oscillation frequency, A stretches the envelope vertically.
So it can't accurately model the data you have.
Also, you will not be able to reliably fit w; to determine the oscillation frequency it is better to try an FFT.
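For instance, a minimal sketch of an FFT-based frequency estimate, assuming uniformly spaced samples like the x, y arrays loaded above (the helper name estimate_frequency is just illustrative):

import numpy as np

def estimate_frequency(t, y):
    # frequency grid for uniformly spaced samples
    freqs = np.fft.rfftfreq(len(t), t[1] - t[0])
    spectrum = np.abs(np.fft.rfft(y - np.mean(y)))  # subtract the mean so the zero-frequency bin doesn't dominate
    return freqs[np.argmax(spectrum)]               # frequency (in cycles per unit of t) of the strongest peak

# e.g. f0 = estimate_frequency(x, y); then w0 = 2*np.pi*f0 is a starting guess for curve_fit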
Of course you could adjust the function to look like your data by adding a few more parameters, e.g.
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
def sinfunc(t, A, w, p, g, c1, c2, c3): return A * np.sin(w*t + p) * (np.exp(-g*t) - c1) + c2 * np.exp(-g*t) + c3
tt = np.linspace(0, 10, 1000)
yy = sinfunc(tt, -1, 20, 2, 0.5, 1, 1.5, 1)
plt.plot(tt, yy)
But you will still have to give a good guess for the frequency.

Python / Matplotlib - How to compute/plot derivative without hard-coding it?

I am plotting a famous function and its derivative here.
The famous function is the one that arises from Bernoulli's inequality.
I wonder if there is some way to calculate the derivative without hard-coding it,
i.e. by just using some library and calling derivative(f) or something like that.
import numpy as np
import matplotlib.pyplot as plt

# a, b - the two ends of our interval
a = -2.2
b = +0.25
n = 10

# function f(t) = (1+t)^n - n*t - 1
def f(t):
    '''
    s = 1
    for i in range(n):
        s = s * (1 + 1 * t)
    return s - n * t - 1
    '''
    return np.power(1 + t, n) - n * t - 1

# derivative f'(t) = n*(1+t)^(n-1) - n
def f1(t):
    '''
    s = 1
    for i in range(n-1):
        s = s * (1 + 1 * t)
    return n * s - n
    '''
    return n * np.power(1 + t, n-1) - n

t = np.linspace(a, b, 4000)
g = f(t)
g1 = f1(t)

plt.plot(t, g, 'r')   # plotting t, g separately
plt.plot(t, g1, 'g')  # plotting t, g1 separately
plt.axhline(0, color='k')
plt.axvline(0, color='k')

print("=====================")
print(f(-2))
print(f(-1.5))
print(f(-1))
print(f(-0.5))
print(f(0))
print("=====================")
print(f1(-2))
print(f1(-1.5))
print(f1(-1))
print(f1(-0.5))
print(f1(0))

plt.grid()
plt.show()
Among many others, there are the following two methods:
Method 1
You can use derivative from scipy, which takes a function f and returns its derivative with respect to t, so you don't have to define the derivative function f1(t) explicitly.
from scipy.misc import derivative

def f(t):
    return np.power(1 + t, n) - n * t - 1

# Rest of the code
t = np.linspace(a, b, 4000)
g = f(t)

plt.plot(t, g, 'r')  # plotting t, g separately
plt.plot(t, derivative(f, t, dx=0.001), 'g')
Method 2
You can use NumPy's gradient function, which uses central differences and returns the same shape as the input array.
t, dt = np.linspace(a, b, 4000, retstep=True)
g1 = np.gradient(f(t), dt)
plt.plot(t, g1, 'g')
You can use sympy to calculate the derivative symbolically. If you have a nice mathematical expression, this gives a better accuracy than numerical methods.
Sympy has its own plot functions, but they can be cumbersome if you want to combine many elements. In those cases, it can be easier to use lambdify to convert them to numpy functions.
from sympy import Pow, lambdify
from sympy.abc import t, n
f = Pow(1 + t, n) - n * t - 1
f1 = f.diff(t) # result: -n + n*(t + 1)**n/(t + 1)
f_np = lambdify(t, f.subs(n, 10))
f1_np = lambdify(t, f1.subs(n, 10))
import numpy as np
from matplotlib import pyplot as plt
a = -2.2
b = +0.25
x = np.linspace(a, b, 1000)
plt.plot(x, f_np(x), 'r')
plt.plot(x, f1_np(x), 'g')
plt.axhline(0, color='k')
plt.axvline(0, color='k')
plt.show()
PS: Purely staying within sympy, plotting can happen as follows:
from sympy import Pow, plot
from sympy.abc import t, n
a = -2.2
b = +0.25
f = Pow(1 + t, n) - n * t - 1
f1 = f.diff(t)
p1 = plot(f.subs(n, 10), (t, a, b), line_color='r', show=False)
p2 = plot(f1.subs(n, 10), (t, a, b), line_color='g', show=False)
p1.append(p2[0])
p1.show()
Automatic differentiation is a great tool to do this. Check out https://github.com/HIPS/autograd.
import autograd.numpy as np
import matplotlib.pyplot as plt
from autograd import elementwise_grad as egrad

# a, b - the two ends of our interval
a = -2.2
b = +0.25
n = 10

# function f(t) = (1+t)^n - n*t - 1
def f(t):
    '''
    s = 1
    for i in range(n):
        s = s * (1 + 1 * t)
    return s - n * t - 1
    '''
    return np.power(1 + t, n) - n * t - 1

# derivative f'(t) = n*(1+t)^(n-1) - n
f1 = egrad(f)

t = np.linspace(a, b, 4000)
g = f(t)
g1 = f1(t)

plt.plot(t, g, 'r')   # plotting t, g separately
plt.plot(t, g1, 'g')  # plotting t, g1 separately
plt.axhline(0, color='k')
plt.axvline(0, color='k')

print("=====================")
print(f(-2))
print(f(-1.5))
print(f(-1))
print(f(-0.5))
print(f(0))
print("=====================")
print(f1(-2.0))
print(f1(-1.5))
print(f1(-1.0))
print(f1(-0.5))
print(f1(0.0))
Note that I had to change the arguments passed to f1 to be floats, but otherwise it generates the same plot. This is in general how all of the "deep learning" frameworks such as tensorflow, Torch, etc. compute gradients.
This avoids having to analytically compute the derivative yourself and also avoids issues with numerical differentiation.

solve_ivp returning a different outcome than odeint?

I am trying to solve a simple ODE so as to understand the new SciPy API.
I wrote a routine for Runge-Kutta of order 4 and confirmed it against the old odeint API, and they matched beautifully. But now that I am trying to use solve_ivp, it does not seem to be working. What am I getting wrong?
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import solve_ivp, odeint
import time

freq = np.arange(1, 10000, 100)

def g(q, t):
    return -q ** 3 + np.sin(t)

a = 0
b = 10
npoints = 100
h = (b - a) / npoints
t = np.arange(a, b, h)
output1 = np.zeros(t.shape)

x = 0
for i in range(len(t)):
    output1[i] = x
    k1 = h * g(x, t[i])
    k2 = h * g(x + 0.5 * k1, t[i] + 0.5 * h)
    k3 = h * g(x + 0.5 * k2, t[i] + 0.5 * h)
    k4 = h * g(x + k3, t[i] + 0.5 * h)
    x = x + 1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4)

# ---------------Solving using odeint (old API)---------------#
y1_odeint = odeint(g, 0, t)

# ---------------Solving using new API-------------#
y2 = solve_ivp(g, (a, b), [0], t_eval=t)

# --------------------Graphical representation--------------------------#
fig = plt.figure()
ax = fig.add_subplot(121)
ax1 = fig.add_subplot(122)

ax.plot(t, output1, label="my own")
ax.plot(t, y1_odeint, label="odeint")
ax.plot(y2.t, np.squeeze(y2.y), label="new API")
ax.legend()
ax.set_title("Output")

ax1.plot(t, output1 - np.squeeze(y1_odeint), label="|odeint-my own|")
ax1.legend()

plt.tight_layout()
plt.show()
Take another look at the docstring for solve_ivp. It expects the first argument of g to be t. By default, odeint uses the opposite convention. If you have a recent version of scipy, you can tell odeint that the first argument is t by giving it the argument tfirst=True.
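For example, a minimal sketch of the fix under those conventions, reusing g, t, a and b from the question (g_tfirst is just an illustrative name, and tfirst requires a reasonably recent SciPy):

# Write the right-hand side with t first, as solve_ivp expects
def g_tfirst(t, q):
    return -q ** 3 + np.sin(t)

y2 = solve_ivp(g_tfirst, (a, b), [0], t_eval=t)  # solve_ivp always calls fun(t, y)
y1_odeint = odeint(g_tfirst, 0, t, tfirst=True)  # tell odeint that t comes first too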

Using solve_ivp instead of odeint to solve an initial value problem

Currently, I solve the following system of ODEs using odeint:
dx/dt = (-x + u)/2.0
dy/dt = (-y + x)/5.0
initial conditions: x = 0, y = 0
However, I would like to use solve_ivp, which seems to be the recommended option for this type of problem, but honestly I don't know how to adapt the code...
Here is the code I'm using with odeint:
import numpy as np
from scipy.integrate import odeint, solve_ivp
import matplotlib.pyplot as plt

def model(z, t, u):
    x = z[0]
    y = z[1]
    dxdt = (-x + u)/2.0
    dydt = (-y + x)/5.0
    dzdt = [dxdt, dydt]
    return dzdt

def main():
    # initial condition
    z0 = [0, 0]

    # number of time points
    n = 401

    # time points
    t = np.linspace(0, 40, n)

    # step input
    u = np.zeros(n)
    # change to 2.0 at time = 5.0
    u[51:] = 2.0

    # store solution
    x = np.empty_like(t)
    y = np.empty_like(t)
    # record initial conditions
    x[0] = z0[0]
    y[0] = z0[1]

    # solve ODE
    for i in range(1, n):
        # span for next time step
        tspan = [t[i-1], t[i]]
        # solve for next step
        z = odeint(model, z0, tspan, args=(u[i],))
        # store solution for plotting
        x[i] = z[1][0]
        y[i] = z[1][1]
        # next initial condition
        z0 = z[1]

    # plot results
    plt.plot(t, u, 'g:', label='u(t)')
    plt.plot(t, x, 'b-', label='x(t)')
    plt.plot(t, y, 'r--', label='y(t)')
    plt.ylabel('values')
    plt.xlabel('time')
    plt.legend(loc='best')
    plt.show()

main()
It's important to note that solve_ivp expects f(t, z) as the right-hand side of the ODE. If you don't want to change your ODE function and also want to pass your parameter u, I recommend defining a wrapper function:
def model(z, t, u):
    x = z[0]
    y = z[1]
    dxdt = (-x + u)/2.0
    dydt = (-y + x)/5.0
    dzdt = [dxdt, dydt]
    return dzdt

def odefun(t, z):
    if t < 5:
        return model(z, t, 0)
    else:
        return model(z, t, 2)
Now it's easy to call solve_ivp:
def main():
    # initial condition
    z0 = [0, 0]

    # number of time points
    n = 401

    # time points
    t = np.linspace(0, 40, n)

    # step input
    u = np.zeros(n)
    # change to 2.0 at time = 5.0
    u[51:] = 2.0

    res = solve_ivp(fun=odefun, t_span=[0, 40], y0=z0, t_eval=t)
    x = res.y[0, :]
    y = res.y[1, :]

    # plot results
    plt.plot(t, u, 'g:', label='u(t)')
    plt.plot(t, x, 'b-', label='x(t)')
    plt.plot(t, y, 'r--', label='y(t)')
    plt.ylabel('values')
    plt.xlabel('time')
    plt.legend(loc='best')
    plt.show()

main()
Note that without passing t_eval=t, the solver will automatically choose the time points inside tspan at which the solution will be stored.
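For instance, a minimal sketch of that behaviour (not part of the original answer):

res = solve_ivp(fun=odefun, t_span=[0, 40], y0=[0, 0])
print(res.t)                     # the solver's own (generally non-uniform) time points
print(res.t.shape, res.y.shape)  # res.y has one column per time point in res.t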

Lotka-Volterra equations(predator prey) using Runge-Kutta in Python

I am trying to write a program using the Lotka-Volterra equations for predator-prey interactions, solving the ODEs
dx/dt = a*x - B*x*y
dy/dt = g*x*y - s*y
with the 4th-order Runge-Kutta method.
I need to plot a graph showing both x and y as a function of time from t = 0 to t = 30.
a = alpha = 1
b = beta = 0.5
g = gamma = 0.5
s = sigma = 2
initial conditions x = y = 2
Here is my code so far, but it does not display anything on the graph. Some help would be nice.
#!/usr/bin/env python
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np

def rk4(f, r, t, h):
    """ Runge-Kutta 4 method """
    k1 = h*f(r, t)
    k2 = h*f(r+0.5*k1, t+0.5*h)
    k3 = h*f(r+0.5*k2, t+0.5*h)
    k4 = h*f(r+k3, t+h)
    return (k1 + 2*k2 + 2*k3 + k4)/6

def f(r, t):
    alpha = 1.0
    beta = 0.5
    gamma = 0.5
    sigma = 2.0
    x, y = r[2], r[2]
    fxd = x*(alpha - beta*y)
    fyd = -y*(gamma - sigma*x)
    return np.array([fxd, fyd], float)

tpoints = np.linspace(0, 30, 0.1)
xpoints = []
ypoints = []
r = np.array([2, 2], float)
for t in tpoints:
    xpoints += [r[2]]
    ypoints += [r[2]]
    r += rk4(f, r, t, h)

plt.plot(tpoints, xpoints)
plt.plot(tpoints, ypoints)
plt.xlabel("Time")
plt.ylabel("Population")
plt.title("Lotka-Volterra Model")
plt.savefig("Lotka_Volterra.png")
plt.show()
A simple check of your variable tpoints after running your script shows it's empty:
In [7]: run test.py
In [8]: tpoints
Out[8]: array([], dtype=float64)
This is because you're using np.linspace incorrectly. The third argument is the number of elements desired in the output. You've requested an array of length 0.1.
Take a look at np.linspace's docstring. You won't have a problem figuring out how to adjust your code.
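For example, a minimal sketch of one possible fix, keeping the intended step size of 0.1:

h = 0.1
tpoints = np.linspace(0, 30, 301)  # 301 evenly spaced points from 0 to 30, i.e. a step of 0.1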
1) Define the 'h' variable.
2) Use
tpoints = np.arange(30)  # array([0, 1, 2, ..., 29])
not
np.linspace()
and don't forget to set the time step size equal to h:
h = 0.1
tpoints = np.arange(0, 30, h)
3) Be careful with the indexes:
def f(r, t):
    ...
    x, y = r[0], r[1]
    ...

for t in tpoints:
    xpoints += [r[0]]
    ypoints += [r[1]]
    ...
and better, use .append():
for t in tpoints:
    xpoints.append(r[0])
    ypoints.append(r[1])
    ...
Here's tested code for Python 3.7 (I've set h = 0.001 for more precision):
import matplotlib.pyplot as plt
import numpy as np

def rk4(r, t, h):  # edited; no need for input f
    """ Runge-Kutta 4 method """
    k1 = h*f(r, t)
    k2 = h*f(r+0.5*k1, t+0.5*h)
    k3 = h*f(r+0.5*k2, t+0.5*h)
    k4 = h*f(r+k3, t+h)
    return (k1 + 2*k2 + 2*k3 + k4)/6

def f(r, t):
    alpha = 1.0
    beta = 0.5
    gamma = 0.5
    sigma = 2.0
    x, y = r[0], r[1]
    fxd = x*(alpha - beta*y)
    fyd = -y*(gamma - sigma*x)
    return np.array([fxd, fyd], float)

h = 0.001                      # edited
tpoints = np.arange(0, 30, h)  # edited
xpoints, ypoints = [], []
r = np.array([2, 2], float)
for t in tpoints:
    xpoints.append(r[0])  # edited
    ypoints.append(r[1])  # edited
    r += rk4(r, t, h)     # edited; no need for input f

plt.plot(tpoints, xpoints)
plt.plot(tpoints, ypoints)
plt.xlabel("Time")
plt.ylabel("Population")
plt.title("Lotka-Volterra Model")
plt.savefig("Lotka_Volterra.png")
plt.show()
You can also try to plot "cycles":
plt.xlabel("Prey")
plt.ylabel("Predator")
plt.plot(xpoints, ypoints)
plt.show()
Resulting phase plot: https://i.stack.imgur.com/NB9lc.png
