How to include known parameter that changes over time in solve_bvp - python

I am trying to use scipy's solve_bvp in Python to solve differential equations that depend on a known parameter that changes over time. I have this parameter saved in a numpy array. However, when I try to use this array in the derivatives function, I get the following error: ValueError: operands could not be broadcast together with shapes (10,) (11,).
Below is a simplified version of my code. I want the variable d2 to take certain values at different times according to an array, d2_set_values. The differential equations for some of the 12 variables then depend on d2. I hope it's clear from this code what I'm trying to achieve.
import numpy as np
from scipy.integrate import solve_bvp
t = np.linspace(0, 10, 11)
# Known parameter that changes over time
d2_set_values = np.zeros(t.size)
d2_set_values[:4] = 0.1
d2_set_values[4:8] = 0.2
d2_set_values[8:] = 0.1
# Initialise y vector
y = np.zeros((12, t.size))
# ODEs
def fun(x, y):
    S1, I1, R1, S2, I2, R2, lamS1, lamI1, lamR1, lamS2, lamI2, lamR2 = y
    d1 = 0.5*(I1 + 0.1*I2)*(lamS1 - lamI1)
    d2 = d2_set_values
    dS1dt = -0.5*S1*(1-d1)*(I1 + 0.1*I2)
    dS2dt = -0.5*S2*(1-d2)*(I2 + 0.1*I1)
    dI1dt = 0.5*S1*(1-d1)*(I1 + 0.1*I2) - 0.2*I1
    dI2dt = 0.5*S2*(1-d2)*(I2 + 0.1*I1) - 0.2*I2
    dR1dt = 0.2*I1
    dR2dt = 0.2*I2
    dlamS1dt = 0.5*(1-d1)*S1*lamS1
    dlamS2dt = 0.5*(1-d2)*S2*lamS2
    dlamI1dt = 0.5*(1-d1)*I1*lamI1
    dlamI2dt = 0.5*(1-d2)*I2*lamI2
    dlamR1dt = lamR1
    dlamR2dt = lamR2
    return np.vstack((dS1dt, dI1dt, dR1dt, dS2dt, dI2dt, dR2dt, dlamS1dt, dlamI1dt, dlamR1dt, dlamS2dt, dlamI2dt, dlamR2dt))
# Boundary conditions
def bc(ya, yb):
    return np.array([ya[0]-0.99, ya[1]-0.01, ya[2]-0., ya[3]-1.0, ya[4]-0., ya[5]-0.,
                     yb[6]-0., yb[7]-1., yb[8]-0., yb[9]-0, yb[10]-0, yb[11]-0])
# Run the solver
sol = solve_bvp(fun, bc, t, y)
I have even tried reducing the size of d2_set_values by one, but that doesn't solve the issue.
Any help I can get would be much appreciated!
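One possible workaround (a minimal sketch, not the full 12-state system): because solve_bvp refines its mesh, fun is called with x arrays whose length differs from t.size, so indexing d2_set_values directly cannot work; interpolating the known values onto the solver's current mesh with np.interp keeps the shapes consistent. The reduced two-state system and the names y_init, S2, I2 below are purely illustrative.
import numpy as np
from scipy.integrate import solve_bvp

t = np.linspace(0, 10, 11)
d2_set_values = np.zeros(t.size)
d2_set_values[:4] = 0.1
d2_set_values[4:8] = 0.2
d2_set_values[8:] = 0.1

def fun(x, y):
    # x is the solver's current mesh, which in general has a different
    # length than t, so look up d2 by interpolation instead of indexing.
    # (np.interp is piecewise linear; a step-wise lookup could be used
    # instead if the parameter is meant to jump between values.)
    d2 = np.interp(x, t, d2_set_values)
    S2, I2 = y
    dS2dt = -0.5*S2*(1 - d2)*I2
    dI2dt = 0.5*S2*(1 - d2)*I2 - 0.2*I2
    return np.vstack((dS2dt, dI2dt))

def bc(ya, yb):
    return np.array([ya[0] - 1.0, ya[1] - 0.01])

y_init = np.zeros((2, t.size))
y_init[0] = 1.0
y_init[1] = 0.01
sol = solve_bvp(fun, bc, t, y_init)
print(sol.status, sol.message)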

Related

Step by step time integrators in Python

I am solving a first-order initial value problem of the form:
dy/dt = f(t, y(t)), y(0) = y0
I would like to obtain y(n+1) from a given numerical scheme, for example:
using the explicit Euler scheme, we have
y(i) = y(i-1) + f(t(i-1), y(i-1)) * dt
Example code:
# Test code to evaluate different time integrators for the following equation:
# y' = (1/2) y + 2sin(3t) ; y(0) = -24/37
def dy_dt(y,t):
    func = (1/2)*y + 2*np.sin(3*t)
    return func
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
tmin = 0
tmax = 50
delt= 1e-2
t = np.arange(tmin,tmax,delt)
total_steps = len(t)
y_explicit=np.zeros(total_steps)
#y_ODEint=np.zeros(total_steps)
y0 = -24/37
y_explicit[0]=y0
#y_ODEint[0]=y0
# exact solution
y_exact = -(24/37)*np.cos(3*t)- (4/37)*np.sin(3*t) + (y0+24/37)*np.exp(0.5*t)
# Solution using ODEint Python
y_ODEint = odeint(dy_dt,y0,t)
for i in range(1,total_steps):
    # Explicit scheme
    y_explicit[i] = y_explicit[i-1] + (dy_dt(y_explicit[i-1],t[i-1]))*delt
    # Update using ODEint
    # y_ODEint[i] = odeint(dy_dt,y_ODEint[i-1],[0,delt])[-1]
plt.figure()
plt.plot(t,y_exact)
plt.plot(t,y_explicit)
# plt.plot(t,y_ODEint)
The current issue I am having is that functions like odeint in Python provide the entire y(t) at once, as opposed to one y(i) per step, as in the line "y_ODEint = odeint(dy_dt,y0,t)".
You can see in the code how I have coded the explicit scheme, which gives y(i) for every time step. I want to do the same with odeint; I tried something, but it didn't work (all the commented lines).
I want to obtain y(i) rather than all ys at once using odeint. Is that possible?
Your system is time-variant, so you cannot translate the time step from (t[i-1], t[i]) to (0, delt).
The step-by-step integration will be unstable for your differential equation, though.
Here is what I get:
def dy_dt(y,t):
    func = (1/2)*y + 2*np.sin(3*t)
    return func
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
tmin = 0
tmax = 40
delt= 1e-2
t = np.arange(tmin,tmax,delt)
total_steps = len(t)
y_explicit=np.zeros(total_steps)
#y_ODEint=np.zeros(total_steps)
y0 = -24/37
y_explicit[0]=y0
# exact solution
y_exact = -(24/37)*np.cos(3*t)- (4/37)*np.sin(3*t) + (y0+24/37)*np.exp(0.5*t)
# Solution using ODEint Python
y_ODEint = odeint(dy_dt,y0,t)
# To be filled step by step
y_ODEint_2 = np.zeros_like(y_ODEint)
y_ODEint_2[0] = y0
for i in range(1,len(y_ODEint_2)):
    # update your code to run with the correct time interval
    y_ODEint_2[i] = odeint(dy_dt,y_ODEint_2[i-1],[tmin+(i-1)*delt,tmin+i*delt])[-1]
plt.figure()
plt.plot(t,y_ODEint, label='single run')
plt.plot(t,y_ODEint_2, label='step-by-step')
plt.plot(t, y_exact, label='exact')
plt.legend()
plt.ylim([-20, 20])
plt.grid()
It is important to notice that both methods are unstable, but the step-by-step version explodes slightly earlier than the single odeint call.
With, for example, dy_dt(y, t) = -(1/2)*y + 2*np.sin(3*t), the integration becomes more stable; for instance, there is no noticeable error after integrating from zero to 200.
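A quick sketch of that check, assuming the modified right-hand side really is -(1/2)*y + 2*np.sin(3*t); the names dy_dt_stable, y_single and y_step are only illustrative. With the given y0 the homogeneous part vanishes, so the exact solution reduces to the particular one.
import numpy as np
from scipy.integrate import odeint

def dy_dt_stable(y, t):
    return -(1/2)*y + 2*np.sin(3*t)

delt = 1e-2
t = np.arange(0, 200, delt)
y0 = -24/37

# exact solution of y' = -(1/2)y + 2 sin(3t) for this particular y0
y_exact = -(24/37)*np.cos(3*t) + (4/37)*np.sin(3*t)

# single call over the whole interval
y_single = odeint(dy_dt_stable, y0, t)[:, 0]

# step-by-step, restarting from the previous value on each sub-interval
y_step = np.zeros_like(t)
y_step[0] = y0
for i in range(1, len(t)):
    y_step[i] = odeint(dy_dt_stable, y_step[i-1], [t[i-1], t[i]])[-1, 0]

print(np.max(np.abs(y_single - y_exact)), np.max(np.abs(y_step - y_exact)))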

minimize a function that contains an array parameter

I am trying to minimize a function that contains an array parameter.
I tried to simplify my problem in the following example. The function Func contains the parameter c2 = linspace(-1,1,10). The optimization is over thet1, phai1, thet2, and phai2, which are bounded by the interval (0, 2*pi).
import numpy as np
import scipy.optimize as spo
a1 = 0
c1 = 0
cc2 = np.linspace(-1,1,10)
alpha1 = 1+a1/3
def Func(ang):
    thet1 = ang[0]
    phai1 = ang[1]
    thet2 = ang[2]
    phai2 = ang[3]
    RhoABC = np.array([[[1-a1,0,thet1,0,0,0,0,c1],[0,alpha1,0,0,phai2,0,c2,0],[0,0,alpha1,0,0,c2,thet2,0],[0,0,0,alpha1,c2,0,0,0],[0,phai1,0,c2,alpha1,0,0,0],[0,0,c2,0,0,alpha1,0,thet2],[0,c2,0,0,0,0,alpha1,0],[c1,0,0,phai1,0,0,0,1-a1]] for c2 in cc2])
    w, v = np.linalg.eig(RhoABC)
    return w[1]
angs0 = [.4*np.pi,1.9*np.pi,1.2*np.pi,.7*np.pi]
bnd = (0*np.pi,2*np.pi)
bnds = (bnd,bnd,bnd,bnd)
result = spo.minimize(Func,angs0,bounds=bnds)
the output is:
ValueError: too many axes: 2 (effrank=2), expected rank=1
I just want to plot the minimized value (fun) as a function of c2. Any suggestions, please?
The function to be minimized should return a float (see doc). However, Func returns a 1d array instead of a float. That is because RhoABC is 3d, so w is 2d, which makes w[1] a 1d array.
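One possible restructuring (a minimal sketch): treat c2 as a fixed scalar passed via args, make Func return a single float, and run one minimization per value in cc2, collecting result.fun for the plot against c2. Which eigenvalue to return is up to you; the sketch sorts the real parts so that "the second eigenvalue" is well defined, and the name min_vals is just an illustrative choice.
import numpy as np
import scipy.optimize as spo
import matplotlib.pyplot as plt

a1 = 0
c1 = 0
cc2 = np.linspace(-1, 1, 10)
alpha1 = 1 + a1/3

def Func(ang, c2):
    thet1, phai1, thet2, phai2 = ang
    RhoABC = np.array([[1-a1,0,thet1,0,0,0,0,c1],
                       [0,alpha1,0,0,phai2,0,c2,0],
                       [0,0,alpha1,0,0,c2,thet2,0],
                       [0,0,0,alpha1,c2,0,0,0],
                       [0,phai1,0,c2,alpha1,0,0,0],
                       [0,0,c2,0,0,alpha1,0,thet2],
                       [0,c2,0,0,0,0,alpha1,0],
                       [c1,0,0,phai1,0,0,0,1-a1]])
    w = np.sort(np.linalg.eigvals(RhoABC).real)
    return w[1]   # a single float, as minimize requires

angs0 = [.4*np.pi, 1.9*np.pi, 1.2*np.pi, .7*np.pi]
bnds = [(0, 2*np.pi)]*4

# one bounded minimization per fixed c2, keeping only the minimized value
min_vals = [spo.minimize(Func, angs0, args=(c2,), bounds=bnds).fun for c2 in cc2]

plt.plot(cc2, min_vals, 'o-')
plt.xlabel('c2')
plt.ylabel('minimized value of Func')
plt.show()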

Fitting a quadratic function in python without numpy polyfit

I am trying to fit a quadratic function to some data, and I'm trying to do this without using numpy's polyfit function.
Mathematically I tried to follow this website https://neutrium.net/mathematics/least-squares-fitting-of-a-polynomial/ but somehow I don't think that I'm doing it right. If anyone could assist me, that would be great, or if you could suggest another way to do it, that would also be awesome.
What I've tried so far:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
ones = np.ones(3)
A = np.array( ((0,1),(1,1),(2,1)))
xfeature = A.T[0]
squaredfeature = A.T[0] ** 2
b = np.array( (1,2,0), ndmin=2 ).T
b = b.reshape(3)
features = np.concatenate((np.vstack(ones), np.vstack(xfeature), np.vstack(squaredfeature)), axis = 1)
featuresc = features.copy()
print(features)
m_det = np.linalg.det(features)
print(m_det)
determinants = []
for i in range(3):
    featuresc.T[i] = b
    print(featuresc)
    det = np.linalg.det(featuresc)
    determinants.append(det)
    print(det)
    featuresc = features.copy()
determinants = determinants / m_det
print(determinants)
plt.scatter(A.T[0],b)
u = np.linspace(0,3,100)
plt.plot(u, u**2*determinants[2] + u*determinants[1] + determinants[0] )
p2 = np.polyfit(A.T[0],b,2)
plt.plot(u, np.polyval(p2,u), 'b--')
plt.show()
As you can see, my curve doesn't compare well to numpy's polyfit curve.
Update:
I went through my code and removed all the stupid mistakes, and now it works when I fit over 3 points, but I have no idea how to fit over more than three points.
This is the new code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
ones = np.ones(3)
A = np.array( ((0,1),(1,1),(2,1)))
xfeature = A.T[0]
squaredfeature = A.T[0] ** 2
b = np.array( (1,2,0), ndmin=2 ).T
b = b.reshape(3)
features = np.concatenate((np.vstack(ones), np.vstack(xfeature), np.vstack(squaredfeature)), axis = 1)
featuresc = features.copy()
print(features)
m_det = np.linalg.det(features)
print(m_det)
determinants = []
for i in range(3):
    featuresc.T[i] = b
    print(featuresc)
    det = np.linalg.det(featuresc)
    determinants.append(det)
    print(det)
    featuresc = features.copy()
determinants = determinants / m_det
print(determinants)
plt.scatter(A.T[0],b)
u = np.linspace(0,3,100)
plt.plot(u, u**2*determinants[2] + u*determinants[1] + determinants[0] )
p2 = np.polyfit(A.T[0],b,2)
plt.plot(u, np.polyval(p2,u), 'r--')
plt.show()
Instead of using Cramer's Rule, actually solve the system using least squares. Remember that Cramer's Rule will only work if the total number of points you have equals the desired order of the polynomial plus 1.
If you don't have this, then Cramer's Rule will not work, as you're trying to find an exact solution to the problem. If you have more points, the method is unsuitable, as you would create an overdetermined system of equations.
To adapt this to more points, numpy.linalg.lstsq is a better fit, as it solves Ax = b by computing the vector x that minimizes the Euclidean norm of the residual. Therefore, remove the y values from the last column of the features matrix and use numpy.linalg.lstsq to solve for the coefficients:
import numpy as np
import matplotlib.pyplot as plt
ones = np.ones(4)
xfeature = np.asarray([0,1,2,3])
squaredfeature = xfeature ** 2
b = np.asarray([1,2,0,3])
features = np.concatenate((np.vstack(ones),np.vstack(xfeature),np.vstack(squaredfeature)), axis = 1) # Change - remove the y values
determinants = np.linalg.lstsq(features, b)[0] # Change - use least squares
plt.scatter(xfeature,b)
u = np.linspace(0,3,100)
plt.plot(u, u**2*determinants[2] + u*determinants[1] + determinants[0] )
plt.show()
I get this plot now, which matches the dashed curve in your graph and also matches what numpy.polyfit gives you.

odeint for an differential system

I have a problem with odeint. I have to solve a first-order differential system and then a second-order system, but I am a little confused by the first-order one. Can you explain what I am doing wrong? Thank you :)
import scipy.integrate as integrate
import numpy as np
def fun(t,y):
    ys = np.array([y[1], (1-y[0]**2)*y[1]-y[0]])
    return(ys)
N = 3
x0 = np.array([2.00861986087484313650940188,0])
t0tf = [0, 17.0652165601579625588917206249]
T=([0 for i in range (N+1)])
T[0]= t0tf[0]
Pas = (t0tf[1]-t0tf[0])/N
for i in range (1,N+1):
    T[i]= t0tf[0] + i*Pas
X = integrate.odeint(fun, x0,T,Dfun=None, col_deriv=0,full_output=True)
T = np.array(T)
T = T.reshape(N+1,1)
S = np.append(X,T,axis=1)
print(S)
The returned error is:
ys = np.array([y[1], (1-y[0]**2)*y[1]-y[0]])
TypeError: 'float' object is not subscriptable
You need to reverse the order of the arguments to your derivative function - it should be f(y, t), not f(t, y). This is the opposite order to that used by the scipy.integrate.ode class.
Also, the concatenation S = np.append(X,T,axis=1) will fail because X is a tuple containing your integrals and a dict. Use S = np.append(X[0],T,axis=1) instead.
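A minimal sketch with both fixes applied to the code from the question (the grid T is built with a list comprehension purely for brevity):
import scipy.integrate as integrate
import numpy as np

def fun(y, t):   # (y, t) argument order, as odeint expects
    return np.array([y[1], (1 - y[0]**2)*y[1] - y[0]])

N = 3
x0 = np.array([2.00861986087484313650940188, 0])
t0tf = [0, 17.0652165601579625588917206249]
T = [t0tf[0] + i*(t0tf[1] - t0tf[0])/N for i in range(N + 1)]

X = integrate.odeint(fun, x0, T, full_output=True)
T = np.array(T).reshape(N + 1, 1)
S = np.append(X[0], T, axis=1)   # X[0] is the solution array, X[1] the info dict
print(S)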

Apply non-linear regression for multi dimension data samples in Python

I have installed NumPy and SciPy, but I don't quite understand their documentation about polyfit.
For example, here are my data samples:
[-0.042780748663101636, -0.0040771571786609945, -0.00506567946276074]
[0.042780748663101636, -0.0044771571786609945, -0.10506567946276074]
[0.542780748663101636, -0.005771571786609945, 0.30506567946276074]
[-0.342780748663101636, -0.0304077157178660995, 0.90506567946276074]
The first two columns are sample features, and the third column is the output. My goal is to get a function that takes the two features (the first two columns) and returns a prediction (the output).
Any simple example?
====================== EDIT ======================
Note that I need to fit something like a curve, not only straight lines. The polynomial should be something like this (n = 3):
a*x1^3 + b*x2^2 + c*x3 + d = y
Not:
a*x1 + b*x2 + c*x3 + d = y
x1, x2, x3 are the features of one sample, and y is the output.
Try something like the following.
edit: added an example function that uses the results of the linear regression to estimate the output.
import numpy as np
data =np.array(
[[-0.042780748663101636, -0.0040771571786609945, -0.00506567946276074],
[0.042780748663101636, -0.0044771571786609945, -0.10506567946276074],
[0.542780748663101636, -0.005771571786609945, 0.30506567946276074],
[-0.342780748663101636, -0.0304077157178660995, 0.90506567946276074]])
coefficient = data[:,0:2]
dependent = data[:,-1]
x,residuals,rank,s = np.linalg.lstsq(coefficient,dependent)
def f(x,u,v):
    return u*x[0] + v*x[1]

for datum in data:
    print(f(x,*datum[0:2]))
Which gives
>>> x
array([ 0.16991146, -30.18923739])
>>> residuals
array([ 0.07941146])
>>> rank
2
>>> s
array([ 0.64490113, 0.02944663])
and the function created with your coefficients gave
0.115817326583
0.142430900298
0.266464019171
0.859743371665
More info can be found at the documentation I posted as a comment.
edit 2: fitting your data to an arbitrary model.
edit 3: made my model a function for ease of understanding.
edit 4: made the code easier to read and changed the model to a quadratic fit, but you should be able to read this code and know how to make it minimize any residual you want now.
contrived example:
import numpy as np
from scipy.optimize import leastsq
data =np.array(
[[-0.042780748663101636, -0.0040771571786609945, -0.00506567946276074],
[0.042780748663101636, -0.0044771571786609945, -0.10506567946276074],
[0.542780748663101636, -0.005771571786609945, 0.30506567946276074],
[-0.342780748663101636, -0.0304077157178660995, 0.90506567946276074]])
coefficient = data[:,0:2]
dependent = data[:,-1]
def model(p,x):
    a,b,c = p
    u = x[:,0]
    v = x[:,1]
    return (a*u**2 + b*v + c)

def residuals(p, y, x):
    a,b,c = p
    err = y - model(p,x)
    return err

p0 = np.array([2,3,4]) #some initial guess
p = leastsq(residuals, p0, args=(dependent, coefficient))[0]

def f(p,x):
    return p[0]*x[0] + p[1]*x[1] + p[2]

for x in coefficient:
    print(f(p,x))
gives
-0.108798280153
-0.00470479385807
0.570237823475
0.413016072653
