Solving ODE with solve_ivp gets incredibly slow or freezes completely - python

I use solve_ivp to solve an ODE:

import numpy as np
from scipy.integrate import solve_ivp

def test_ode(t, y):
    dydt = C - y + (y ** 8 / (1 + y ** 8))
    return dydt

steady_state = []
for C in np.linspace(0, 1, 1001):
    sol = solve_ivp(test_ode, [0, 1e06], [0], method='BDF')
    steady_state.append(sol.y[0][-1])
This gives me a RuntimeWarning:
/anaconda3/lib/python3.6/site-packages/scipy/integrate/_ivp/bdf.py:418:
RuntimeWarning: divide by zero encountered in power
  factors = error_norms ** (-1 / np.arange(order, order + 3))
But even worse, the run basically freezes (or at least gets incredibly slow). Replacing the initial value [0] by [1e-08] does not solve the problem. How can I fix this?
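One thing worth trying first (a sketch, not from the answer below, and assuming SciPy >= 1.4 for the args keyword): pass C explicitly and give BDF the analytic Jacobian, which a stiff solver otherwise has to estimate by finite differences:

import numpy as np
from scipy.integrate import solve_ivp

def test_ode(t, y, C):
    return C - y + y ** 8 / (1 + y ** 8)

def jac(t, y, C):
    # d/dy of the right-hand side: -1 + 8*y**7 / (1 + y**8)**2
    return np.array([[-1 + 8 * y[0] ** 7 / (1 + y[0] ** 8) ** 2]])

steady_state = []
for C in np.linspace(0, 1, 1001):
    sol = solve_ivp(test_ode, [0, 1e06], [0.0], method='BDF',
                    jac=jac, args=(C,))
    steady_state.append(sol.y[0][-1])

Whether this alone removes the slowdown is not guaranteed; the answer below takes a different route.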

You can use NumbaLSODA: https://github.com/Nicholaswogan/NumbaLSODA . It's like solve_ivp, but all the code can be compiled, so it is very speedy:
from NumbaLSODA import lsoda_sig, lsoda
import numpy as np
import numba as nb
import time

@nb.cfunc(lsoda_sig)
def test_ode(t, y_, dydt, p):
    y = y_[0]
    C = p[0]
    dydt[0] = C - y + (y ** 8 / (1 + y ** 8))

funcptr = test_ode.address

@nb.njit()
def main():
    steady_state = np.empty((1001,), np.float64)
    CC = np.linspace(0, 1, 1001)
    y0 = np.array([0.0])
    for i in range(len(CC)):
        data = np.array([CC[i]], np.float64)
        t_eval = np.array([0.0, 1.0e6])
        sol, success = lsoda(funcptr, y0, t_eval, data=data)
        steady_state[i] = sol[-1, 0]
    return steady_state

main()  # first call triggers the JIT compilation; time the second call only
start = time.time()
steady_state = main()
end = time.time()
print(end - start)
The result is 0.013 seconds for the whole 1001-value sweep.
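Note that t_eval here contains only the two endpoints, so lsoda stores just the initial and final states; sol[-1, 0] is therefore the steady-state value at t = 1e6.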

Related

Multiprocess not recognizing defined function

I'm trying to fit a simple Gaussian model to some data using the Markov chain Monte Carlo package emcee while utilizing parallel processing. I am using multiprocess instead of multiprocessing because I am working in a Jupyter notebook, where multiprocessing often runs into problems for some reason. Currently, the code says that one of my functions is not defined even though it was defined earlier, and I'm not sure why it isn't being carried over once the parallelization starts. Specifically, the error occurs on the very last line, saying that "log_prior" is not defined. Any thoughts on how to fix this issue would be much appreciated.
import numpy as np
import emcee
from scipy.optimize import minimize
import matplotlib.pyplot as plt

np.random.seed(123)

# Choose the "true" parameters.
mu_true = 5
sig_true = 0.5
f_true = 0.534

# Generate some synthetic data from the model.
N = 500
x = np.sort(10 * np.random.rand(N))
yerr = 0.03 + 0.05 * np.random.rand(N)
y = np.exp(-np.power(x - mu_true, 2.) / (2 * np.power(sig_true, 2.)))
y += 0.5 * np.abs(f_true * y) * np.random.randn(N)
y += yerr * np.random.randn(N)

def log_likelihood(theta, x, y, yerr):
    mu, sig, log_f = theta
    model = np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
    sigma2 = yerr**2 + model**2 * np.exp(2 * log_f)
    return -0.5 * np.sum((y - model) ** 2 / sigma2 + np.log(sigma2))

np.random.seed(42)
nll = lambda *args: -log_likelihood(*args)
initial = np.array([mu_true, sig_true, np.log(f_true)]) + 0.1 * np.random.randn(3)
soln = minimize(nll, initial, args=(x, y, yerr))
mu_ml, sig_ml, log_f_ml = soln.x

def log_prior(theta):
    mu, sig, log_f = theta
    if 2 < mu < 10 and 0.0 < sig < 5 and -10.0 < log_f < 1.0:
        return 0.0
    return -np.inf

def log_probability(theta, x, y, yerr):
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood(theta, x, y, yerr)

import time
from multiprocess import Pool

pos = soln.x + 0.2 * np.random.randn(32, 3)
nwalkers, ndim = pos.shape
nsteps = 10000

if __name__ == "__main__":
    with Pool() as pool:
        sampler = emcee.EnsembleSampler(
            nwalkers, ndim, log_probability, args=(x, y, yerr), pool=pool)
        sampler.run_mcmc(pos, nsteps, progress=True)
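There is no accepted answer in this excerpt, but a common workaround for name-lookup failures when a pool has to serialize a callback is to make the function handed to the sampler self-contained, so it does not depend on other globals defined in the notebook. A sketch of that idea (not the original thread's solution):

def log_probability(theta, x, y, yerr):
    # Everything the walkers need lives inside this one function, so
    # nothing has to be resolved in the notebook's namespace after
    # (de)serialization in the worker processes.
    mu, sig, log_f = theta
    # prior, inlined instead of calling a global log_prior
    if not (2 < mu < 10 and 0.0 < sig < 5 and -10.0 < log_f < 1.0):
        return -np.inf
    model = np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
    sigma2 = yerr**2 + model**2 * np.exp(2 * log_f)
    return -0.5 * np.sum((y - model) ** 2 / sigma2 + np.log(sigma2))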

Ode integrator Python TypeError 'float' object is not subscriptable

I am desperately trying to use the scipy ODE integrator, but I keep getting the following error:
Y[0] = (1/I3) * T_z(INP[0], INP[1], INP[2], INP[3], INP[4])
TypeError: 'float' object is not subscriptable
My code is the following :
import scipy.integrate as spi
import numpy as np
import pylab as pl
from time import time

# Constants
I3 = 0.00396
lamb = 1
L = 5*10**-1
mu = 1
m = 0.1
Cz = 0.5
rho = 1.2
S = 0.03*0.4
K_z = 1/2*rho*S*Cz
g = 9.81

# Initial conditions
omega0 = 10*2*np.pi
V0 = 25
theta0 = np.pi/2
phi0 = 0
psi0 = -np.pi/9
X0 = 0
Y0 = 0
Z0 = 1.8

# For integration
t_start = 0.0
t_end = 5
t_step = 0.1
t_range = np.arange(t_start, t_end+t_step, t_step)
INPUT = omega0, V0, theta0, phi0, psi0, X0, Y0, Z0  # initial conditions

def diff_eqs(INP, t):
    def M(v_G, w_z):
        return L*K_z*(v_G**2 + v_G*L*w_z*np.sin(w_z*t_step)+(L*w_z)**2)
    def F_x(w_z, v_G, theta, phi, psi):
        return K_z*(v_G**2+(L*w_z)**2)*np.sin(theta)*np.sin(phi) + lamb*v_G*(np.cos(psi)*np.cos(phi) - np.cos(theta)*np.sin(phi)*np.sin(psi))
    def F_y(w_z, v_G, theta, phi, psi):
        return -K_z*(v_G**2+(L*w_z)**2)*np.sin(theta)*np.cos(phi) + lamb*v_G*(np.cos(psi)*np.sin(phi) + np.cos(theta)*np.cos(phi)*np.sin(psi))
    def F_z(w_z, v_G, theta, phi, psi):
        return -K_z*(v_G**2+(L*w_z)**2)*np.cos(theta) + lamb*v_G*np.sin(theta)*np.sin(psi) - m*g
    def T_x(w_z, v_G, theta, phi, psi):
        return M(v_G, w_z)*(-np.sin(w_z*t_step)*(np.cos(psi)*np.cos(phi) - np.cos(theta)*np.sin(phi)*np.sin(psi)) \
               + np.cos(w_z*t_step)*(-np.sin(psi)*np.cos(phi) - np.cos(theta)*np.sin(phi)*np.cos(psi))) \
               - mu * w_z * (np.sin(theta)*np.sin(phi))
    def T_y(w_z, v_G, theta, phi, psi):
        return M(v_G, w_z)*(-np.sin(w_z*t_step)*(np.cos(psi)*np.sin(phi) + np.cos(theta)*np.cos(phi)*np.sin(psi)) \
               + np.cos(w_z*t_step)*(-np.sin(psi)*np.sin(phi) - np.cos(theta)*np.cos(phi)*np.cos(psi))) \
               - mu * w_z * (np.sin(theta)*np.cos(phi))
    def T_z(w_z, v_G, theta, phi, psi):
        return M(v_G, w_z)*(-np.sin(w_z*t_step)*np.sin(theta)*np.sin(psi) + np.cos(w_z*t_step)*np.sin(theta)*np.cos(psi)) \
               - mu * w_z * np.cos(theta)

    Y = np.zeros(8)
    Y[0] = (1/I3) * T_z(INP[0], INP[1], INP[2], INP[3], INP[4])
    Y[1] = -(lamb/m)*F_x(INP[0], INP[1], INP[2], INP[3], INP[4])
    Y[2] = (1/(I3*INP[0]))*(-T_y(INP[0], INP[1], INP[2], INP[3], INP[4])*np.cos(INP[4]) - T_x(INP[0], INP[1], INP[2], INP[3], INP[4])*np.sin(INP[4]))
    Y[3] = (1/(I3*INP[0]*np.cos(INP[3]))) * (-T_y(INP[0], INP[1], INP[2], INP[3], INP[4])*np.sin(INP[4]) + T_x(INP[0], INP[1], INP[2], INP[3], INP[4])*np.cos(INP[4]))
    Y[4] = -(1/(m*INP[1]))*F_y(INP[0], INP[1], INP[2], INP[3], INP[4])
    Y[5] = INP[1]*(-np.cos(INP[4])*np.cos(INP[3]) + np.sin(INP[4])*np.sin(INP[3])*np.cos(INP[2]))
    Y[6] = INP[1]*(-np.cos(INP[4])*np.sin(INP[3]) - np.sin(INP[4])*np.cos(INP[3])*np.cos(INP[2]))
    Y[7] = INP[1]*(-np.sin(INP[4])*np.sin(INP[2]))
    return Y

ode = spi.ode(diff_eqs)
# BDF method suited to stiff systems of ODEs
ode.set_integrator('vode', nsteps=500, method='bdf')
ode.set_initial_value(INPUT, t_start)
ts = []
ys = []
while ode.successful() and ode.t < t_end:
    ode.integrate(ode.t + t_step)
    ts.append(ode.t)
    ys.append(ode.y)
t = np.vstack(ts)
I have a set of 8 differential equations I want to solve numerically, so I have 8 initial values stored in "INPUT". But when I use this variable in ode.set_initial_value(INPUT, t_start), it keeps insisting that the variable is a float! It has been bugging me for hours; the answer may be obvious, but I can't see where I made a mistake. And I don't think the equations themselves, messy as they are, are involved here.
Thanks in advance for your help.
Your argument order (INP, t) is the one odeint requires in the ODE function. The ode class instead calls the right-hand side as f(t, y), so the first argument your function receives is the float t, which is why subscripting it fails; for ode you need the order (t, INP).
Try the more recent solve_ivp interface: it has about the same functionality as the ode class and about the same compact call structure as odeint.
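A minimal sketch of both fixes, reusing the names from the question:

# Fix 1: wrap the function so the ode class can call it as f(t, y).
def diff_eqs_ivp(t, INP):
    return diff_eqs(INP, t)

ode = spi.ode(diff_eqs_ivp)

# Fix 2: the same wrapper works directly with solve_ivp.
from scipy.integrate import solve_ivp
sol = solve_ivp(diff_eqs_ivp, [t_start, t_end], INPUT, method='BDF')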

Using solve_ivp instead of odeint to solve an initial value problem

Currently, I solve the following ODE system using odeint:

dx/dt = (-x + u)/2.0
dy/dt = (-y + x)/5.0
initial conditions: x = 0, y = 0

However, I would like to use solve_ivp, which seems to be the recommended option for this type of problem, but honestly I don't know how to adapt the code...
Here is the code I'm using with odeint:
import numpy as np
from scipy.integrate import odeint, solve_ivp
import matplotlib.pyplot as plt

def model(z, t, u):
    x = z[0]
    y = z[1]
    dxdt = (-x + u)/2.0
    dydt = (-y + x)/5.0
    dzdt = [dxdt, dydt]
    return dzdt

def main():
    # initial condition
    z0 = [0, 0]
    # number of time points
    n = 401
    # time points
    t = np.linspace(0, 40, n)
    # step input
    u = np.zeros(n)
    # change to 2.0 at time = 5.0
    u[51:] = 2.0
    # store solution
    x = np.empty_like(t)
    y = np.empty_like(t)
    # record initial conditions
    x[0] = z0[0]
    y[0] = z0[1]
    # solve ODE
    for i in range(1, n):
        # span for next time step
        tspan = [t[i-1], t[i]]
        # solve for next step
        z = odeint(model, z0, tspan, args=(u[i],))
        # store solution for plotting
        x[i] = z[1][0]
        y[i] = z[1][1]
        # next initial condition
        z0 = z[1]
    # plot results
    plt.plot(t, u, 'g:', label='u(t)')
    plt.plot(t, x, 'b-', label='x(t)')
    plt.plot(t, y, 'r--', label='y(t)')
    plt.ylabel('values')
    plt.xlabel('time')
    plt.legend(loc='best')
    plt.show()

main()
It's important to note that solve_ivp expects f(t, z) as the right-hand side of the ODE. If you don't want to change your ode function and still want to pass your parameter u, I recommend defining a wrapper function:
def model(z, t, u):
    x = z[0]
    y = z[1]
    dxdt = (-x + u)/2.0
    dydt = (-y + x)/5.0
    dzdt = [dxdt, dydt]
    return dzdt

def odefun(t, z):
    if t < 5:
        return model(z, t, 0)
    else:
        return model(z, t, 2)
Now it's easy to call solve_ivp:
def main():
    # initial condition
    z0 = [0, 0]
    # number of time points
    n = 401
    # time points
    t = np.linspace(0, 40, n)
    # step input
    u = np.zeros(n)
    # change to 2.0 at time = 5.0
    u[51:] = 2.0

    res = solve_ivp(fun=odefun, t_span=[0, 40], y0=z0, t_eval=t)
    x = res.y[0, :]
    y = res.y[1, :]

    # plot results
    plt.plot(t, u, 'g:', label='u(t)')
    plt.plot(t, x, 'b-', label='x(t)')
    plt.plot(t, y, 'r--', label='y(t)')
    plt.ylabel('values')
    plt.xlabel('time')
    plt.legend(loc='best')
    plt.show()

main()
Note that without passing t_eval=t, the solver will automatically choose the time points inside tspan at which the solution will be stored.
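If u were available only as the sampled array from the question rather than a closed-form switching rule, the wrapper could interpolate the signal instead; a sketch under that assumption (the sample times are called t_grid here so they don't shadow the solver's t):

def odefun(t, z):
    # look up the step input at time t from the sampled arrays
    u_t = np.interp(t, t_grid, u)
    return model(z, t, u_t)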

How to Change a Function without Changing its Parameters

I am new to Python and in the learning stages. I wanted to implement the Particle Swarm Optimization (PSO) algorithm, which I did with help from online materials and Python tutorials. In PSO, a simple test problem is used, i.e. 100 * ((y - x**2)**2) + ((1 - x**2)**2). This problem is defined in a fitness function.
def fitness(x, y):
    return 100 * ((y - (x**2))**2) + ((1 - (x**2))**2)
Now, I want to replace this simple calculus problem with a simple first-order ordinary differential equation (ODE) without changing the existing function parameters (x, y), and I want to return the values of dy_dx, y0 and t for further processing.
# Define a function which calculates the derivative
def dy_dx(y, x):
    return x - y

t = np.linspace(0, 5, 100)
y0 = 1.0  # the initial condition
ys = odeint(dy_dx, y0, t)
In Python, the odeint function is used for ODEs; it requires three essential parameters, i.e. func/model, y0 (the initial condition on y, which can be a vector) and t (a sequence of time points at which to solve for y); see the odeint documentation for an example of these parameters.
I don't want to change the parameters of fitness, because it would be difficult for me to make changes in the algorithm.
For simplicity I pasted the full code below, and my question is open to anyone who wants to modify the code with further parameters in General Best, Personal Best and r[i]; a hedged sketch of one way to do it follows the code.
import numpy as np
from scipy.integrate import odeint
import random as rand
from numpy import array
import matplotlib.pyplot as plt
def main():
    # Variables
    n = 40
    num_variables = 2
    a = np.empty((num_variables, n))
    v = np.empty((num_variables, n))
    Pbest = np.empty((num_variables, n))
    Gbest = np.empty((1, 2))
    r = np.empty((n))

    for i in range(0, num_variables):
        for j in range(0, n):
            Pbest[i][j] = rand.randint(-20, 20)
            a[i][j] = Pbest[i][j]
            v[i][j] = 0

    for i in range(0, n):
        r[i] = fitness(a[0][i], a[1][i])

    # Sort elements of Pbest
    Order(Pbest, r, n)

    Gbest[0][0] = Pbest[0][0]
    Gbest[0][1] = Pbest[1][0]

    generation = 0
    plt.ion()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.grid(True)

    while generation < 1000:
        for i in range(n):
            # Get Personal Best
            if fitness(a[0][i], a[1][i]) < fitness(Pbest[0][i], Pbest[1][i]):
                Pbest[0][i] = a[0][i]
                Pbest[1][i] = a[1][i]
            # Get General Best
            if fitness(Pbest[0][i], Pbest[1][i]) < fitness(Gbest[0][0], Gbest[0][1]):
                Gbest[0][0] = Pbest[0][i]
                Gbest[0][1] = Pbest[1][i]

        # Calculate Velocity
        Vector_Velocidad(n, a, Pbest, Gbest, v)

        generation = generation + 1
        print('Generacion: ' + str(generation) + ' - - - Gbest: ' + str(Gbest))

        line1 = ax.plot(a[0], a[1], 'r+')
        line2 = ax.plot(Gbest[0][0], Gbest[0][1], 'g*')
        ax.set_xlim(-10, 10)
        ax.set_ylim(-10, 10)
        fig.canvas.draw()
        ax.clear()
        ax.grid(True)

    print('Gbest: ')
    print(Gbest)
def Vector_Velocidad(n, a, Pbest, Gbest, v):
    for i in range(n):
        # Velocity in X
        v[0][i] = 0.7 * v[0][i] + (Pbest[0][i] - a[0][i]) * rand.random() * 1.47 + (Gbest[0][0] - a[0][i]) * rand.random() * 1.47
        a[0][i] = a[0][i] + v[0][i]
        v[1][i] = 0.7 * v[1][i] + (Pbest[1][i] - a[1][i]) * rand.random() * 1.47 + (Gbest[0][1] - a[1][i]) * rand.random() * 1.47
        a[1][i] = a[1][i] + v[1][i]

def fitness(x, y):
    return 100 * ((y - (x**2))**2) + ((1 - (x**2))**2)

def Order(Pbest, r, n):
    for i in range(1, n):
        for j in range(0, n - 1):
            if r[j] > r[j + 1]:
                # Order the fitness
                tempRes = r[j]
                r[j] = r[j + 1]
                r[j + 1] = tempRes
                # Order the X, Y
                tempX = Pbest[0][j]
                Pbest[0][j] = Pbest[0][j + 1]
                Pbest[0][j + 1] = tempX
                tempY = Pbest[1][j]
                Pbest[1][j] = Pbest[1][j + 1]
                Pbest[1][j + 1] = tempY

if __name__ == '__main__':
    main()
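The excerpt includes no answer, but here is one hedged sketch of how the fitness(x, y) signature could be kept while solving an ODE inside it. The parameterized ODE dY/dt = x*t - y*Y and the scalar objective are hypothetical stand-ins, not from the original post:

from scipy.integrate import odeint

t = np.linspace(0, 5, 100)
y0 = 1.0  # the initial condition

def fitness(x, y):
    # Treat the particle's coordinates (x, y) as parameters of the ODE,
    # solve it over t, and reduce the trajectory to one scalar score so
    # the rest of the PSO code can stay exactly as it is.
    def dY_dt(Y, t):
        return x * t - y * Y
    ys = odeint(dY_dt, y0, t)
    return float(np.sum(ys ** 2))  # example objective: norm of the trajectory

Because the PSO loop only ever calls fitness(a[0][i], a[1][i]), swapping in this version requires no changes to main(), Order or Vector_Velocidad.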

Error in Optimization with Lagrange Multiplier

I'm trying to maximize/minimize a function of two variables using the Lagrange multiplier method; below is my code.
import numpy as np
from scipy.optimize import fsolve

Sa = 200
Sm = 100
n = 90
mu1 = 500
mu2 = 150
sigma1 = 25
sigma2 = 10
f = 0.9

def func(X):
    u1 = X[0]
    u2 = X[1]
    L = X[2]  # this is the multiplier. lambda is a reserved keyword in python
    # function   --> f(u1,u2) = u1**2 + u2**2
    # constraint --> g(u1,u2) = (Snf/a)**1/b - n = 0
    Snf = Sa/(1 - Sm/(sigma1*u1 + mu1))
    a = (f*(sigma1*u1 + mu1)**2)/(sigma2*u2 + mu2)
    b = -1/3*(f*(sigma1*u1 + mu1))/(sigma2*u2 + mu2)
    return (u1**2 + u2**2 - L * ((Snf/a)**1/b) - n)

def dfunc(X):
    dLambda = np.zeros(len(X))
    h = 1e-3  # this is the step size used in the finite difference.
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X + dX) - func(X - dX))/(2*h)
    return dLambda

# this is the max
X1 = fsolve(dfunc, [1, 1, 0])
print(X1, func(X1))

# this is the min
X2 = fsolve(dfunc, [-1, -1, 0])
print(X2, func(X2))
When I use a simple function as the constraint, such as u1 + u2 = 4 or u1**2 + u2**2 = 20, it works just fine, but when I try my actual constraint function it always gives the error below. Is there a reason why? Thanks for the help.
C:\Python34\lib\site-packages\scipy\optimize\minpack.py:161:
RuntimeWarning: The iteration is not making good progress, as measured by the
improvement from the last five Jacobian evaluations.
warnings.warn(msg, RuntimeWarning)
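No answer is included in this excerpt, but one detail stands out: in Python, ** binds tighter than /, so (Snf/a)**1/b parses as ((Snf/a)**1)/b rather than (Snf/a)**(1/b), and the trailing - n sits outside the multiplier term. A sketch of the likely intended return line in func (an assumption about intent, based on the comment g(u1,u2) = (Snf/a)**1/b - n = 0):

# objective minus multiplier times the whole constraint
return u1**2 + u2**2 - L * ((Snf/a)**(1/b) - n)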
