Generating phase plane/portrait using scipy - python

This code plots the mutualistic relationship between two species; as written, the resulting growth curves are logistic. Given this, I should be able to see what its phase portrait looks like. My question is: how and where should I start if I want to show its phase plane/portrait using scipy?
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# parameters
r1 = 1.0
r2 = 0.5
e1 = 1
e2 = 0.75
a12 = 0.25
a21 = 0.25
# initial population
N10 = 1
N20 = 1
# store initial values in an array
X0 = [N10, N20]
# model/equation
def mutualism(X, t):
    N1, N2 = X
    dX = np.zeros(2)  # initialize dX as an array containing two zeros
    dX[0] = N1 * (r1 - (e1 * N1) + (a12 * N2))  # equation for dN1/dt
    dX[1] = N2 * (r2 - (e2 * N2) + (a21 * N1))  # equation for dN2/dt
    return dX
# set time length
t = np.linspace(0, 100, 100*10)
# odeint returns an array containing values for each value of t
X = odeint(mutualism,X0,t)
N1 = X[:,0]; N2 = X[:,1]
#plot
f1 = plt.figure()
plt.plot(t, N1, 'r-', label='Species 1')
plt.plot(t, N2, 'b-', label='Species 2')
plt.grid()
plt.legend(loc='best')
plt.xlabel('Time')
plt.ylabel('Population')
plt.title('Mutualism')
plt.show()
So far I've tried using matplotlib to generate the phase portrait, but to no avail.
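
One way to start (a sketch, not the only approach): evaluate the right-hand side of the model on a grid of (N1, N2) values and draw the vector field with matplotlib's streamplot (quiver works too), then overlay a few trajectories computed with odeint. The grid limits and initial conditions below are arbitrary choices of mine:

import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

# same parameters as above
r1, r2 = 1.0, 0.5
e1, e2 = 1.0, 0.75
a12, a21 = 0.25, 0.25

def mutualism(X, t):
    N1, N2 = X
    return [N1 * (r1 - e1 * N1 + a12 * N2),
            N2 * (r2 - e2 * N2 + a21 * N1)]

# vector field on a grid of (N1, N2) values
n1, n2 = np.linspace(0, 2.5, 25), np.linspace(0, 2.5, 25)
NN1, NN2 = np.meshgrid(n1, n2)
dN1, dN2 = mutualism([NN1, NN2], 0)
plt.streamplot(NN1, NN2, dN1, dN2, color='gray', density=1.2)

# overlay a few trajectories from different starting populations
t = np.linspace(0, 100, 1000)
for x0 in [(0.1, 0.1), (2.0, 0.2), (0.2, 2.0), (2.5, 2.5)]:
    sol = odeint(mutualism, x0, t)
    plt.plot(sol[:, 0], sol[:, 1], 'b-')

plt.xlabel('N1')
plt.ylabel('N2')
plt.title('Phase portrait: mutualism')
plt.show()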

Related

Using Python DAE solver for coupled equations in time and space

I would like to solve two coupled equations in time and space, using the scikits.odes.dae solver.
My equations are as follows:
dy1/dt = dy1/dz + y2
y1 = 5 * y2
The code I have written is the following
import matplotlib.pyplot as plt
import numpy as np
from scikits.odes import dae
N = 51 # number of space steps
L = 1.0 #[m] length of sorbent bed, also a guess
dz = L/(N-1) #[m] length of space step
time = np.arange(0, 1.5, 0.1)
dydz = 1
y0 = [1, 0.2] #initial values y0[0] = y1 and y0[1] = y2
yp0 = [1, 1] #initial guess for \dot{y1} and \dot{y2}
def trial_space(t, y, ydot, result):
    result[0] = ydot[0] - 6 * dydz + y[1]
    result[1] = y[0] - 5 * y[1]
solver = dae('ida', trial_space)
solution = solver.solve(time, y0, yp0)
Currently, I am feeding the solver a constant value of dydz, but actually I would like to use a central differencing scheme to obtain
dy/dz[i] = (y[i+1] - y[i-1])/(2*dz)
How do I integrate this into the solver?
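
One possible approach (a sketch under assumptions, not a tested solution): discretize y1 and y2 on the N grid points, stack them into one state vector, and compute the central differences inside the residual function. The one-sided differences at the boundaries and the algebraic_vars_idx option are my assumptions; adapt them to your actual boundary conditions and your scikits.odes version:

import numpy as np
from scikits.odes import dae

N = 51            # number of space steps
L = 1.0           # [m] length of sorbent bed
dz = L / (N - 1)  # [m] length of a space step
time = np.arange(0, 1.5, 0.1)

# state vector: y = [y1 at N points, y2 at N points]
def residual(t, y, ydot, result):
    y1, y2 = y[:N], y[N:]
    y1dot = ydot[:N]
    dy1dz = np.empty(N)
    dy1dz[1:-1] = (y1[2:] - y1[:-2]) / (2 * dz)  # central differences in the interior
    dy1dz[0] = (y1[1] - y1[0]) / dz              # one-sided at the edges (an assumption)
    dy1dz[-1] = (y1[-1] - y1[-2]) / dz
    result[:N] = y1dot - dy1dz - y2  # dy1/dt - dy1/dz - y2 = 0 at every grid point
    result[N:] = y1 - 5 * y2         # algebraic constraint y1 = 5*y2

y0 = np.concatenate([np.ones(N), 0.2 * np.ones(N)])  # initial y1 and y2
yp0 = np.concatenate([np.ones(N), np.ones(N)])       # initial guess for the derivatives
solver = dae('ida', residual, algebraic_vars_idx=list(range(N, 2 * N)))
solution = solver.solve(time, y0, yp0)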

How can I solve a contourf array problem?

I have trouble with plt.contourf.
The program is supposed to calculate the distance between S1 and M (called s1m) and between S2 and M (called s2m). From s1m and s2m we compute the wave functions Psi and PsiP, multiply them to get the intensity of the light, and pass the result to contourf to display it on a screen. When I run the program I get errors.
import numpy as np
import matplotlib.pyplot as plt
S1 = np.array([100,0,-1])
S2 = np.array([-100,0,-1])
M = np.array([1,1,0])
Lambda = 633
s1m = np.subtract(M, S1)  # vector S1M
s2m = np.subtract(M, S2)  # vector S2M
SM1 = np.multiply(s1m, s1m)
SM2 = np.multiply(s2m, s2m)
S1M = np.sqrt(SM1)  # distance s1m
S2M = np.sqrt(SM2)  # distance s2m
def intensity(S1M, S2M):
    Phi1 = (2 * np.pi * S1M) / Lambda
    Phi2 = (2 * np.pi * S2M) / Lambda
    Tet1 = (-2 * np.pi * S1M) / Lambda
    Tet2 = (-2 * np.pi * S2M) / Lambda
    Psi1 = np.exp(Phi1)
    Psi2 = np.exp(Phi2)
    Psi1P = np.exp(Tet1)
    Psi2P = np.exp(Tet2)
    Psi = Psi1 + Psi2
    PsiP = Psi1P + Psi2P
    I = Psi * PsiP
    return I
x = np.linspace(1,5,5)
y = np.linspace(1,5,5)
XX,YY = np.meshgrid(x,y)
ZZ = intensity(S1M, S2M)
plt.contourf(XX, YY, ZZ)
plt.show()
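
For comparison, here is a sketch of what the corrected computation could look like (the grid limits are my own choices, and the wave functions are made complex with 1j, which the posted code omits). The key point for contourf is that ZZ must be a 2D array with the same shape as XX and YY, so the distances have to be evaluated at every grid point:

import numpy as np
import matplotlib.pyplot as plt

S1 = np.array([100, 0, -1])
S2 = np.array([-100, 0, -1])
lam = 633  # 'lambda' is a reserved word in Python, so use another name

# observation points M = (x, y, 0) on the screen
x = np.linspace(-5, 5, 200)
y = np.linspace(-5, 5, 200)
XX, YY = np.meshgrid(x, y)

# distance from each source to every grid point
s1m = np.sqrt((XX - S1[0])**2 + (YY - S1[1])**2 + S1[2]**2)
s2m = np.sqrt((XX - S2[0])**2 + (YY - S2[1])**2 + S2[2]**2)

# complex amplitudes; the intensity is |Psi|^2
k = 2 * np.pi / lam
Psi = np.exp(1j * k * s1m) + np.exp(1j * k * s2m)
ZZ = (Psi * np.conj(Psi)).real  # 2D array, same shape as XX and YY

plt.contourf(XX, YY, ZZ)
plt.colorbar()
plt.show()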

How to implement 'solve_ivp' with vectorized='True' in python

I've been trying to solve a set of differential equations using solve_ivp. The Jacobian matrix of the system is the A you can see below. I wanted to enable the option vectorized=True, but unfortunately I do not know how to modify the present code to vectorize the Jacobian matrix A. Does anyone know how this can be done?
# imports
import numpy as np
import scipy.sparse as sp
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
# grid sizing
R = 0.05  # sphere radius
N = 1000  # number of points
D = 0.00002  # diffusion coefficient
k = 10  # Arrhenius
Cs = 1.0  # boundary concentration
C0 = 0.0  # initial concentration
time_constant = R**2.0/D
dr = R/(N-1)
# Algebra simplification
a = D/dr**2
Init_conc = np.linspace(0, 0, N)
B = np.zeros(N)
B[N-1] = Cs*(a + a/(N-1))
#
e1 = np.ones(N)
e2 = np.ones(N)
e3 = np.ones(N)
#
#
#
e1[0]=-k-6*a
e1[1:]=-k-2*a
#
#
e2[1]=6*a
for i in range(2, N):
    e2[i] = a + a/(i-1)
#
#
#
for i in range(0, N-1):
    e3[i] = a - a/(i+1)
A = sp.spdiags([e3,e1,e2],[-1,0,1],N,N,format="csc")
def dc_dt(t, C):
    dc = A.dot(C) + B
    return dc
# Solving the system, I want to implement the same thing with vectorized='True'
OutputTimes = np.linspace(0, 0.2*time_constant, 100)
ans = solve_ivp(dc_dt, (0, 0.2*time_constant), Init_conc, method='RK45',
                t_eval=OutputTimes, vectorized=False)  # a bool, not the string 'False', which would be truthy
print(ans)
Please have a look at this answer; the explanation there is thorough. For your code in particular, see the updated snippet and figure below. It is not obvious that vectorized provides any speed-up. However, passing A as the jac keyword makes a difference, though I guess that is only valid because A is constant?
# imports
import numpy as np
import scipy.sparse as sp
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt # noqa
def dc_dt(t, C):
    print(C.shape)
    if len(C.shape) == 1:
        return np.squeeze(A.dot(C)) + B
    else:
        return A.dot(C) + np.transpose(np.tile(B, (C.shape[1], 1)))
    # return np.squeeze(A.dot(C)) + B
# grid sizing
R = 0.05 # sphere radius
N = 1000 # number of points
D = 0.00002 # diffusion coefficient
k = 10 # Arrhenius
Cs = 1.0 # Boundary concentration
C0 = 0.0 # Initial concentration
time_constant = R**2.0 / D
dr = R / (N - 1)
# Algebra simplification
a = D / dr**2
Init_conc = np.repeat(0, N)
B = np.zeros(N)
B[-1] = Cs * (a + a / (N - 1))
e1 = np.ones(N)
e2 = np.ones(N)
e3 = np.ones(N)
e1[0] = -k - 6 * a
e1[1:] = -k - 2 * a
e2[1] = 6 * a
for i in range(2, N):
    e2[i] = a + a / (i - 1)
for i in range(0, N - 1):
    e3[i] = a - a / (i + 1)
A = sp.spdiags([e3, e1, e2], [-1, 0, 1], N, N, format="csc")
# Solving the system, I want to implement the same thing with vectorized='True'
OutputTimes = np.linspace(0, 0.2 * time_constant, 10000)
ans = solve_ivp(dc_dt, (0, 0.2 * time_constant), Init_conc,
                method='BDF', t_eval=OutputTimes, jac=A, vectorized=True)
plt.plot(np.arange(N), ans.y[:, 0])
plt.plot(np.arange(N), ans.y[:, 1])
plt.plot(np.arange(N), ans.y[:, 10])
plt.plot(np.arange(N), ans.y[:, 20])
plt.plot(np.arange(N), ans.y[:, 50])
plt.plot(np.arange(N), ans.y[:, -1])
plt.show()
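
As a rough check of these claims (a sketch reusing the names defined in the snippet above, not a benchmark; remove the print(C.shape) line from dc_dt first or the output will be flooded):

import time

for kwargs in ({'vectorized': False},
               {'vectorized': True},
               {'vectorized': True, 'jac': A}):
    start = time.perf_counter()
    solve_ivp(dc_dt, (0, 0.2 * time_constant), Init_conc,
              method='BDF', t_eval=OutputTimes, **kwargs)
    print(kwargs, time.perf_counter() - start)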

How to use if statement in a differential equation (SciPy)?

I am trying to solve a differential equation with Python.
In this two-equation system, if the value of the first variable (v) exceeds a threshold (30), it should be reset to another value (-65). My code is below. The problem is that after reaching 30 the value of the first variable remains constant and is not reset to -65. These equations describe the dynamics of a single neuron. The equations are taken from this website and this PDF file.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from scipy.integrate import odeint
plt.close('all')
a = 0.02
b = 0.2
c = -65
d = 8
i = 0
p = [a,b,c,d,i]
def fun(u, tspan, *p):
    du = [0, 0]
    if u[0] < 30:  # checking whether the threshold has been reached
        du[0] = (0.04*u[0] + 5)*u[0] + 150 - u[1] - p[4]
        du[1] = p[0]*(p[1]*u[0] - u[1])
    else:
        u[0] = p[2]  # reset to -65
        u[1] = u[1] + p[3]
    return du
p = tuple(p)
y0 = [0,0]
tspan = np.linspace(0,100,1000)
sol = odeint(fun, y0, tspan, args=p)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.plot(tspan,sol[:,0],'k',linewidth = 5)
plt.plot(tspan,sol[:,1],'r',linewidth = 5)
myleg = plt.legend(['v','u'], loc='upper right',
                   prop={'size': 28, 'weight': 'bold'}, bbox_to_anchor=(1, 0.9))
The solution looks like this (plot omitted):
Here is the correct solution produced by Julia, where u1 represents v (plot omitted):
This is the Julia code:
using DifferentialEquations
using Plots
a = 0.02
b = 0.2
c = -65
d = 8
i = 0
p = [a,b,c,d,i]
function fun(du,u,p,t)
    if u[1] < 30
        du[1] = (0.04*u[1] + 5)*u[1] + 150 - u[2] - p[5]
        du[2] = p[1]*(p[2]*u[1]-u[2])
    else
        u[1] = p[3]
        u[2] = u[2] + p[4]
    end
end
u0 = [0.0;0.0]
tspan = (0.0,100)
prob = ODEProblem(fun,u0,tspan,p)
tic()
sol = solve(prob,reltol = 1e-8)
toc()
plot(sol)
Recommended solution
This uses events and integrates separately after each discontinuity.
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import solve_ivp
a = 0.02
b = 0.2
c = -65
d = 8
i = 0
p = [a,b,c,d,i]
# Define event function and make it a terminal event
def event(t, u):
    return u[0] - 30
event.terminal = True
# Define differential equation
def fun(t, u):
    du = [(0.04*u[0] + 5)*u[0] + 150 - u[1] - p[4],
          p[0]*(p[1]*u[0] - u[1])]
    return du
u = [0,0]
ts = []
ys = []
t = 0
tend = 100
while True:
    sol = solve_ivp(fun, (t, tend), u, events=event)
    ts.append(sol.t)
    ys.append(sol.y)
    if sol.status == 1:  # event was hit
        # new start time for the next integration
        t = sol.t[-1]
        # reset the initial state
        u = sol.y[:, -1].copy()
        u[0] = p[2]  # reset to -65
        u[1] = u[1] + p[3]
    else:
        break
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# We have to stitch together the separate simulation results for plotting
ax.plot(np.concatenate(ts), np.concatenate(ys, axis=1).T)
myleg = plt.legend(['v','u'])
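
A possible refinement (my addition, not part of the answer above): solve_ivp events also accept a direction attribute, which restricts triggering to crossings of a given sign, so the integration only stops when v crosses the threshold from below:

def event(t, u):
    return u[0] - 30
event.terminal = True
event.direction = 1  # trigger only on upward crossings of 30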
Minimum change "solution"
It appears as though your approach works just fine with solve_ivp.
Warning: I think that in both Julia and solve_ivp the correct way to handle this kind of thing is to use events. I believe the approach below relies on an implementation detail: the state vector passed to the function is the same object as the solver's internal state vector, which allows us to modify it in place. If it were a copy, this approach wouldn't work. In addition, there is no guarantee that the solver takes steps small enough to land on the exact point where the limit is reached. Using events makes this more correct and generalisable to other differential equations which perhaps have lower gradients before the discontinuity.
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FormatStrFormatter
from scipy.integrate import solve_ivp
plt.close('all')
a = 0.02
b = 0.2
c = -65
d = 8
i = 0
p = [a,b,c,d,i]
def fun(t, u):
    du = [0, 0]
    if u[0] < 30:  # checking whether the threshold has been reached
        du[0] = (0.04*u[0] + 5)*u[0] + 150 - u[1] - p[4]
        du[1] = p[0]*(p[1]*u[0] - u[1])
    else:
        u[0] = p[2]  # reset to -65
        u[1] = u[1] + p[3]
    return du
y0 = [0,0]
tspan = (0,100)
sol = solve_ivp(fun, tspan, y0)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.plot(sol.t,sol.y[0, :],'k',linewidth = 5)
plt.plot(sol.t,sol.y[1, :],'r',linewidth = 5)
myleg = plt.legend(['v','u'],loc='upper right',prop = {'size':28,'weight':'bold'}, bbox_to_anchor=(1,0.9))
Result (plot omitted)

Offset for SciPy.optimize least squares is not correct

I have a script which generates a noisy curve composed of three different sine curves. Using LombScargle, I find the periods which are dominant because of these curves and detrend the original curve.
This involves phase folding, using SciPy optimize to fit a sine curve, extrapolating the fitted curve out of phase space onto the original multi-sine-wave curve, and then subtracting this fit to detrend the data.
I iterate this process until the signal on the LombScargle is less than 3 times some pre-generated noise height (equivalent to the signal-to-noise ratio becoming too small).
But after the first iteration the phase of the fitted curve is way off! I can't see why this happens. Can anyone help?
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import leastsq
from astropy.stats import LombScargle
t = np.linspace(15,30,1000)
y = 5 * np.sin(2*np.pi * 1./2. * (t + 1)) + 30
y1 = 11 * np.sin(2*np.pi * 1./3. * (t + 1))
y4 = 2 * np.sin(2*np.pi * 1./7. * (t + 1))
sampl = np.random.uniform(low = 0, high = 2.5, size = len(t))
y2 = y+y1+y4+sampl
y2 = y2/np.nanmedian(y2)
freq = np.linspace(1./150.,1./0.5,10000)
power2 = LombScargle(t,sampl).power(freq)
for _ in range(5):
    plt.figure()
    plt.subplot(221)
    plt.plot(t, y2, '-')
    plt.grid()
    power = LombScargle(t, y2).power(freq)
    p = 1./freq[np.argmax(power)]
    plt.title(p)
    plt.subplot(222)
    plt.plot(1./freq, power)
    plt.grid()
    # print(p)
    if np.max(power) <= np.max(power2)*3:
        print('done')
        break
    else:
        foldtimes = (t - t[0])/p
        FoldTimes = foldtimes % 1
        p_0 = np.min(FoldTimes)
        dp = 0.01
        its = math.ceil((np.max(FoldTimes) - p_0)/dp)
        n = np.linspace(0, its, its+1)
        binned_flux = []
        binned_phase = []
        for i in range(len(n)):
            indices = np.where(((p_0 + (n[i]*dp)) <= FoldTimes) & (FoldTimes <= (p_0 + (n[i]*dp) + dp)))[0]
            if len(indices) > 1:
                binned_flux.append(np.nanmedian(y2[indices]))     # bin-averaged fluxes
                binned_phase.append(np.mean(FoldTimes[indices]))  # bin-averaged phases
        binned_flux = np.array(binned_flux)
        binned_phase = np.array(binned_phase)
        plt.subplot(223)
        plt.grid()
        plt.plot(binned_phase, binned_flux, '.', linestyle='None')
        plt.ylabel('Relative Flux')
        plt.xlabel('Phase')
        guess_mean = np.mean(y2)
        guess_A = (np.max(y2) - np.min(y2))/2.
        guess_phase = 0
        optimize_func = lambda x: ((x[0] * np.sin(2*np.pi*(np.sort(binned_phase) + x[1]))) + x[2]) - binned_flux
        est_A, est_phase, est_mean = leastsq(optimize_func, [guess_A, guess_phase, guess_mean])[0]
        print(est_phase)
        fit = (est_A*np.sin(2*np.pi*(1./p)*(t + (est_phase*p)))) + est_mean
        fit_p = (est_A*np.sin(2*np.pi*(1.)*(np.sort(binned_phase) + est_phase))) + est_mean
        plt.plot(binned_phase, fit_p)
        plt.subplot(224)
        plt.plot(t, y2, '-')
        plt.plot(t, fit)
        plt.grid()
        y2 = (y2 - fit) + est_mean  # rewrite the original curve minus the fit
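
One thing worth checking (an observation, not a verified fix): the folding uses t - t[0], but the extrapolated fit is evaluated at t, which shifts the phase by t[0]/p at every iteration. A minimal sketch of a consistent fold/fit/extrapolate step, reusing the names from the loop above:

phase = ((t - t[0]) / p) % 1  # the same phase variable the folding used
model = lambda x, ph: x[0] * np.sin(2 * np.pi * (ph + x[1])) + x[2]
optimize_func = lambda x: model(x, binned_phase) - binned_flux
est_A, est_phase, est_mean = leastsq(optimize_func, [guess_A, guess_phase, guess_mean])[0]
fit = model([est_A, est_phase, est_mean], phase)  # extrapolate with t - t[0], not t

(binned_phase is already in ascending order because the bins are scanned in order, so the np.sort call can be dropped without changing the pairing with binned_flux.)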
