radial basis network derivatives are pushing means together - python

I am trying to make my own implementation of a simple neural network to classify points. I heard about a specific type of activation function that I am interested in testing, the Gaussian. I do not want to just use ReLUs or sigmoids: I am trying to build a network that takes as input about 300 x and y values, then in the first layer computes the Gaussian function on these values with about 50 neurons, each of which has a separate x and y value as its mean (I will keep sigma constant). Mathematically I anticipate this to look like
exp(- [(x - Mx)^2 + (y - My)^2] / (2 * sigma^2) ) / sqrt(2 * pi * sigma^2)
then I will perform a weighted sum of these terms over all the neurons in the first layer, add a bias, and pass it through a sigmoid to get my prediction. I perform this step for each training example and get a list of predictions. I think my forward propagation is fine, but I will include the code for it in case someone can spot an obvious error in my implementation. Then I perform the back-propagation. I have tested my updating of the weights and bias, and I believe that they are not the problem. I think that there is something wrong with my implementation of the gradient for the means, however, because they always cluster to a single point, which clearly does not minimize the cost function. I have already tried using a couple of different data sets and varying some hyperparameters, all to no avail. Can anyone figure out what the problem is?
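For reference, by the chain rule I believe the gradient of the cost with respect to each mean should look like this (using the same symbols as in my code below, where X1_(j,i) is the Gaussian activation of neuron j on example i):
dJ/dMx_j = (1/m) * sum_i [ dZ2_i * W2_j * X1_(j,i) * (x_i - Mx_j) / sigma^2 ]
and the analogous expression with y and My for dMy. This is my own derivation, so it may itself be part of the problem.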
Here is my code.
# libraries
import matplotlib.patches as patches
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pdb

# functions
def gaussian(sq_error, sigma):
    return ((1/np.sqrt(2*np.pi*sigma**2))) * np.exp(-(sq_error)/(2*sigma**2))

def calc_X1(X0, Mx, My, m, sigma):
    X1 = []  # final shape will be (n1, m)
    for ex in range(0, m):
        sq_error = (X0[0][ex] - Mx) **2 + (X0[1][ex] - My) **2
        X1.append(gaussian(sq_error, sigma))
    X1 = np.array(X1)
    return X1.T

def sigmoid(Z):
    return 1 / (1 + np.exp(-Z))

def calc_X2(W2, X1, b2):
    return sigmoid(np.dot(W2, X1) + b2)

def cost(X2, Y, m):
    return -1/m * ( np.dot(Y, np.log(X2.T)) + np.dot(1-Y, np.log(1-X2.T)))[0]

def calc_dZ2(X2, Y):
    return X2 - Y

def calc_dM(dZ2, W2, X1, sigma, M, m, xOrY, X0):
    cur_dM = np.zeros(M.shape)
    for i in range(0, m):
        # pdb.set_trace()
        cur_dM += dZ2[0][i] * float(np.dot(W2, X1.T[i])) * 1/sigma**2 * (X0[xOrY][i] - M)
    return cur_dM / m

def train_correct(X2, Y, m):
    ct = 0
    for i in range(0, m):
        if np.round(X2[0][i]) == Y[i]:
            ct += 1
    return ct / m
# graphing functions
def plot_train_data(X, Y, m, ax):
    for ex in range(0, m):
        xCur = X[0][ex]
        yCur = X[1][ex]
        if Y[ex] == 1:
            color = (1, 0, 0)
        else:
            color = (0, 0, 1)
        ax.scatter(xCur, yCur, c=color)

def probability_hash(pr):
    return (float(pr), float(np.round(pr)), float(1-pr))

def probability_hash_1d(pr):
    return float(pr)

def plot_boundary(Mx, My, sigma, W2, b2, ax):
    boundsx = [-5, 5]
    boundsy = [-5, 5]
    samples = [10, 10]
    width = (boundsx[1] - boundsx[0]) / samples[0]
    height = (boundsy[1] - boundsy[0]) / samples[1]
    pt = np.zeros((2, 1))
    for x in np.linspace(boundsx[0], boundsx[1], samples[0]):
        for y in np.linspace(boundsy[0], boundsy[1], samples[1]):
            pt[0][0] = x
            pt[1][0] = y
            X1_cur = calc_X1(pt, Mx, My, 1, sigma)
            X2_cur = calc_X2(W2, X1_cur, b2)
            # ax.add_patch(patches.Rectangle((x, y), width, height, facecolor=probability_hash(X2_cur)))
            ax.scatter(x, y, c=probability_hash(X2_cur))

def cool_plot_boundary(Mx, My, sigma, W2, b2, ax):
    boundsx = [-2, 2]
    boundsy = [-2, 2]
    samples = [50, 50]
    width = (boundsx[1] - boundsx[0]) / samples[0]
    height = (boundsy[1] - boundsy[0]) / samples[1]
    pt = np.zeros((2, 1))
    heats = []
    xs = np.linspace(boundsx[0], boundsx[1], samples[0])
    ys = np.linspace(boundsy[0], boundsy[1], samples[1])
    for x in xs:
        heats.append([])
        for y in ys:
            pt[0][0] = x
            pt[1][0] = y
            X1_cur = calc_X1(pt, Mx, My, 1, sigma)
            X2_cur = calc_X2(W2, X1_cur, b2)
            heats[-1].append(probability_hash_1d(X2_cur))
    # xticks = []
    # yticks = []
    # for i in range(0, len(xs)):
    #     if i % 3 == 0:
    #         xticks.append(round(xs[i], 2))
    # for i in range(0, len(ys)):
    #     if i % 3 == 0:
    #         yticks.append(round(ys[i], 2))
    xticks = []
    yticks = []
    sns.heatmap(heats, ax=ax, cbar=True, xticklabels=xticks, yticklabels=yticks)

def plot_m(Mx, My, n1, ax):
    for i in range(0, n1):
        ax.scatter(Mx[i], My[i], c="k")
# initialize parameters
file = "data/disk2.csv"
df = pd.read_csv(file)
sigma = 2
iterations = 10000
learning_rate = 0.9
n0 = 2  # DO NOT CHANGE, formality
X0 = np.row_stack((df["0"], df["1"]))  # shape is (2, m)
Y = np.array(df["2"])
m = len(Y)
n1 = 50
Mx = np.random.randn(n1)
My = np.random.randn(n1)
X1 = calc_X1(X0, Mx, My, m, sigma)
n2 = 1  # DO NOT CHANGE, formality
small_number = 0.01
W2 = np.random.randn(1, n1) * small_number
b2 = 0
X2 = calc_X2(W2, X1, b2)
J = cost(X2, Y, m)
Js = []
itters = []
fig = plt.figure()
plotGap = 200

for i in range(0, iterations):
    # forward propagation
    X1 = calc_X1(X0, Mx, My, m, sigma)
    X2 = calc_X2(W2, X1, b2)
    J = cost(X2, Y, m)
    if i % plotGap == 0:
        fig.clear()
        costAx = fig.add_subplot(311)
        plotAx = fig.add_subplot(312)
        pointsAx = fig.add_subplot(313)
        cool_plot_boundary(Mx, My, sigma, W2, b2, plotAx)
        # plot_boundary(Mx, My, sigma, W2, b2, plotAx)
        plot_train_data(X0, Y, m, pointsAx)
        Js.append(J)
        itters.append(i)
        costAx.plot(itters, Js, c="k")
        print("cost = " + str(J) + "\ttraining correct = " + str(train_correct(X2, Y, m)))
        plot_m(Mx, My, n1, pointsAx)
        plt.pause(0.1)
    # back propagation
    dZ2 = calc_dZ2(X2, Y)
    dW2 = np.dot(dZ2, X1.T) / m
    db2 = np.sum(dZ2) / m
    dMx = calc_dM(dZ2, W2, X1, sigma, Mx, m, 0, X0)
    dMy = calc_dM(dZ2, W2, X1, sigma, My, m, 1, X0)
    b2 -= learning_rate * db2
    W2 -= learning_rate * dW2
    Mx -= learning_rate * dMx
    My -= learning_rate * dMy
For data, I have a CSV with a bunch of point locations and labels. You can use this code to generate a similar CSV. (Make sure you have a folder called data in the folder you run this from.)
# makes data in R2 to learn
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

n = 2
# number of examples
m = 300
X = []
Y = []
# hyperparameters for data
rApprox = 1
error = 0.4
noise = 0.1
name = "data/disk2"
plt.cla()
for ex in range(0, m):
    xCur = np.random.randn(2)
    X.append(xCur)
    if abs(np.linalg.norm(xCur) + np.random.randn()*noise - rApprox) < error:
        Y.append(1)
        color = "r"
    else:
        Y.append(0)
        color = "b"
    plt.scatter(xCur[0], xCur[1], c=color)
    if abs(np.random.randn()) < 0.01:
        plt.pause(0.1)
plt.pause(1)
plt.savefig(name + ".png")
X = np.array(X)
Y = np.array(Y)
df = pd.DataFrame(X)
df[2] = Y
df.to_csv(name + ".csv", index=False)
Thanks for your help.

Substitute this function for your calc_dM function. You must be careful when multiplying; it is not enough that the dimensions work out.
def calculuate_dMs(X0, X1, X2, Mx, My, W2, dZ2, sigma, m, n1):
    # pdb.set_trace()
    X0x_big = np.dot(np.ones((n1, 1)), X0[0].reshape(1, m))
    X0y_big = np.dot(np.ones((n1, 1)), X0[1].reshape(1, m))
    Mx_big = np.dot(Mx.reshape(n1, 1), np.ones((1, m)))
    My_big = np.dot(My.reshape(n1, 1), np.ones((1, m)))
    W2_big = np.dot(W2.reshape(n1, 1), np.ones((1, m)))
    dZ2_big = np.dot(np.ones((n1, 1)), dZ2.reshape(1, m))
    dxTemp = np.multiply(np.multiply(np.multiply((X0x_big - Mx_big), X1), W2_big), dZ2_big)
    dyTemp = np.multiply(np.multiply(np.multiply((X0y_big - My_big), X1), W2_big), dZ2_big)
    return (np.sum(dxTemp, axis=1)/m, np.sum(dyTemp, axis=1)/m)
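For completeness, a minimal sketch of how this replaces the two calc_dM calls inside your training loop. (The constant 1/sigma^2 from the derivative of the Gaussian is not included in the function above; since it is a positive constant, it only rescales the gradient and can be folded into the learning rate.)

    # inside the training loop, replacing the old dMx/dMy lines
    dZ2 = calc_dZ2(X2, Y)
    dW2 = np.dot(dZ2, X1.T) / m
    db2 = np.sum(dZ2) / m
    dMx, dMy = calculuate_dMs(X0, X1, X2, Mx, My, W2, dZ2, sigma, m, n1)
    b2 -= learning_rate * db2
    W2 -= learning_rate * dW2
    Mx -= learning_rate * dMx
    My -= learning_rate * dMy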

Related

Moving Points with 1D Noise

I'd like to move points in X & Y with 1D noise. To clarify, I don't want each point to move by a unique random number, but rather to have one larger noise curve over the whole line, with its gradients moving the points. The noise would serve as a multiplier for a move amount and would be a value between -1 and 1. For example, if the noise value was 0.8, it would multiply the X & Y of the points by that amount.
How would I go about this?
This is what I have so far (the black line is the original line). I think it's wrong, because the frequency is 1 but there appear to be multiple waves in the noise.
import numpy as np
import matplotlib.pyplot as plt
import random
import math
from enum import Enum

# PerlinNoise by alexandr-gnrk
class Interp(Enum):
    LINEAR = 1
    COSINE = 2
    CUBIC = 3

class PerlinNoise():
    def __init__(self,
                 seed, amplitude=1, frequency=1,
                 octaves=1, interp=Interp.COSINE, use_fade=False):
        self.seed = random.Random(seed).random()
        self.amplitude = amplitude
        self.frequency = frequency
        self.octaves = octaves
        self.interp = interp
        self.use_fade = use_fade
        self.mem_x = dict()

    def __noise(self, x):
        # memoised to improve performance
        if x not in self.mem_x:
            self.mem_x[x] = random.Random(self.seed + x).uniform(-1, 1)
        return self.mem_x[x]

    def __interpolated_noise(self, x):
        prev_x = int(x)      # previous integer
        next_x = prev_x + 1  # next integer
        frac_x = x - prev_x  # fractional part of x
        if self.use_fade:
            frac_x = self.__fade(frac_x)
        # interpolate x
        if self.interp is Interp.LINEAR:
            res = self.__linear_interp(
                self.__noise(prev_x),
                self.__noise(next_x),
                frac_x)
        elif self.interp is Interp.COSINE:
            res = self.__cosine_interp(
                self.__noise(prev_x),
                self.__noise(next_x),
                frac_x)
        else:
            res = self.__cubic_interp(
                self.__noise(prev_x - 1),
                self.__noise(prev_x),
                self.__noise(next_x),
                self.__noise(next_x + 1),
                frac_x)
        return res

    def get(self, x):
        frequency = self.frequency
        amplitude = self.amplitude
        result = 0
        for _ in range(self.octaves):
            result += self.__interpolated_noise(x * frequency) * amplitude
            frequency *= 2
            amplitude /= 2
        return result

    def __linear_interp(self, a, b, x):
        return a + x * (b - a)

    def __cosine_interp(self, a, b, x):
        x2 = (1 - math.cos(x * math.pi)) / 2
        return a * (1 - x2) + b * x2

    def __cubic_interp(self, v0, v1, v2, v3, x):
        p = (v3 - v2) - (v0 - v1)
        q = (v0 - v1) - p
        r = v2 - v0
        s = v1
        return p * x**3 + q * x**2 + r * x + s

    def __fade(self, x):
        # useful only for linear interpolation
        return (6 * x**5) - (15 * x**4) + (10 * x**3)

x = np.linspace(10, 10, 20)
y = np.linspace(0, 10, 20)
seed = 10
gen_x = PerlinNoise(seed=seed, amplitude=5, frequency=1, octaves=1, interp=Interp.CUBIC, use_fade=True)
noise_x = np.array([gen_x.get(pos) for pos in y])
fig, ax = plt.subplots(1)
ax.set_aspect("equal")
ax.plot(x, y, linewidth=2, color="k")
ax.scatter(x, y, s=20, zorder=4, color="k")
ax.plot(x+noise_x, y, linewidth=2, color="blue")
ax.scatter(x+noise_x, y, s=80, zorder=4, color="red")
plt.show()
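To make the intent concrete, here is a minimal, self-contained sketch of what I mean by one shared noise curve displacing all the points. It uses a simple interpolated random curve via np.interp instead of the PerlinNoise class above, purely for illustration; the variable names are just placeholders.

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(10)

# one shared 1D noise curve along the line: a few random control values in [-1, 1],
# linearly interpolated along the parameter of the line
t = np.linspace(0, 10, 20)             # parameter along the line (here, the y coordinate)
control_t = np.linspace(0, 10, 5)      # coarse control points -> low "frequency"
control_vals = rng.uniform(-1, 1, 5)   # noise multipliers between -1 and 1
noise = np.interp(t, control_t, control_vals)

move_amount = 5.0                      # how far a noise value of +/-1 moves a point
x = np.full_like(t, 10.0)              # the original vertical line at x = 10
y = t

plt.plot(x, y, color="k")                      # original line
plt.plot(x + noise * move_amount, y, "b-")     # every point displaced by the same noise curve
plt.scatter(x + noise * move_amount, y, c="r")
plt.gca().set_aspect("equal")
plt.show()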
Thank you!

Nullcline Plot for Nonlinear System of ODEs

I am attempting to plot the nullcline (steady-state) curves of the Oregonator model in order to establish the existence of a limit cycle by applying the Poincaré-Bendixson theorem. I am close, but for some reason the plot that is produced shows two straight lines. I think it has something to do with the plotting stage. Any ideas?
Also any hints for how to construct a quadrilateral to apply the theorem with would be most appreciated.
Code:
import numpy as np
import matplotlib.pyplot as plt

# Dimensionless parameters
eps = 0.04
q = 0.0008
f = 1

# Oregonator model as numpy array
def Sys(Y, t=0):
    return np.array((Y[0] * (1 - Y[0] - ((Y[0] - q) * f * Y[1]) / (Y[0] + q)) / eps, Y[0] - Y[1]))

# Oregonator model steady states
def g(x, z):
    return (x * (1 - x) + ((q - x) * f * z) / (q + x)) / eps

def h(x, z):
    return x - z

# Initial lists containing values
x = []
z = []

def sys(iv1, iv2, dt, time):
    # initial values:
    x.append(iv1)
    z.append(iv2)
    # Compute and fill lists
    for i in range(time):
        x.append(x[i] + (g(x[i], z[i])) * dt)
        z.append(z[i] + (h(x[i], z[i])) * dt)
    return x, z

sys(1, 0.5, 0.01, 30)

# Locate and find equilibrium points
eqp = []
def find_fixed_points(r):
    for x in range(r):
        for z in range(r):
            if ((g(x, z) == 0) and (h(x, z) == 0)):
                eqp.append((x, z))
    return eqp

# Plot nullclines
plt.plot([0, 2], [2, 0], 'r-', lw=2, label='x-nullcline')
plt.plot([1, 1], [0, 2], 'b-', lw=2, label='z-nullcline')
# Plot equilibrium points
for point in eqp:
    plt.plot(point[0], point[1], "red", marker="o", markersize=10.0)
plt.legend(loc='best')

x = np.linspace(0, 2, 20)
z = np.linspace(0, 2, 20)
X1, Z1 = np.meshgrid(x, z)    # Create a grid
DX1, DZ1 = Sys([X1, Z1])      # Compute reaction rate on the grid
M = (np.hypot(DX1, DZ1))      # Norm of the reaction rate
M[M == 0] = 1.                # Avoid zero division errors
DX1 /= M                      # Normalise each arrow
DZ1 /= M
plt.quiver(X1, Z1, DX1, DZ1, M, pivot='mid')
plt.xlabel("x(\u03C4)")
plt.ylabel("z(\u03C4)")
plt.legend()
plt.grid()
plt.show()
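For reference, my understanding is that the nullclines are the zero-level sets g(x, z) = 0 and h(x, z) = 0, so the x-nullcline should come out as a curve rather than a straight line. A minimal sketch of drawing them as zero contours (separate from the hard-coded lines above, and only an illustration of the idea):

import numpy as np
import matplotlib.pyplot as plt

eps, q, f = 0.04, 0.0008, 1

def g(x, z):
    return (x * (1 - x) + ((q - x) * f * z) / (q + x)) / eps

def h(x, z):
    return x - z

xx = np.linspace(0.01, 2, 400)   # stay away from the x = -q singularity
zz = np.linspace(0, 2, 400)
X, Z = np.meshgrid(xx, zz)

plt.contour(X, Z, g(X, Z), levels=[0], colors="r")  # x-nullcline: g(x, z) = 0
plt.contour(X, Z, h(X, Z), levels=[0], colors="b")  # z-nullcline: x = z
plt.xlabel("x(\u03C4)")
plt.ylabel("z(\u03C4)")
plt.show()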

Python: Gibbs sampler for regression model

I am trying to write a function for a Gibbs sampler in the Bayesian framework. I got the code from this [website][1], which is a straightforward regression model. However, I am tackling a more complicated model: y = beta0 + beta1*x + x^gamma * sigma * epsilon, where sigma is the variance of the model. That means I need to estimate p(beta0 | y, x, beta1, sigma, gamma) and so on (in the Gibbs sampler method). My question is how I should modify the code to sample beta0, beta1 and the other variables, as there are extra variables to condition on.
My codes are:
import numpy as np
import pymc as pm
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd

plt.rcParams['figure.figsize'] = (10, 5)

# conda install -c conda-forge pymc3=3.0   (run in the shell, not inside the Python script)

def sample_beta_0(y, x, beta_1, sigma, gamma, mu_0, tau_0):
    N = len(y)
    assert len(x) == N
    tau_i = 1/((x**gamma)*sigma)**2
    precision = tau_0 + sum(tau_i)
    mean = tau_0 * mu_0 + np.sum((y - beta_1 * x)*tau_i)
    mean /= precision
    return np.random.normal(mean, 1 / np.sqrt(precision))

def sample_beta_1(y, x, beta_0, sigma, mu_1, sigma_1):
    N = len(y)
    assert len(x) == N
    precision = sigma_1 + sigma * np.sum(x * x)
    mean = sigma_1 * mu_1 + sigma * np.sum((y - beta_0) * x)
    mean /= precision
    return np.random.normal(mean, 1 / np.sqrt(precision))

def sample_sigma(y, x, beta_0, beta_1, alpha, beta):
    N = len(y)
    alpha_new = alpha + N / 2
    resid = y - beta_0 - beta_1 * x
    beta_new = beta + np.sum(resid * resid) / 2
    return np.random.gamma(alpha_new, 1 / beta_new)

beta_0_true = -1
beta_1_true = 2
sigma_true = 1
N = 50
x = np.random.uniform(low=0, high=4, size=N)
y = np.random.normal(beta_0_true + beta_1_true * x, 1 / np.sqrt(sigma_true))
synth_plot = plt.plot(x, y, "o")
plt.xlabel("x")
plt.ylabel("y")
# print('Y are', y)
# print('X are', x)
plt.show()

"""GIBBS Sampler"""
# specify initial values
init = {"beta_0": 0,
        "beta_1": 0,
        "sigma": 2}
# specify hyperparameters
hypers = {"mu_0": 0,
          "sigma_0": 1,
          "mu_1": 0,
          "sigma_1": 1,
          "alpha": 2,
          "beta": 1}

def gibbs(y, x, iters, init, hypers):
    assert len(y) == len(x)
    beta_0 = init["beta_0"]
    beta_1 = init["beta_1"]
    sigma = init["sigma"]
    trace = np.zeros((iters, 3))  # trace to store values of beta_0, beta_1, sigma
    for it in range(iters):
        beta_0 = sample_beta_0(y, x, beta_1, sigma, hypers["mu_0"], hypers["sigma_0"])
        beta_1 = sample_beta_1(y, x, beta_0, sigma, hypers["mu_1"], hypers["sigma_1"])
        sigma = sample_sigma(y, x, beta_0, beta_1, hypers["alpha"], hypers["beta"])
        trace[it, :] = np.array((beta_0, beta_1, sigma))
    trace = pd.DataFrame(trace)
    trace.columns = ['beta_0', 'beta_1', 'sigma']
    print(trace)
    return trace

iters = 1000
trace = gibbs(y, x, iters, init, hypers)
traceplot = trace.plot()
traceplot.set_xlabel("Iteration")
traceplot.set_ylabel("Parameter value")
trace_burnt = trace[500:999]
hist_plot = trace_burnt.hist(bins=30, layout=(1, 3))
print(trace_burnt.median())
print(trace_burnt.std())
I know it is really long, but please help!

Producing 2D perlin noise with numpy

I'm trying to produce 2D Perlin noise using numpy, but instead of something smooth I get this:
[image: my broken Perlin noise, with ugly squares everywhere]
I'm surely mixing up my dimensions somewhere, probably when I combine the four gradients... But I can't find it and my brain is melting right now. Can anyone help me pinpoint the problem?
Anyway, here is the code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

def perlin(x, y, seed=0):
    # permutation table
    np.random.seed(seed)
    p = np.arange(256, dtype=int)
    np.random.shuffle(p)
    p = np.stack([p, p]).flatten()
    # coordinates of the first corner
    xi = x.astype(int)
    yi = y.astype(int)
    # internal coordinates
    xf = x - xi
    yf = y - yi
    # fade factors
    u = fade(xf)
    v = fade(yf)
    # noise components
    n00 = gradient(p[p[xi]+yi], xf, yf)
    n01 = gradient(p[p[xi]+yi+1], xf, yf-1)
    n11 = gradient(p[p[xi+1]+yi+1], xf-1, yf-1)
    n10 = gradient(p[p[xi+1]+yi], xf-1, yf)
    # combine noises
    x1 = lerp(n00, n10, u)
    x2 = lerp(n10, n11, u)
    return lerp(x2, x1, v)

def lerp(a, b, x):
    "linear interpolation"
    return a + x * (b-a)

def fade(t):
    "6t^5 - 15t^4 + 10t^3"
    return 6 * t**5 - 15 * t**4 + 10 * t**3

def gradient(h, x, y):
    "grad converts h to the right gradient vector and returns the dot product with (x,y)"
    vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
    g = vectors[h % 4]
    return g[:, :, 0] * x + g[:, :, 1] * y

lin = np.linspace(0, 5, 100, endpoint=False)
y, x = np.meshgrid(lin, lin)
plt.imshow(perlin(x, y, seed=0))
Thanks to Paul Panzer and a good night of sleep it works now ...
import numpy as np
import matplotlib.pyplot as plt

def perlin(x, y, seed=0):
    # permutation table
    np.random.seed(seed)
    p = np.arange(256, dtype=int)
    np.random.shuffle(p)
    p = np.stack([p, p]).flatten()
    # coordinates of the top-left
    xi, yi = x.astype(int), y.astype(int)
    # internal coordinates
    xf, yf = x - xi, y - yi
    # fade factors
    u, v = fade(xf), fade(yf)
    # noise components
    n00 = gradient(p[p[xi] + yi], xf, yf)
    n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1)
    n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1)
    n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf)
    # combine noises
    x1 = lerp(n00, n10, u)
    x2 = lerp(n01, n11, u)  # FIX1: I was using n10 instead of n01
    return lerp(x1, x2, v)  # FIX2: I also had to reverse x1 and x2 here

def lerp(a, b, x):
    "linear interpolation"
    return a + x * (b - a)

def fade(t):
    "6t^5 - 15t^4 + 10t^3"
    return 6 * t**5 - 15 * t**4 + 10 * t**3

def gradient(h, x, y):
    "grad converts h to the right gradient vector and returns the dot product with (x,y)"
    vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
    g = vectors[h % 4]
    return g[:, :, 0] * x + g[:, :, 1] * y

lin = np.linspace(0, 5, 100, endpoint=False)
x, y = np.meshgrid(lin, lin)  # FIX3: I thought I had to invert x and y here but it was a mistake
plt.imshow(perlin(x, y, seed=2), origin='upper')
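As a quick usage example, the fixed perlin() can also be stacked into multi-octave (fractal) noise. A minimal sketch that reuses the function and imports defined above:

lin = np.linspace(0, 5, 100, endpoint=False)
x, y = np.meshgrid(lin, lin)
# sum four octaves: each octave doubles the frequency and halves the amplitude
fractal = sum(perlin(x * 2**o, y * 2**o, seed=2) / 2**o for o in range(4))
plt.imshow(fractal, origin='upper')
plt.show()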

Lotka-Volterra equations (predator-prey) using Runge-Kutta in Python

I am trying to write a program using the Lotka-Volterra equations for predator-prey interactions, solved as ODEs:
dx/dt = a*x - b*x*y
dy/dt = g*x*y - s*y
using the 4th-order Runge-Kutta method.
I need to plot a graph showing both x and y as a function of time from t = 0 to t = 30.
a = alpha = 1
b = beta = 0.5
g = gamma = 0.5
s = sigma = 2
initial conditions x = y = 2
Here is my code so far, but it does not display anything on the graph. Some help would be nice.
#!/usr/bin/env python
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np

def rk4(f, r, t, h):
    """ Runge-Kutta 4 method """
    k1 = h*f(r, t)
    k2 = h*f(r+0.5*k1, t+0.5*h)
    k3 = h*f(r+0.5*k2, t+0.5*h)
    k4 = h*f(r+k3, t+h)
    return (k1 + 2*k2 + 2*k3 + k4)/6

def f(r, t):
    alpha = 1.0
    beta = 0.5
    gamma = 0.5
    sigma = 2.0
    x, y = r[2], r[2]
    fxd = x*(alpha - beta*y)
    fyd = -y*(gamma - sigma*x)
    return np.array([fxd, fyd], float)

tpoints = np.linspace(0, 30, 0.1)
xpoints = []
ypoints = []
r = np.array([2, 2], float)
for t in tpoints:
    xpoints += [r[2]]
    ypoints += [r[2]]
    r += rk4(f, r, t, h)
plt.plot(tpoints, xpoints)
plt.plot(tpoints, ypoints)
plt.xlabel("Time")
plt.ylabel("Population")
plt.title("Lotka-Volterra Model")
plt.savefig("Lotka_Volterra.png")
plt.show()
A simple check of your variable tpoints after running your script shows it's empty:
In [7]: run test.py
In [8]: tpoints
Out[8]: array([], dtype=float64)
This is because you're using np.linspace incorrectly. The third argument is the number of elements desired in the output. You've requested an array of length 0.1.
Take a look at np.linspace's docstring. You won't have a problem figuring out how to adjust your code.
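For example, to get a 0.1 time step over [0, 30] you would request a count of points, not a step size:

h = 0.1
tpoints = np.linspace(0, 30, 301)  # 301 points: 0.0, 0.1, ..., 30.0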
1) Define the 'h' variable.
2) Use
tpoints = np.arange(30)  # array([0, 1, 2, ..., 29])
not
np.linspace()
and don't forget to set the time step size equal to h:
h = 0.1
tpoints = np.arange(0, 30, h)
3) Be careful with indexes:
def f(r, t):
    ...
    x, y = r[0], r[1]
    ...
for t in tpoints:
    xpoints += [r[0]]
    ypoints += [r[1]]
    ...
and better, use .append(x):
for t in tpoints:
    xpoints.append(r[0])
    ypoints.append(r[1])
    ...
Here's tested code for Python 3.7 (I've set h=0.001 for more precision):
import matplotlib.pyplot as plt
import numpy as np

def rk4(r, t, h):  # edited; no need for input f
    """ Runge-Kutta 4 method """
    k1 = h*f(r, t)
    k2 = h*f(r+0.5*k1, t+0.5*h)
    k3 = h*f(r+0.5*k2, t+0.5*h)
    k4 = h*f(r+k3, t+h)
    return (k1 + 2*k2 + 2*k3 + k4)/6

def f(r, t):
    alpha = 1.0
    beta = 0.5
    gamma = 0.5
    sigma = 2.0
    x, y = r[0], r[1]
    fxd = x*(alpha - beta*y)
    fyd = -y*(gamma - sigma*x)
    return np.array([fxd, fyd], float)

h = 0.001  # edited
tpoints = np.arange(0, 30, h)  # edited
xpoints, ypoints = [], []
r = np.array([2, 2], float)
for t in tpoints:
    xpoints.append(r[0])  # edited
    ypoints.append(r[1])  # edited
    r += rk4(r, t, h)  # edited; no need for input f
plt.plot(tpoints, xpoints)
plt.plot(tpoints, ypoints)
plt.xlabel("Time")
plt.ylabel("Population")
plt.title("Lotka-Volterra Model")
plt.savefig("Lotka_Volterra.png")
plt.show()
You can also try to plot "cycles":
plt.xlabel("Prey")
plt.ylabel("Predator")
plt.plot(xpoints, ypoints)
plt.show()
https://i.stack.imgur.com/NB9lc.png
