How to improve a nonlinear regression fit using scipy.ODR? - python

I have a problem using scipy.odr to fit my data.
Here is the data:
https://www.dropbox.com/s/zuo6rqokcxsbnh5/data.txt?dl=0
The 1st column is the x data, the 2nd is the y data.
My Python fit code:
import numpy as np
import scipy.odr
import matplotlib.pyplot as plt
a = open('C:/Users/san/Desktop/data.txt','r')
data = np.loadtxt(a, delimiter=' ')
a.close()
#data x
f = data[:,0]
#data y
Soddre = data[:,1]
#####fit function#####
def S11fit(beta, f):
    w = 2*np.pi*f
    L1 = 25*1e-3
    L2 = 25*1e-3
    dL = 0.01*1e-3
    z0 = 376.73415
    Z0 = 50
    ZL1 = 50
    c = 2.997925e8
    miu0 = z0/c
    miur = 1
    y0 = 1j*w/c
    episr = beta[0]+1j*beta[1]+(beta[2]+1j*beta[3])/(1+1j*beta[4]*w)  #My target
    ys = 1j*w/c*np.sqrt(episr*miur)
    the = np.sqrt(episr/miur)
    Zin3_1 = np.tanh(ys*L2)
    Zin3_2 = the*np.tanh(y0*dL)
    Zin3_3 = the*( 1+the*np.tanh(y0*dL)*np.tanh(ys*L2) )
    Zin3 = ZL1*( Zin3_1 + Zin3_2 + Zin3_3*np.tanh(y0*L1) )\
           /( Zin3_3 +( Zin3_1 + Zin3_2 )*np.tanh(y0*L1) )
    S11t = (Zin3-Z0)/(Zin3+Z0)
    return S11t.real
#Setting initial beta
w = 2*np.pi*f  #angular frequency, needed for the index-based guesses below
A0r = 1
A0i = 0.02
A0 = A0r+1j*A0i
er1 = 1+0.02
er2 = 1+0.03
B1 = ( er1-er2 )/( 1j*( -w[200]*(er1-A0)+w[300]*(er2-A0) ) )
A1 = (er2-A0)*(1+1j*w[300]*B1)
#Using scipy.odr to fit data
Model = scipy.odr.Model(S11fit)
Data = scipy.odr.Data(f, Soddre)
odr = scipy.odr.ODR(Data, Model, [A0r, A0i, A1.real, A1.imag, B1.real])
output = odr.run()
result = output.pprint()
beta = output.beta
print("ODR", beta)
#Fitting result
plt.plot(f, Soddre, "b")
plt.plot(f, S11fit(beta, f), "g--", lw=1)
plt.tight_layout()
plt.show()
#episr obtained from the fitted beta
episr = beta[0]+1j*beta[1]+(beta[2]+1j*beta[3])/(1+1j*beta[4]*w)
plt.plot(f, episr.real, "b")
Results
Beta: [ -5.13825455e+00 -1.08908338e+00 5.46365124e+00 -4.76830313e+01
-5.13452718e-10]
The graph of the fitting result and the reference:
https://www.dropbox.com/s/9yj4ulo57mqgcc2/Fitting%20result.pdf?dl=0
My fitted curve does not match the data well.
Does anyone have a good suggestion for fitting the data better?

I can now fit my data well. The Soddre data needs to be given a standard deviation.
Just add:
ca = np.full((1, 801), 0.01)
and change Data to:
Data = scipy.odr.RealData(f, Soddre, sy=ca)
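For context, here is a minimal sketch of how that change slots into the script above. It assumes f, Soddre, S11fit and the initial-guess values from the question are already defined, and it uses the same guessed 1% uncertainty; the optional set_job(fit_type=2) line switches ODRPACK to ordinary least squares if full orthogonal-distance regression is not wanted:
import numpy as np
import scipy.odr

sy = np.full(len(Soddre), 0.01)  #assumed uncertainty on every y point
model = scipy.odr.Model(S11fit)
data = scipy.odr.RealData(f, Soddre, sy=sy)  #residuals weighted by 1/sy**2
odr = scipy.odr.ODR(data, model, beta0=[A0r, A0i, A1.real, A1.imag, B1.real])
odr.set_job(fit_type=2)  #optional: plain least squares instead of full ODR
output = odr.run()
print(output.beta)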

Related

Scipy Curve_fit gives a rather weird fit

Dear Python programmers,
I am currently working with curve_fit from scipy in order to find out what correlation the x and y data have with each other. However, the curve fit becomes really weird even when I fit a simple linear formula to it. I've tried converting the array to a numpy array in the def func(x, a, b, c): "Fit function" return a * np.asarray(x) + b part, but it still gives me a graph that looks like a 3-year-old scribbled over it with a red pencil.
One thing I do remember is sorting the values of massflows and rms_smote from low to high, which you can see just above the def func(x, a, b, c) bit. I did this because curve_fit was giving me a fit, but it also looked kind of scratched out, as if it were a sketch, when the values were unsorted. I don't know whether curve_fit treats data differently depending on whether it is sorted or not.
If you need any more information, let me know :) Any suggestion is welcome!
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy.stats import linregress
from scipy.optimize import curve_fit
data_15 = pd.read_csv(r"C:\Users\Thomas\Documents\Pythondata\2022-01-15_SMOTERapport.csv", header= 0, sep=';', decimal=',')
data_06 = pd.read_csv(r"C:\Users\Thomas\Documents\Pythondata\2022-02-06_SMOTERapport.csv", header= 0, sep=';', decimal=',')
data_10 = pd.read_csv(r"C:\Users\Thomas\Documents\Pythondata\2022-02-10_SMOTERapport.csv", header= 0, sep=';', decimal=',')
speed_15 = data_15['SPEED_ACT']
speed_06 = data_06['SPEED_ACT']
speed_10 = data_10['SPEED_ACT']
"Data filter 01_15"
filter = [i for i, e in enumerate(speed_15) if e >= 80]
s_15 = pd.DataFrame(data_15)
speed15 = s_15.filter(items = filter, axis=0)
speed15.reset_index(drop=True, inplace=True)
temp15 = speed15['TP_SMOTE']
foutmetingen2 = [i for i, e in enumerate(temp15) if e < 180]
speed15 = speed15.drop(foutmetingen2)
tp_strip15 = speed15['TP_AMBIENT']
tp_target15 = speed15['TP_TARGET']
tp_smote15 = speed15['TP_SMOTE']
v_15 = speed15['SPEED_ACT']
width15 = speed15['STRIP_WIDTH']
thickness15 = speed15['STRIP_THICKNESS']
power15 = speed15['POWER_INVERTER_PRE']
voltage15 = speed15['VOLTAGE_INVERTER_PRE']
"Data filter 02_06"
filter = [i for i, e in enumerate(speed_06) if e >= 80]
s_06 = pd.DataFrame(data_06)
speed06 = s_06.filter(items = filter, axis=0)
speed06.reset_index(drop=True, inplace=True)
temp06 = speed06['TP_SMOTE']
foutmetingen2 = [i for i, e in enumerate(temp06) if e < 180]
speed06 = speed06.drop(foutmetingen2)
tp_strip06 = speed06['TP_AMBIENT']
tp_target06 = speed06['TP_TARGET']
tp_smote06 = speed06['TP_SMOTE']
v_06 = speed06['SPEED_ACT']
width06 = speed06['STRIP_WIDTH']
thickness06 = speed06['STRIP_THICKNESS']
power06 = speed06['POWER_INVERTER_PRE']
voltage06 = speed06['VOLTAGE_INVERTER_PRE']
"Data filter 02_10"
filter = [i for i, e in enumerate(speed_10) if e >= 80]
s_10 = pd.DataFrame(data_10)
speed10 = s_10.filter(items = filter, axis=0)
speed10.reset_index(drop=True, inplace=True)
temp_01 = speed10['TP_SMOTE']
foutmetingen2 = [i for i, e in enumerate(temp_01) if e < 180]
speed10 = speed10.drop(foutmetingen2)
tp_strip10 = speed10['TP_AMBIENT']
tp_target10 = speed10['TP_TARGET']
tp_smote10 = speed10['TP_SMOTE']
v_10 = speed10['SPEED_ACT']
width10 = speed10['STRIP_WIDTH']
thickness10 = speed10['STRIP_THICKNESS']
power10 = speed10['POWER_INVERTER_PRE']
voltage10 = speed10['VOLTAGE_INVERTER_PRE']
"Constanten"
widthmax = 1253
Kra = 0.002033636
Kosc = 0.073086272
Pnominal = 2200
meting_15 = np.arange(0, len(speed15), 1)
meting_06 = np.arange(0, len(speed06), 1)
meting_10 = np.arange(0, len(speed10), 1)
cp = 480
rho = 7850
"---------------------------------------------------------------------"
def temp(power, speed, width, thickness, tp_strip, tp_target, tp_smote,
         voltage):
    "Compare the calculated temperature with the target temperature"
    massflow = (speed/60)*width*10**-3*thickness*10**-3*rho
    LossesRA = Kra*Pnominal*(width/widthmax)
    LossesOSC = Kosc*Pnominal*(voltage/100)**2
    Plosses = (LossesRA + LossesOSC)
    power_nl = (power/100)*Pnominal - Plosses
    temp_c = ((power_nl*1000)/(massflow*cp)) + tp_strip
    verschil_t = (temp_c/tp_target)*100-100
    verschil_smote = (temp_c/tp_smote)*100-100
    return temp_c, verschil_t, verschil_smote, massflow
temp_15 = temp(power15, v_15, width15, thickness15, tp_strip15, tp_target15,
tp_smote15, voltage15)
temp_06 = temp(power06, v_06, width06, thickness06, tp_strip06, tp_target06,
tp_smote06, voltage06)
temp_10 = temp(power10, v_10, width10, thickness10, tp_strip10, tp_target10,
tp_smote10, voltage10)
"---------------------------------------------------------------------"
def rms(Temperatuurberekend, TemperatuurGemeten):
    "Compute the root mean square between calculated and measured data"
    rootmeansquare = (TemperatuurGemeten - Temperatuurberekend)
    rootmeansquare_totaal = np.sum(rootmeansquare)
    rootmeansquare_gem = rootmeansquare_totaal/len(rootmeansquare)
    return rootmeansquare, rootmeansquare_totaal, rootmeansquare_gem
rms_tp_smote15 = (rms(temp_15[0], tp_smote15))
rms_tp_smote06 = (rms(temp_06[0], tp_smote06))
rms_tp_smote10 = (rms(temp_10[0], tp_smote10))
"----------------------------------------------------------------------"
massflows = [np.sum(temp_06[3])/len(temp_06[3]), np.sum(temp_15[3])/
len(temp_15[3]), np.sum(temp_10[3])/len(temp_10[3])]
rms_smote = [rms_tp_smote06[2], rms_tp_smote10[2], rms_tp_smote15[2]]
rms_tp_smote_pre = np.append(rms_tp_smote15[0].tolist(),
rms_tp_smote06[0].tolist())
rms_tp_smote = np.append(rms_tp_smote_pre, rms_tp_smote10[0].tolist())
massflow_pre = np.append(temp_15[3].tolist(), temp_06[3].tolist())
massflow = np.append(massflow_pre, temp_10[3].tolist())
massflow_sort = np.sort(massflow)
rms_tp_smote_sort = [x for _, x in sorted(zip(massflow, rms_tp_smote))]
a, b, r, p, s_a = linregress(massflows, rms_smote)
print('RC: ', a, '\n', 'std: ', s_a, '\n', 'Offset: ', b)
def func(x, a, b, c):
    "Fit function"
    return a * np.asarray(x) + b
popt, pcov = curve_fit(func, massflow_sort, rms_tp_smote_sort)
popt
functie = func(massflow_sort, *popt)
sns.set_theme(style='whitegrid')
fig, axs = plt.subplots(2, figsize=(10, 10))
axs[0].plot(massflows, rms_smote, label='Temp afwijking als f(massflow)')
axs[0].plot([massflows[0], massflows[len(massflows)-1]],
            [a*massflows[0]+b, a*massflows[len(massflows)-1]+b],
            label='trendlijn')
axs[0].set(xlabel='Mass flow ($kg/s$)',
ylabel='Temperatuur afwijking gem ($\u00b0C$)', title='Met Verliezen')
axs[0].legend(loc='upper right')
axs[1].plot(massflow_sort, rms_tp_smote_sort, 'o', label='Temp/Massflow 01-15')
#axs[1].plot(temp_06[3], rms_tp_smote06[0], 'o', label='Temp/Massflow 02-06')
#axs[1].plot(temp_10[3], rms_tp_smote10[0], 'o', label='Temp/Massflow 02-10')
axs[1].plot(massflow, func(massflow_sort, *popt), 'r-',
label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
axs[1].set(xlabel='Mass flow ($kg/s$)',
ylabel='Temperatuur afwijking gem ($\u00b0C$)')
axs[1].legend(loc='upper right')
print("Gemiddelde verschil temperatuur smote: ", rms_tp_smote15[1])
print("Gemiddelde uitwijking temperatuur smote: ", rms_tp_smote15[2])

Logistic regression - strange behaviour of the decision boundary when additional parameters are added

I am trying to build a logistic regression model for a dataset consisting of two parameters,
x1 and x2, but instead of analyzing just the two of them I have added their squares and their product as well: x1², x2², and x1·x2.
At first glance everything looks fine and the error function is decreasing, but while drawing the plot of the decision boundary I noticed that after circa 500 iterations something strange happens to it.
Here is an animation of the error function as a function of iterations and a respective plot of the decision boundary:
Now, I interpret the decision boundary as a quadratic function
x2 = f(x1), where
the relation between both parameters is given by:
0.5 = θ0 + θ1·x1 + θ2·x2 + θ3·x1² + θ4·x1·x2 + θ5·x2²
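As a side note, here is a small sketch (independent of the script below, and assuming θ5 ≠ 0) of how that equation can be solved for x2 with the quadratic formula, which is what the animation code later does with delta_x and minb:
import numpy as np

def boundary_x2(theta, x1):
    "Solve 0.5 = t0 + t1*x1 + t2*x2 + t3*x1**2 + t4*x1*x2 + t5*x2**2 for x2."
    t0, t1, t2, t3, t4, t5 = theta
    a = t5                                   #coefficient of x2**2 (assumed non-zero)
    b = t4 * x1 + t2                         #coefficient of x2
    c = t3 * x1**2 + t1 * x1 + t0 - 0.5      #constant term
    disc = np.asarray(b**2 - 4 * a * c, dtype=float)
    disc = np.where(disc >= 0, disc, np.nan)  #no real boundary where disc < 0
    root = np.sqrt(disc)
    return (-b + root) / (2 * a), (-b - root) / (2 * a)  #the two branches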
Here is the python code I use to do everything:
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import log
from matplotlib.animation import FuncAnimation
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))
def loadData(filepath):
    source = ""
    try:
        f = open(filepath, "r")
        source = f.read()
        f.close()
    except IOError:
        print("Error while reading file (" + filepath + ")")
        return ""
    raw_data = source.split("\n")
    raw_data = [x.split(",") for x in raw_data if x != ""]
    raw_data = np.matrix(raw_data).astype(float)
    return (raw_data[:, :np.size(raw_data, 1)-1], raw_data[:, np.size(raw_data, 1)-1:])
def standardize(dataset, skipfirst=True):
    means = np.amin(dataset, 0)
    deviation = np.std(dataset, 0)
    if skipfirst:
        dataset[:, 1:] -= means[:, 1:]
        dataset[:, 1:] /= deviation[:, 1:]
        return dataset
    else:
        dataset -= means
        dataset /= deviation
        return dataset
def error(X, Y, Theta):
    "Calculates error values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    sigmo = v_sigm(h_x)
    partial_vect = (Y-1).T @ np.log(1-sigmo) - Y.T @ np.log(sigmo)
    return 1/(2*np.size(Y, axis=0))*np.sum(partial_vect)
def gradientStep(X, Y, Theta, LR):
    "Returns new theta values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    modif = -1*LR/np.size(Y, 0)*(h_x-Y)
    sums = np.sum(modif.T @ X, axis=0)
    return Theta + sums.T
X, Y = loadData("ex2data1.txt")
#add bias to X
X = np.append(np.ones((np.size(X, 0), 1)), X, axis=1)
added_params = [[x[1]**2, x[1]*x[2], x[2]**2] for x in np.array(X)]
X = np.append(X, np.matrix(added_params), axis=1)
#standardize X
X = standardize(X)
#create vector of parameters
Theta=np.zeros((np.size(X, 1), 1))
iterations = 3000
Theta_vals = []
Error_vals = []
for i in range(0, iterations):
    Theta_vals.append(np.asarray(Theta).flatten())
    Error_vals.append(error(X, Y, Theta))
    Theta = gradientStep(X, Y, Theta, 0.07)
#CALCULATING FINISHES HERE
#plot data:
fig = plt.figure()
def_ax = fig.add_subplot(211)
def_ax.set_xlim(np.amin(X[:,1:2]), np.amax(X[:,1:2]))
def_ax.set_ylim(np.amin(X[:,2:3]), np.amax(X[:,2:3]))
err_ax = fig.add_subplot(212)
err_ax.set_ylim(0, error(X, Y, Theta))
err_ax.set_xlim(0, iterations)
positive_X1 = []
positive_X2 = []
negative_X1 = []
negative_X2 = []
for i in range(0, np.size(Y, 0)):
    if Y[i, 0] == 1:
        positive_X1.append(X[i, 1])
        positive_X2.append(X[i, 2])
    else:
        negative_X1.append(X[i, 1])
        negative_X2.append(X[i, 2])
err_ax.set_ylim(np.amin(Error_vals), np.amax(Error_vals))
def animation(frame):
    global Theta_vals, Error_vals, def_ax, err_ax, positive_X1, positive_X2, negative_X1, negative_X2
    def_limX = def_ax.get_xlim()
    def_limY = def_ax.get_ylim()
    err_limX = err_ax.get_xlim()
    err_limY = err_ax.get_ylim()
    def_ax.clear()
    err_ax.clear()
    def_ax.set_xlim(def_limX)
    def_ax.set_ylim(def_limY)
    err_ax.set_xlim(err_limX)
    err_ax.set_ylim(err_limY)
    def_ax.scatter(positive_X1, positive_X2, marker="^")
    def_ax.scatter(negative_X1, negative_X2, marker="o")
    Theta = Theta_vals[frame]
    res_x = np.linspace(*def_ax.get_xlim(), num=5)
    delta_x = [(Theta[4]*x+Theta[2])**2-4*Theta[5]*(Theta[3]*x**2+Theta[1]*x+Theta[0]-0.5) for x in res_x]
    delta_x = [np.sqrt(x) if x >= 0 else 0 for x in delta_x]
    minb = [-(Theta[4]*x+Theta[2]) for x in res_x]
    res_1 = []
    res_2 = []
    for i in range(0, len(res_x)):
        if Theta[5] == 0:
            res_1.append(0)
            res_2.append(0)
        else:
            res_1.append((minb[i]+delta_x[i])/(2*Theta[5]))
            res_2.append((minb[i]-delta_x[i])/(2*Theta[5]))
    def_ax.plot(res_x, res_1)
    def_ax.plot(res_x, res_2)
    err_x = np.linspace(0, frame, frame)
    err_y = Error_vals[0:frame]
    err_ax.plot(err_x, err_y)
anim = FuncAnimation(fig, animation, frames=iterations, interval=3, repeat_delay=2000)
print(error(X, Y, Theta))
anim.save("anim.mp4")
What could be the reason for such strange behaviour?

Matlab to python - different results, cannot solve

I have been trying to convert the following Matlab code into Python and I am having difficulties with the construction of the matrix algebra.
The output of the Python code is vastly different from Matlab's and I can't figure out the problem (I assume it's in the matrix multiplication).
Just for some background info: using the data set, I was attempting to create a forecast with one dummy variable 'D1'.
Here is the matlab code:
load 'AUSRetail.csv'
y = AUSRetail(:,1); Q = AUSRetail(:,2);
T = length(y); t = (1:T)';
D4 = (Q == 4);
T0 = 15;
h = 1; % h-step-ahead forecast
syhat = zeros(T-h-T0+1,1);
ytph = y(T0+h:end); % observed y {t+h}
for t = T0:T-h
yt = y(1:t);
D4t = D4(1:t);
Xt = [ones(t,1) (1:t)' D4t];
beta2 = (Xt'*Xt)\(Xt'*yt);
yhat2 = [1 t+h D4(t+h)]*beta2;
syhat(t-T0+1) = yhat2;
end
MSFE2 = mean((ytph-syhat).^2);
plot([1:T], y)
hold on
plot([T0 + h:T], syhat)
and here is my python code attempt:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

data = pd.read_csv("dataretail.csv").dropna()
D4 = np.asarray(data["Dummy4"])
dataValues = np.asarray(data["Value"])
T = len(D4)
T0 = 15
h = 1
syhat = []
ytph = np.asarray(dataValues) # Real values to test against
for t in range(T0, T-1):
    yt = dataValues[:t].reshape(t, 1)
    D4t = D4[:t]
    #Construction of Xt
    xt1 = np.ones((t, 1))
    xt2 = np.arange(1, t+1).reshape(t, 1)
    xt3 = D4t.reshape(t, 1)
    Xt = np.column_stack((xt1, xt2, xt3))
    Xt2 = np.transpose(Xt)
    A = Xt2 @ Xt
    b = Xt2 @ yt
    beta2 = np.transpose(A) @ b
    yhat2 = np.array([1, t+h, D4[t+h]]) @ beta2
    syhat.append(int(yhat2))
fig = plt.figure()
axes = fig.add_axes([0.2, 0.2, 0.8, 1])
axes.set_xlabel("T (time)")
axes.set_ylabel("Yhat2")
axes.plot(range(25), abc)
axes.set_title("Model")

Why is a 1D k-means clustering slower than a k-means initialized mixture model fit?

My timing shows that k-means consistently loses on timing compared to a Gaussian mixture model that is itself initialized with k-means.
What's the explanation for this? Is the GMM using a different k-means algorithm? Am I misunderstanding how it works? Does it use a differently sized dataset (smaller than the one I'm drawing from)?
import sklearn.cluster
import sklearn.mixture
import numpy as np
import time
import matplotlib.pyplot as plt
k = 3
N = 100
def clust():
    m = sklearn.cluster.KMeans(n_clusters=k)
    m.fit(X.reshape(-1, 1))
    return m.cluster_centers_

def fit():
    m = sklearn.mixture.GaussianMixture(n_components=k, init_params="kmeans")
    m.fit(X.reshape(-1, 1))
    return m.means_
duration_clust = []
duration_fit = []
ctrs_clust = []
ctrs_fit = []
for i in range(N):
    _1 = np.random.normal(0.25, 0.15, 50)
    _2 = np.random.normal(0.50, 0.15, 50)
    _3 = np.random.normal(0.75, 0.15, 50)
    X = np.concatenate((_1, _2, _3)).reshape(-1, 1)
    ts = time.time()
    c = clust()
    te = time.time()
    time_clust = (te - ts) * 1e3
    ts = time.time()
    f = fit()
    te = time.time()
    time_fit = (te - ts) * 1e3
    duration_clust.append(time_clust)
    duration_fit.append(time_fit)
    ctrs_clust.append(c)
    ctrs_fit.append(f)
bins0 = np.arange(0, 20, 1)
bins1 = np.linspace(0,1,30)
fig, ax = plt.subplots(nrows = 2)
ax[0].hist(duration_clust, label = "Kmeans", bins = bins0, alpha = 0.5)
ax[0].hist(duration_fit, label = "GMM with Kmeans", bins = bins0, alpha = 0.5)
ax[0].set_xlabel("duration (ms)")
ax[0].legend(loc = "upper right")
ax[1].hist(np.ravel(ctrs_clust), label = "Kmeans centers", bins = bins1, alpha = 0.5)
ax[1].hist(np.ravel(ctrs_fit), label = "GMM centers", bins = bins1, alpha = 0.5)
ax[1].set_xlabel("Center location")
ax[1].axvline([0.25], label = "Truth", color = "black")
ax[1].axvline([0.50], color = "black")
ax[1].axvline([0.75], color = "black")
ax[1].legend(loc = "upper right")
plt.tight_layout()
plt.show()
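One factor worth checking (a hypothesis to time, not a verified explanation): depending on the scikit-learn version, KMeans by default repeats the whole clustering several times (n_init=10 in older releases) and keeps the best run, whereas the mixture model's k-means initialization only needs a single k-means pass. A quick check is to time KMeans with n_init=1 on the same kind of data (a sketch, separate from the script above):
import time
import numpy as np
import sklearn.cluster

rng = np.random.default_rng(0)
X = np.concatenate([rng.normal(m, 0.15, 50) for m in (0.25, 0.50, 0.75)]).reshape(-1, 1)

for n_init in (1, 10):
    t0 = time.time()
    sklearn.cluster.KMeans(n_clusters=3, n_init=n_init).fit(X)
    print("n_init=%d: %.2f ms" % (n_init, (time.time() - t0) * 1e3))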

Having successfully converted complex Matlab code to Python, how to run the code?

This question is a follow-up to my previous question here: Assistance, tips and guidelines for converting Matlab code to Python
I have converted the Matlab code manually. I am using macOS and running Python from the terminal. But how do I run the code below for some value of N, where N is an even number? I should get a graph (produced by the plot code).
When I run it as is, I get nothing.
My code is below:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def Array(N):
    K00 = np.logspace(0,3,101,10)
    len1 = len(K00)
    y0 = [0]*(3*N/2+3)
    S = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
    KS = [np.zeros((len1,1)) for kkkk in range(N/2)]
    PS = [np.zeros((len1,1)) for kkkk in range(N/2)]
    Splot = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
    KSplot = [np.zeros((len1,1)) for kkkk in range(N/2)]
    PSplot = [np.zeros((len1,1)) for kkkk in range(N/2)]
    Kplot = np.zeros((len1,1))
    Pplot = np.zeros((len1,1))
    for series in range(0,len1):
        K0 = K00[series]
        Q = 10
        r1 = 0.0001
        r2 = 0.001
        d = 0.001
        a = 0.001
        k = 0.999
        P0 = 1
        S10 = 1e5
        tf = 1e10
        time = np.linspace(0,tf,len1)
        y0[0] = S10
        y0[3*N/2+1] = K0
        y0[3*N/2+2] = P0
        for i in range(1,3*N/2+1):
            y0[i] = 0
        [t,y] = odeint(EqnsArray,y0,time, mxstep = 5000)
        for alpha in range(0,(N/2+1)):
            S[alpha] = y[:,alpha]
        for beta in range((N/2)+1,N+1):
            KS[beta-N/2-1] = y[:,beta]
        for gamma in range(N+1,3*N/2+1):
            PS[gamma-N-1] = y[:,gamma]
        for alpha in range(0,(N/2+1)):
            Splot[alpha][series] = y[len1-1,alpha]
        for beta in range((N/2)+1,N+1):
            KSplot[beta-N/2-1][series] = y[len1-1,beta]
        for gamma in range(N+1,3*N/2+1):
            PSplot[gamma-N-1][series] = y[len1-1,gamma]
        for alpha in range(0,(N/2+1)):
            u1 = u1 + Splot[alpha]
        for beta in range((N/2)+1,N+1):
            u2 = u2 + KSplot[beta-N/2-1]
        for gamma in range(N+1,3*N/2+1):
            u3 = u3 + PSplot[gamma-N-1]
        K = soln[:,3*N/2+1]
        P = soln[:,3*N/2+2]
        Kplot[series] = soln[len1-1,3*N/2+1]
        Pplot[series] = soln[len1-1,3*N/2+2]
    utot = u1+u2+u3
    #Plot
    plt.plot(np.log10(K00),utot)
    plt.show()
def EqnsArray(y,t):
    for alpha in range(0,(N/2+1)):
        S[alpha] = y[alpha]
    for beta in range((N/2)+1,N+1):
        KS[beta-N/2-1] = y[beta]
    for gamma in range(N+1,3*N/2+1):
        PS[gamma-N-1] = y[gamma]
    K = y[3*N/2+1]
    P = y[3*N/2+2]
    # The model equations
    ydot = np.zeros((3*N/2+3,1))
    B = range((N/2)+1,N+1)
    G = range(N+1,3*N/2+1)
    runsumPS = 0
    runsum1 = 0
    runsumKS = 0
    runsum2 = 0
    for m in range(0,N/2):
        runsumPS = runsumPS + PS[m]
        runsum1 = runsum1 + S[m+1]
        runsumKS = runsumKS + KS[m]
        runsum2 = runsum2 + S[m]
        ydot[B[m]] = a*K*S[m]-(d+k+r1)*KS[m]
    for i in range(0,N/2-1):
        ydot[G[i]] = a*P*S[i+1]-(d+k+r1)*PS[i]
    for p in range(1,N/2):
        ydot[p] = -S[p]*(r1+a*K+a*P)+k*KS[p-1]+d*(PS[p-1]+KS[p])
    ydot[0] = Q-(r1+a*K)*S[0]+d*KS[0]+k*runsumPS
    ydot[N/2] = k*KS[N/2-1]-(r2+a*P)*S[N/2]+d*PS[N/2-1]
    ydot[G[N/2-1]] = a*P*S[N/2]-(d+k+r2)*PS[N/2-1]
    ydot[3*N/2+1] = (d+k+r1)*runsumKS-a*K*runsum2
    ydot[3*N/2+2] = (d+k+r1)*(runsumPS-PS[N/2-1])- \
        a*P*runsum1+(d+k+r2)*PS[N/2-1]
    ydot_new = []
    for j in range(0,3*N/2+3):
        ydot_new.extend(ydot[j])
    return ydot_new
You have to call your function, like:
Array(12)
Add this call at the end of your code.
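For example (a sketch, assuming the whole script above is saved in a file; the name array_model.py below is just an example):
if __name__ == "__main__":
    Array(12)   #N must be even; 12 is only an example value
Then run it from the terminal with: python array_model.py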
