Related
I implemented the conjugate gradient method in TensorFlow to solve a sparse linear system.
The matrix I used to test the method is well conditioned: it is the sum of a mass matrix and a stiffness matrix assembled with finite elements.
I compared it with the same method implemented in SciPy, run on the same data.
The two implementations produce the same solution, but TensorFlow is about 5 times slower: in a Colab environment, SciPy ran in 0.27 s while TensorFlow required 1.37 s.
Why is the algorithm so slow under TensorFlow?
I cannot cast to dense matrices, since I want to use this approach with matrices of large size (100k × 100k or more).
Thanks,
Cesare
Here is the code I used to test this:
import tensorflow as tf
import numpy as np
from scipy.sparse import coo_matrix,linalg
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from time import time
from scipy.spatial import Delaunay
def create_mesh(Lx=1,Ly=1,Nx=100,Ny=100):
mesh0=dict()
dx = Lx/Nx
dy = Ly/Ny
XX,YY=np.meshgrid(np.arange(0,Lx+dx,dx),np.arange(0,Ly+dy,dy))
points=np.vstack((XX.ravel(),YY.ravel())).T
#np.random.shuffle(points)
tri = Delaunay(points)
mesh0['Pts']=np.copy(points).astype(np.float32)
mesh0['Tria']=np.copy(tri.simplices).astype(int)
return(mesh0)
def eval_connectivity(mesh0):
print('computing mesh connectivity')
npt=mesh0['Pts'].shape[0]
connectivity = {}
for jpt in range(npt):
connectivity[jpt] = []
for Tria in mesh0['Tria']:
for ilpt in range(3):
iglobalPt=Tria[ilpt]
for jlpt in range(1+ilpt,3):
jglobalPt=Tria[jlpt]
connectivity[iglobalPt].append(jglobalPt)
connectivity[jglobalPt].append(iglobalPt)
for key,value in connectivity.items():
connectivity[key]=np.unique(np.array(value,dtype=int))
return(connectivity)
def eval_local_mass(mesh0,iTri):
lmass = np.zeros(shape=(3,3),dtype=np.float32)
Tria=mesh0['Tria'][iTri]
v10 = mesh0['Pts'][Tria[1],:]-mesh0['Pts'][Tria[0],:]
v20 = mesh0['Pts'][Tria[2],:]-mesh0['Pts'][Tria[0],:]
N12 = np.cross(v10,v20)
Tsurf = 0.5*np.linalg.norm(N12)
for ipt in range(3):
lmass[ipt,ipt]=1.0/12.0
for jpt in range(1+ipt,3):
lmass[ipt,jpt] = 1.0/24.0
lmass[jpt,ipt] = lmass[ipt,jpt]
lmass = 2.0*Tsurf*lmass
return(lmass)
def eval_local_stiffness(mesh0,iTri):
Tria = mesh0['Tria'][iTri]
v10 = mesh0['Pts'][Tria[1],:]-mesh0['Pts'][Tria[0],:]
v20 = mesh0['Pts'][Tria[2],:]-mesh0['Pts'][Tria[0],:]
N12 = np.cross(v10,v20)
Tsurf = 0.5*np.linalg.norm(N12)
covbT = np.zeros(shape=(3,3),dtype=np.float32)
covbT[0,:2] = v10
covbT[1,:2] = v20
covbT[2,2] = N12/(2*Tsurf)
contrb = np.linalg.inv(covbT)
v1 = contrb[:,0]
v2 = contrb[:,1]
a = np.dot(v1,v1)
b = np.dot(v1,v2)
c = np.dot(v2,v2)
gij_c = np.array([[a,b],[b,c]],dtype=np.float32)
lgrad = np.array([[-1.0,1.0,0.0], [-1.0,0.0,1.0] ],dtype=np.float32)
lstif = Tsurf*np.matmul( np.matmul(lgrad.T,gij_c), lgrad )
return(lstif)
def compute_vectors_sparse_matrices(mesh0):
npt = mesh0['Pts'].shape[0]
connect = eval_connectivity(mesh0)
nzero = 0
for key,value in connect.items():
nzero += (1+value.shape[0])
I = np.zeros(shape=(nzero),dtype=int)
J = np.zeros(shape=(nzero),dtype=int)
VM = np.zeros(shape=(nzero),dtype=np.float32)
VS = np.zeros(shape=(nzero),dtype=np.float32)
k0 = np.zeros(shape=(npt+1),dtype=int)
k0[0] = 0
k = -1
for jpt in range(npt):
loc_con = connect[jpt].tolist()[:]
loc_con.append(jpt)
loc_con = np.sort(loc_con)
k0[jpt+1]=k0[jpt]+loc_con.shape[0]
for jloc in range(loc_con.shape[0]):
k=k+1
I[k]= jpt
J[k]= loc_con[jloc]
for iTr, Tria in enumerate(mesh0['Tria']):
lstiff = eval_local_stiffness(mesh0,iTr)
lmass = eval_local_mass(mesh0,iTr)
for iEntry,irow in enumerate(Tria):
loc_con = connect[irow].tolist()[:]
loc_con.append(irow)
loc_con = np.sort(loc_con)
for jEntry,jcol in enumerate(Tria):
indexEntry = k0[irow]+np.where(loc_con==jcol)[0]
VM[indexEntry] = VM[indexEntry]+lmass[iEntry,jEntry]
VS[indexEntry] = VS[indexEntry]+lstiff[iEntry,jEntry]
return(I,J,VM,VS)
def compute_global_sparse_matrices(mesh0):
I,J,VM,VS = compute_vectors_sparse_matrices(mesh0)
npt = mesh0['Pts'].shape[0]
MASS = coo_matrix((VM,(I,J)),shape=(npt,npt))
STIFF = coo_matrix((VS,(I,J)),shape=(npt,npt))
return(MASS,STIFF)
def compute_global_sparse_tensors(mesh0):
I,J,VM,VS = compute_vectors_sparse_matrices(mesh0)
npt = mesh0['Pts'].shape[0]
indices = np.hstack([I[:,np.newaxis], J[:,np.newaxis]])
MASS = tf.sparse.SparseTensor(indices=indices, values=VM.astype(np.float32), dense_shape=[npt, npt])
STIFF = tf.sparse.SparseTensor(indices=indices, values=VS.astype(np.float32), dense_shape=[npt, npt])
return(MASS,STIFF)
def compute_matrices_scipy(mesh0):
MASS,STIFF = compute_global_sparse_matrices(mesh0)
return(MASS,STIFF)
def compute_matrices_tensorflow(mesh0):
MASS,STIFF = compute_global_sparse_tensors(mesh0)
return(MASS,STIFF)
def conjgrad_scipy(A,b,x0,niter=100,toll=1.e-5):
x = np.copy(x0)
r = b - A * x
p = np.copy(r)
rsold = np.dot(r,r)
for it in range(niter):
Ap = A * p
alpha = rsold /np.dot(p,Ap)
x += alpha * p
r -= alpha * Ap
rsnew = np.dot(r,r)
if (np.sqrt(rsnew) < toll):
break
p = r + (rsnew / rsold) * p
rsold = rsnew
return([x,it,np.sqrt(rsnew)])
def conjgrad_tensorflow(A,b,x0,niter=100,toll=1.e-5):
x = x0
r = b - tf.sparse.sparse_dense_matmul(A,x)
p = r
rsold = tf.reduce_sum(tf.multiply(r, r))
for it in range(niter):
Ap = tf.sparse.sparse_dense_matmul(A,p)
alpha = rsold /tf.reduce_sum(tf.multiply(p, Ap))
x += alpha * p
r -= alpha * Ap
rsnew = tf.reduce_sum(tf.multiply(r, r))
if (tf.sqrt(rsnew) < toll):
break
p = r + (rsnew / rsold) * p
rsold = rsnew
return([x,it,tf.sqrt(rsnew)])
mesh = create_mesh(Lx=10,Ly=10,Nx=100,Ny=100)
x0 = tf.constant( (mesh['Pts'][:,0]<5 ).astype(np.float32) )
nit_time = 10
dcoef = 1.0
maxit = x0.shape[0]//2
stoll = 1.e-6
print('nb of nodes:\t{}'.format(mesh['Pts'].shape[0]))
print('nb of trias:\t{}'.format(mesh['Tria'].shape[0]))
t0 = time()
MASS0,STIFF0 = compute_matrices_scipy(mesh)
elapsed_scipy=time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed_scipy))
A = MASS0+dcoef*STIFF0
x = np.copy(np.squeeze(x0.numpy()) )
t0 = time()
for jt in range(nit_time):
b = MASS0*x
x1,it,tol=conjgrad_scipy(A,b,x,niter=maxit,toll=stoll)
x=np.copy(x1)
print('time {}; iters {}; resid: {:3.2f}'.format(1+jt,it,tol) )
elapsed_scipy=time()-t0
print('elapsed, scipy: {:3.5f} s'.format(elapsed_scipy))
t0 = time()
MASS,STIFF =compute_matrices_tensorflow(mesh)
elapsed=time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed))
x = None
x1 = None
A = tf.sparse.add(MASS,tf.sparse.map_values(tf.multiply, STIFF, dcoef))
x = tf.expand_dims(tf.identity(x0),axis=1)
t0 = time()
for jt in range(nit_time):
b = tf.sparse.sparse_dense_matmul(MASS,x)
x1,it,tol=conjgrad_tensorflow(A,b,x,niter=maxit,toll=stoll)
x = x1
print('time {}; iters {}; resid: {:3.2f}'.format(1+jt,it,tol) )
elapsed_tf=time()-t0
print('elapsed, tf: {:3.2f} s'.format(elapsed_tf))
print('elapsed times:')
print('scipy: {:3.2f} s\ttf: {:3.2f} s'.format(elapsed_scipy,elapsed_tf))
I have a normally distributed variable x (like product demand), an index id_1 (like product number) and a second index id_2 (like product group). My goal is to estimate the means and standard deviations for x hierarchically (overall > product group > product).
Here's my data:
import numpy as np
import pymc3 as pm
import arviz as az
# data
my_step_1 = 0.4
my_step_2 = 4.1
sd_step_1 = 0.1
sd_step_2 = 0.2
my = 10
sd = .1
grp_n = 8
grps_1 = 5
grps_2 = 4
x = np.round(np.concatenate([np.random.normal(my + i * my_step_1 + j * my_step_2, \
sd + i * sd_step_1 + j * sd_step_2, grp_n) \
for i in range(grps_1) for j in range(grps_2)]), 1) # demand
id_1 = np.repeat(np.arange(grps_1 * grps_2), grp_n) # group, product number
id_2 = np.tile(np.repeat(np.arange(grps_2), grp_n), grps_1) # super-group, product group
shape_1 = len(np.unique(id_1))
shape_2 = len(np.unique(id_2))
I've managed a single hierarchy:
with pm.Model() as model_h1:
#
mu_mu_hyper = pm.Normal('mu_mu_hyper', mu = 0, sd = 10)
mu_sd_hyper = pm.HalfNormal('mu_sd_hyper', 10)
sd_hyper = pm.HalfNormal('sd_hyper', 10)
#
mu = pm.Normal('mu', mu = mu_mu_hyper, sd = mu_sd_hyper, shape = shape_1)
sd = pm.HalfNormal('sd', sd = sd_hyper, shape = shape_1)
y = pm.Normal('y', mu = mu[id_1], sd = sd[id_1], observed = x)
trace_h1 = pm.sample(1000)
#az.plot_forest(trace_h1, var_names=['mu', 'sd'], combined = True)
But how can I code 2 hierarchies?
# 2 hierarchies .. doesn't work
with pm.Model() as model_h2:
#
mu_mu_hyper2 = pm.Normal('mu_mu_hyper2', mu = 0, sd = 10)
mu_sd_hyper2 = pm.HalfNormal('mu_sd_hyper2', sd = 10)
sd_mu_sd_hyper2 = pm.HalfNormal('sd_mu_sd_hyper2', sd = 10)
sd_hyper2 = pm.HalfNormal('sd_hyper2', sd = 10)
#
mu_mu_hyper1 = pm.Normal('mu_hyper1', mu = mu_mu_hyper2, sd = mu_sd_hyper2, shape = shape_2)
mu_sd_hyper1 = pm.HalfNormal('mu_sd_hyper1', sd = sd_mu_sd_hyper2, shape = shape_2)
sd_hyper1 = pm.HalfNormal('sd_hyper1', sd = sd_hyper2, shape = shape_2)
#sd_hyper1 = pm.HalfNormal('sd_hyper1', sd = sd_hyper2[id_2], shape = shape_2)??
#
mu = pm.Normal('mu', mu = mu_mu_hyper1, sd = mu_sd_hyper1, shape = shape_1)
sd = pm.HalfNormal('sd', sd = sd_hyper1, shape = shape_1)
y = pm.Normal('y', mu = mu[id_1], sd = sd[id_1], observed = x)
trace_h2 = pm.sample(1000)
You could try looping through the product groups and using each group's mean and standard deviation as constraints for the products belonging to that group.
# sample product group to product mapping
group_product_mapping = {0: [1, 2, 3], 1: [4, 5, 6]}
total_groups = len(group_product_mapping.keys())
with pm.Model() as model_h:
mu_all = pm.Normal('mu_all', 0, 10)
sd_all = pm.HalfNormal('sd_all', 10)
sd_mu_group = pm.HalfNormal('sd_mu_group', 10)
# group parameters constrained to mu, sd from all
mu_group = pm.Normal('mu_group', mu_all, sd_all, shape=total_groups)
sd_group = pm.HalfNormal('sd_group', sd_mu_group, shape=total_groups)
    mu_products = dict()
    sd_products = dict()
# iterate through groups and constrain product parameters to the product group they belong to
for idx, group in enumerate(group_product_mapping.keys()):
mu_products[group] = pm.Normal(f'mu_products_{group}', mu_group[idx], sd_group[idx], shape=len(group_product_mapping[group]))
        sd_products[group] = pm.HalfNormal(f'sd_products_{group}', 10)
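An alternative, if the loop gets unwieldy, is to vectorize the second level by indexing group-level priors with a product-to-group lookup, mirroring how mu[id_1] is used in the question. A sketch under that assumption, reusing x, id_1, id_2, shape_1 and shape_2 from above; group_of_product is a helper array I introduce here (one group id per product):

group_of_product = np.array([id_2[id_1 == p][0] for p in range(shape_1)])
with pm.Model() as model_h2v:
    # top level: priors shared by everything
    mu_mu_hyper2 = pm.Normal('mu_mu_hyper2', mu=0, sd=10)
    mu_sd_hyper2 = pm.HalfNormal('mu_sd_hyper2', sd=10)
    sd_hyper2 = pm.HalfNormal('sd_hyper2', sd=10)
    # group level: one mean and one sd scale per product group
    mu_group = pm.Normal('mu_group', mu=mu_mu_hyper2, sd=mu_sd_hyper2, shape=shape_2)
    sd_group = pm.HalfNormal('sd_group', sd=sd_hyper2, shape=shape_2)
    mu_sd_group = pm.HalfNormal('mu_sd_group', sd=10)
    # product level: each product draws from its own group's priors
    mu = pm.Normal('mu', mu=mu_group[group_of_product], sd=mu_sd_group, shape=shape_1)
    sd = pm.HalfNormal('sd', sd=sd_group[group_of_product], shape=shape_1)
    y = pm.Normal('y', mu=mu[id_1], sd=sd[id_1], observed=x)
    trace_h2v = pm.sample(1000)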
I am converting some code from MATLAB to Python, and I have encountered an issue I can't resolve. When iterating over the for loop in this section of code, it produces repeated values that are also incorrect. I believe this has to do with my definition of "x" and "z", but I am not quite sure. Here is my Python script; the matrices D2A1 and D2A2 contain the repeated blocks of incorrect values.
import sys
import numpy as np
import scipy as sp
import scipy.special as scl
import numpy.matlib as mat
###
#np.set_printoptions(threshold = sys.maxsize)
##
###Constants and Parameters
w = np.array([.09,.089])
a = np.array([0,3])
coup = np.array([w[0],0])/10
dE12 = -2*w[0]
gs = np.array([0,0])
ws = w**2
alpha = a[0]*ws[0]/a[1]/ws[1]
dEp = (dE12+a[0]**2*ws[0]/2+a[1]**2*ws[1]/2)/a[1]/ws[1]
ac = np.array([0,0],dtype = 'float')
ac[0] = alpha*dEp*ws[1]/(ws[0]+alpha**2*ws[1])
ac[1] = dEp - alpha*ac[0]
iS = 0 ## starting state
z0c = gs[1]
x0c = gs[0]
Mx = 128*2
Mz = 128*2
N = 2
dt = 0.05
#Now we need grid lengths L[1x1]
Lx = 10
Lz = 10
LxT = Lx*2
LzT = Lz*2
#x0-z0 = z0[1XM] = Grod of M points from 0 to L
x0 = np.array([np.linspace(-Lx,Lx,Mx)])
z0 = np.array([np.linspace(-Lz,Lz, Mz)])
x0op = np.transpose(np.matlib.repmat(x0,Mz,1))
z0op = np.matlib.repmat(z0,Mx,1)
## For loop over matricies
VDI = np.zeros((2,2),dtype = 'complex')
D2A1 = np.zeros(((2,Mx*Mz)),dtype = 'complex')
D2A2 = D2A1
V1 = D2A1
V2 = V1
VP1 = V1
VP2 = V1
for ig in range(Mz):
for jg in range(Mx):
z = z0[0,ig]
x = x0[0,jg]
        ### Diabatic matrix ###
VDI[0,0] = (w[1]*z)**2/2+(w[0]*x)**2/2
VDI[1,1] = (w[1]*(z-a[1]))**2/2+(w[0]*(x-a[0]))**2/2+dE12
VDI[0,1] = coup[1]*(z+ac[1])+coup[0]*(x+ac[0])
VDI[1,0] = VDI[0,1]
        ### Adiabatization ###
[VDt, U] = np.linalg.eigh(VDI)
VDt = np.array(VDt).reshape(2,1)
VDt = np.diagflat(VDt)
        UUdVP = np.array([U@sp.linalg.expm(-1.j*dt*VDt)@U.T])
        V = U@VDt@U.T
ixz = jg+(ig-1)*Mx
D2A1[:, ixz] = np.conj((U[:,0]))
D2A2[:, ixz] = np.conj((U[:,1]))
print(D2A1)
Below is the MATLAB loop I am trying to recreate.
VDI=zeros(2,2);
D2A1=zeros(2,Mx*Mz); D2A2=D2A1; V1=D2A1; V2=V1; VP1=V1; VP2=V1;
for ig=1:Mz,
for jg=1:Mx,
z = z0(ig); x = x0(jg);
% diabatic matrix
VDI(1,1) = (w(2)*z)^2/2+(w(1)*x)^2/2;
VDI(2,2) = (w(2)*(z-a(2)))^2/2+(w(2)*(x-a(1)))^2/2+dE12;
VDI(1,2) = coup(2)*(z+ac(2))+coup(1)*(x+ac(1)); VDI(2,1)=VDI(1,2);
% adiabatization
[U,VDt]=eig(VDI) ;
[VDt Ind]=sort(diag(VDt)); U=U(:,Ind);
UUdVP=U*diag(exp(-1i*dt*VDt))*U';
V=U*diag(VDt)*U';
ixz = jg + (ig-1)*Mx;
D2A1(:,ixz) = conj(U(:,1)); D2A2(:,ixz) = conj(U(:,2));
end; end;
Any help would be greatly appreciated. Thanks!
Fixed. The error was in the definition of the matrices to be generated. In Python, an assignment like D2A2 = D2A1 merely binds a second name to the same array object (an alias), so writes through one name show up in the other; each array must be created with its own np.zeros(...) call (or an explicit .copy()). In MATLAB, plain assignment copies the matrix, so chaining definitions like this through a for loop is safe.
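To illustrate the difference, a minimal sketch:

import numpy as np

# Aliasing: both names refer to the same buffer, so writes through one
# are visible through the other.
D2A1 = np.zeros((2, 4), dtype=complex)
D2A2 = D2A1            # alias, NOT a copy
D2A2[0, 0] = 1.0
print(D2A1[0, 0])      # (1+0j) -- D2A1 was modified too

# Independent arrays: allocate each one separately (or use .copy()).
D2A1 = np.zeros((2, 4), dtype=complex)
D2A2 = np.zeros((2, 4), dtype=complex)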
I have a problem using scipy.odr to fit my data.
Here is my data
https://www.dropbox.com/s/zuo6rqokcxsbnh5/data.txt?dl=0
1st column is data x, 2nd is data y
My Python fit code:
import numpy as np
import scipy.odr
import matplotlib.pyplot as plt
a = open('C:/Users/san/Desktop/data.txt','r')
data = np.loadtxt(a, delimiter=' ')
a.close()
#data x
f = data[:,0]
#data y
Soddre = data[:,1]
#####fit function#####
def S11fit(beta,f):
w = 2*np.pi*f
L1 = 25*1e-3
L2 = 25*1e-3
dL = 0.01*1e-3
z0 = 376.73415
Z0 = 50
ZL1 = 50
c = 2.997925e8
miu0 = z0/c
miur = 1
y0 = 1j*w/c;
episr = beta[0]+1j*beta[1]+(beta[2]+1j*beta[3])/(1+1j*beta[4]*w)#My target
ys = 1j*w/c*np.sqrt(episr*miur)
the = np.sqrt(episr/miur)
Zin3_1 = np.tanh(ys*L2)
Zin3_2 = the*np.tanh(y0*dL)
Zin3_3 = the*( 1+the*np.tanh(y0*dL)*np.tanh(ys*L2) )
Zin3 = ZL1*( Zin3_1 + Zin3_2 + Zin3_3*np.tanh(y0*L1) )\
/( Zin3_3 +( Zin3_1 + Zin3_2 )*np.tanh(y0*L1) )
S11t = (Zin3-Z0)/(Zin3+Z0)
return S11t.real
#Setting initial beta
A0r = 1
A0i = 0.02
A0 = A0r+1j*A0i
er1 = 1+0.02
er2 = 1+0.03
w = 2*np.pi*f  # needed here: w is otherwise only defined inside S11fit
B1 = ( er1-er2 )/( 1j*( -w[200]*(er1-A0)+w[300]*(er2-A0) ) )
A1 = (er2-A0)*(1+1j*w[300]*B1)
#Using scipy.odr to fit data
Model = scipy.odr.Model(S11fit)
Data = scipy.odr.Data(f,Soddre)
odr = scipy.odr.ODR(Data, Model, [A0r, A0i,A1.real, A1.imag, B1.real])
output = odr.run()
result = output.pprint()
beta = output.beta
print("ODR", beta)
#Fitting result
plt.plot(f, Soddre, "b")
plt.plot(f, S11fit(beta, f), "g--", lw = 1)  # pass f; S11fit computes w internally
plt.tight_layout()
plt.show()
#By fitting data get episr
episr = beta[0]+1j*beta[1]+(beta[2]+1j*beta[3])/(1+1j*beta[4]*w)
plt.plot(f, episr.real, "b")
Results
Beta: [ -5.13825455e+00 -1.08908338e+00 5.46365124e+00 -4.76830313e+01
-5.13452718e-10]
The graph of the fitting result and the reference:
https://www.dropbox.com/s/9yj4ulo57mqgcc2/Fitting%20result.pdf?dl=0
My fitting curve doesn't fit the data well.
Does anyone have a suggestion for getting a better fit?
I managed to fit my data well. The fix was to supply an uncertainty (standard deviation) for Soddre to the data.
Just add:
ca = np.full((1, 801), 0.01)
and modify Data:
Data = scipy.odr.RealData(f,Soddre,sy=ca)
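For context: as I understand the scipy.odr docs, RealData turns the supplied uncertainties into weights (the y-weights are computed as 1/sy**2), so the fit minimizes appropriately scaled residuals. A minimal sketch of the modified fitting section, reusing f, Soddre, Model and the beta0 values from the question; writing one value per sample is equivalent to the ca above:

ca = np.full(f.shape, 0.01)   # one standard deviation per data point
Data = scipy.odr.RealData(f, Soddre, sy=ca)
odr = scipy.odr.ODR(Data, Model, beta0=[A0r, A0i, A1.real, A1.imag, B1.real])
output = odr.run()
output.pprint()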
This question is a follow-up to my previous question here: Assistance, tips and guidelines for converting Matlab code to Python
I have converted the Matlab code manually. I am using macOS and running Python from the terminal. How do I run the code below for some value of N, where N is an even number? I should get a graph (specified by the plot code).
When I run it as is, I get nothing.
My code is below:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def Array(N):
    K00 = np.logspace(0,3,101)  # 101 points from 10**0 to 10**3 (base 10 is the default)
len1 = len(K00)
y0 = [0]*(3*N/2+3)
S = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
KS = [np.zeros((len1,1)) for kkkk in range(N/2)]
PS = [np.zeros((len1,1)) for kkkk in range(N/2)]
Splot = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
KSplot = [np.zeros((len1,1)) for kkkk in range(N/2)]
PSplot = [np.zeros((len1,1)) for kkkk in range(N/2)]
Kplot = np.zeros((len1,1))
Pplot = np.zeros((len1,1))
for series in range(0,len1):
K0 = K00[series]
Q = 10
r1 = 0.0001
r2 = 0.001
d = 0.001
a = 0.001
k = 0.999
P0 = 1
S10 = 1e5
tf = 1e10
time = np.linspace(0,tf,len1)
y0[0] = S10
y0[3*N/2+1] = K0
y0[3*N/2+2] = P0
for i in range(1,3*N/2+1):
y0[i] = 0
        y = odeint(EqnsArray, y0, time, args=(N,), mxstep=5000)  # odeint returns only the solution array; pass N explicitly
for alpha in range(0,(N/2+1)):
S[alpha] = y[:,alpha]
for beta in range((N/2)+1,N+1):
KS[beta-N/2-1] = y[:,beta]
for gamma in range(N+1,3*N/2+1):
PS[gamma-N-1] = y[:,gamma]
for alpha in range(0,(N/2+1)):
Splot[alpha][series] = y[len1-1,alpha]
for beta in range((N/2)+1,N+1):
KSplot[beta-N/2-1][series] = y[len1-1,beta]
for gamma in range(N+1,3*N/2+1):
PSplot[gamma-N-1][series] = y[len1-1,gamma]
        u1 = u2 = u3 = 0  # running totals; without this the accumulations below fail
        for alpha in range(0,(N/2+1)):
u1 = u1 + Splot[alpha]
for beta in range((N/2)+1,N+1):
u2 = u2 + KSplot[beta-N/2-1]
for gamma in range(N+1,3*N/2+1):
u3 = u3 + PSplot[gamma-N-1]
        K = y[:,3*N/2+1]
        P = y[:,3*N/2+2]
        Kplot[series] = y[len1-1,3*N/2+1]
        Pplot[series] = y[len1-1,3*N/2+2]
utot = u1+u2+u3
#Plot
plt.plot(np.log10(K00),utot)
plt.show()
def EqnsArray(y, t, N):
    S = [0]*(N/2+1)   # S, KS, PS are local to Array, so rebuild them here
    KS = [0]*(N/2)
    PS = [0]*(N/2)
    for alpha in range(0,(N/2+1)):
        S[alpha] = y[alpha]
for beta in range((N/2)+1,N+1):
KS[beta-N/2-1] = y[beta]
for gamma in range(N+1,3*N/2+1):
PS[gamma-N-1] = y[gamma]
K = y[3*N/2+1]
P = y[3*N/2+2]
# The model equations
ydot = np.zeros((3*N/2+3,1))
B = range((N/2)+1,N+1)
G = range(N+1,3*N/2+1)
runsumPS = 0
runsum1 = 0
runsumKS = 0
runsum2 = 0
for m in range(0,N/2):
runsumPS = runsumPS + PS[m]
runsum1 = runsum1 + S[m+1]
runsumKS = runsumKS + KS[m]
runsum2 = runsum2 + S[m]
ydot[B[m]] = a*K*S[m]-(d+k+r1)*KS[m]
for i in range(0,N/2-1):
ydot[G[i]] = a*P*S[i+1]-(d+k+r1)*PS[i]
for p in range(1,N/2):
ydot[p] = -S[p]*(r1+a*K+a*P)+k*KS[p-1]+d*(PS[p-1]+KS[p])
ydot[0] = Q-(r1+a*K)*S[0]+d*KS[0]+k*runsumPS
ydot[N/2] = k*KS[N/2-1]-(r2+a*P)*S[N/2]+d*PS[N/2-1]
ydot[G[N/2-1]] = a*P*S[N/2]-(d+k+r2)*PS[N/2-1]
ydot[3*N/2+1] = (d+k+r1)*runsumKS-a*K*runsum2
ydot[3*N/2+2] = (d+k+r1)*(runsumPS-PS[N/2-1])- \
a*P*runsum1+(d+k+r2)*PS[N/2-1]
ydot_new = []
for j in range(0,3*N/2+3):
ydot_new.extend(ydot[j])
return ydot_new
You have to call your function, e.g. Array(12); add that call at the end of your code.
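For example, with a guard so it runs only when the script is executed directly (N must be even, as noted in the question):

if __name__ == '__main__':
    Array(12)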