I'm trying to run the following Python code from MATLAB:
from numba import autojit
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.integrate import quad
from scipy.special import jv
def make_p(p_max):
p_list = []
for p in range(p_max+1):
temp = []
for u in range(p+1):
for r in range(p+1):
for i in range(p+1):
if u+r+i == p:
temp.append([u,r,i])
p_list.append(temp)
return p_list
#autojit
def p_find(e,Ae,Aho,k,q,p_max,p_list):
bessels_sqr_0 = (jv(0,Ae*k*q/2)*jv(0,Aho*k*q/4)*jv(0,Aho*k*q/4))**2
if bessels_sqr_0 > 0.99:
return 0
else:
for i in range(1,len(p_list)):
bessels_sqr = 0
for triplets in p_list[i]:
n,j,l = triplets[0], triplets[1], triplets[2]
bessels_sqr += (jv(n,Ae*k*q/2)*jv(j,Aho*k*q/4)*jv(l,Aho*k*q/4))**2
if bessels_sqr_0 + bessels_sqr > 0.99:
return i
return p_max
#autojit
def integrand1(x,Dm,O_sqr,G_sqr,Dl,h,Ae,Aho,o_rf,o_ho,k,q):
sum_func = 0
for u in h:
n,j,l = u
sum_func += (O_sqr*(jv(n,Ae*k*q/2)*jv(j,Aho*k*q/4)*jv(l,Aho*k*q/4))**2)/(G_sqr+2*O_sqr*((jv(n,Ae*k*q/2)*jv(j,Aho*k*q/4)*jv(l,Aho*k*q/4))**2)+4*(Dl-o_rf*(n-l+j)-o_ho*(l+j)+Dm*x)**2)
return (x/(np.pi*np.sqrt(1-x**2)))*sum_func
#autojit
def dEdt1(O_sqr,G_sqr,Dl,Dm,hb,h,Ae,Aho,o_rf,o_ho,k,q):
return -hb*np.sqrt(G_sqr)*Dm*quad(integrand1,-1,1,args=(Dm,O_sqr,G_sqr,Dl,h,Ae,Aho,o_rf,o_ho,k,q), epsabs = 1e-35)[0]
#autojit
def integrand2(x,Dm,O_sqr,G_sqr,Dl):
return (x/(np.pi*np.sqrt(1-x**2)))*(O_sqr/(G_sqr+2*O_sqr+4*(Dl+Dm*x)**2))
#autojit
def dEdt2(O_sqr,G_sqr,Dl,Dm,hb):
return -hb*np.sqrt(G_sqr)*Dm*quad(integrand2,-1,1,args=(Dm,O_sqr,G_sqr,Dl), epsabs = 1e-35)[0]
def main():
kb = 1.38064852e-23
hb = 1.054571800e-34
s = 0.5
G_sqr = (2*np.pi*21.5e6)**2
k = 2*np.pi/(422e-9)
O_sqr = s*G_sqr/2
dt = 1e-5
m = 87.9*1.660539040e-27
max_t = 2e-3
tt = np.arange(0,max_t,dt)
Dl = -2*np.pi*10e6
T0 = 1
E_array = np.zeros(len(tt))
E_array[0] = kb*T0
p_max = 5
o_rf = 2*np.pi*26.51e6
a = 0
q = 0.1
o_ho = o_rf/(2*np.sqrt(a+q**2/2))
Ae = 1e-9
p_list = make_p(p_max)
for i in range(len(E_array)-1):
if i%int(len(E_array)/10) == 0:
print(i/(len(E_array)-1)/10)
Aho = np.sqrt(2*E_array[i]/(m*o_ho**2))
p_min = p_find(E_array[i],Ae,Aho,k,q,p_max,p_list)
if p_min > 0:
print(p_min)
for h in p_list[:p_min+1]:
if i < len(E_array):
k1 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*E_array[i]/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
k2 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k1)/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
k3 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k2)/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
k4 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+dt*k3)/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
E_array[i+1] = E_array[i]+dt/6*(k1+2*k2+2*k3+k4)
else:
if i < len(E_array):
k1 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*E_array[i]/m),hb)
k2 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k1)/m),hb)
k3 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k2)/m),hb)
k4 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+dt*k3)/m),hb)
E_array[i+1] = E_array[i]+dt/6*(k1+2*k2+2*k3+k4)
return E_array, tt, kb
start = time.time()
E_array, tt, kb = main()
end = time.time()
print('runtime: {:.3f}'.format(end-start))
plt.figure()
plt.title('Temperature vs time')
plt.plot(tt*1e3,E_array/kb, linestyle = '', marker = 'o', color='black')
plt.show()
The problem is that when I call py.matcode.main() from MATLAB, I get the following error message: Python Error:
main() missing 14 required positional arguments: 'E_array', 'k', 'm',
'O_sqr', 'G_sqr', 'Dl', 'hb', 'kb', 'TD', 'method', 'p_list', 'o_ho',
'Ae', and 'p_max'
Even though MATLAB reports that Python has a problem running my code, the code runs fine when I run it directly in Python.
Moreover, I am able to call py.matcode.make_p(py.int(5)) and it gives the correct result.
Can anyone help me find what I'm doing wrong?
Change the bottom of your code so that the script-level work (timing, calling main(), plotting) only runs when the file is executed directly, not when MATLAB imports it as a module:
if __name__ == "__main__":
start = time.time()
E_array, tt, kb = main()
end = time.time()
print('runtime: {:.3f}'.format(end-start))
plt.figure()
plt.title('Temperature vs time')
plt.plot(tt*1e3,E_array/kb, linestyle = '', marker = 'o', color='black')
plt.show()
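The point of the guard is that everything under it runs only when the file is executed directly as a script; when MATLAB imports the module and calls py.matcode.main(), the guard is skipped and only the function definitions are loaded. A minimal sketch illustrating the behaviour (the module name demo is just an example, not your actual file):

# demo.py -- toy module illustrating the __main__ guard
def main():
    return "result"

if __name__ == "__main__":
    # executed only for `python demo.py`, not for `import demo`
    # (importing is what happens when MATLAB calls py.demo.main())
    print(main())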
I implemented the conjugate gradient method using TensorFlow to solve a sparse linear system (i.e., to apply the inverse of a sparse matrix without forming it).
The matrix I used to test the method is well-conditioned, as it is the sum of a mass matrix and a stiffness matrix obtained with finite elements.
I compared it with the same method implemented with SciPy, on the same data.
The solutions obtained with the two implementations are the same; however, TensorFlow is about 5 times slower (I tested in the Colab environment).
In Colab, SciPy ran in 0.27 s, while TensorFlow required 1.37 s.
Why is the algorithm so slow under TensorFlow?
I cannot convert to dense matrices, as I want to use this with large matrices (100k x 100k or more).
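For reference, the linear system being solved at each time step in the code below is

\( (M + d\,K)\,x_{\mathrm{new}} = M\,x_{\mathrm{old}} \)

where M is the finite-element mass matrix, K the stiffness matrix, and d a scalar coefficient (dcoef in the code); the conjugate gradient loop iterates on this system in both implementations.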
Here is the code I used to test this:
import tensorflow as tf
import numpy as np
from scipy.sparse import coo_matrix,linalg
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from time import time
from scipy.spatial import Delaunay
def create_mesh(Lx=1,Ly=1,Nx=100,Ny=100):
mesh0=dict()
dx = Lx/Nx
dy = Ly/Ny
XX,YY=np.meshgrid(np.arange(0,Lx+dx,dx),np.arange(0,Ly+dy,dy))
points=np.vstack((XX.ravel(),YY.ravel())).T
#np.random.shuffle(points)
tri = Delaunay(points)
mesh0['Pts']=np.copy(points).astype(np.float32)
mesh0['Tria']=np.copy(tri.simplices).astype(int)
return(mesh0)
def eval_connectivity(mesh0):
print('computing mesh connectivity')
npt=mesh0['Pts'].shape[0]
connectivity = {}
for jpt in range(npt):
connectivity[jpt] = []
for Tria in mesh0['Tria']:
for ilpt in range(3):
iglobalPt=Tria[ilpt]
for jlpt in range(1+ilpt,3):
jglobalPt=Tria[jlpt]
connectivity[iglobalPt].append(jglobalPt)
connectivity[jglobalPt].append(iglobalPt)
for key,value in connectivity.items():
connectivity[key]=np.unique(np.array(value,dtype=int))
return(connectivity)
def eval_local_mass(mesh0,iTri):
lmass = np.zeros(shape=(3,3),dtype=np.float32)
Tria=mesh0['Tria'][iTri]
v10 = mesh0['Pts'][Tria[1],:]-mesh0['Pts'][Tria[0],:]
v20 = mesh0['Pts'][Tria[2],:]-mesh0['Pts'][Tria[0],:]
N12 = np.cross(v10,v20)
Tsurf = 0.5*np.linalg.norm(N12)
for ipt in range(3):
lmass[ipt,ipt]=1.0/12.0
for jpt in range(1+ipt,3):
lmass[ipt,jpt] = 1.0/24.0
lmass[jpt,ipt] = lmass[ipt,jpt]
lmass = 2.0*Tsurf*lmass
return(lmass)
def eval_local_stiffness(mesh0,iTri):
Tria = mesh0['Tria'][iTri]
v10 = mesh0['Pts'][Tria[1],:]-mesh0['Pts'][Tria[0],:]
v20 = mesh0['Pts'][Tria[2],:]-mesh0['Pts'][Tria[0],:]
N12 = np.cross(v10,v20)
Tsurf = 0.5*np.linalg.norm(N12)
covbT = np.zeros(shape=(3,3),dtype=np.float32)
covbT[0,:2] = v10
covbT[1,:2] = v20
covbT[2,2] = N12/(2*Tsurf)
contrb = np.linalg.inv(covbT)
v1 = contrb[:,0]
v2 = contrb[:,1]
a = np.dot(v1,v1)
b = np.dot(v1,v2)
c = np.dot(v2,v2)
gij_c = np.array([[a,b],[b,c]],dtype=np.float32)
lgrad = np.array([[-1.0,1.0,0.0], [-1.0,0.0,1.0] ],dtype=np.float32)
lstif = Tsurf*np.matmul( np.matmul(lgrad.T,gij_c), lgrad )
return(lstif)
def compute_vectors_sparse_matrices(mesh0):
npt = mesh0['Pts'].shape[0]
connect = eval_connectivity(mesh0)
nzero = 0
for key,value in connect.items():
nzero += (1+value.shape[0])
I = np.zeros(shape=(nzero),dtype=int)
J = np.zeros(shape=(nzero),dtype=int)
VM = np.zeros(shape=(nzero),dtype=np.float32)
VS = np.zeros(shape=(nzero),dtype=np.float32)
k0 = np.zeros(shape=(npt+1),dtype=int)
k0[0] = 0
k = -1
for jpt in range(npt):
loc_con = connect[jpt].tolist()[:]
loc_con.append(jpt)
loc_con = np.sort(loc_con)
k0[jpt+1]=k0[jpt]+loc_con.shape[0]
for jloc in range(loc_con.shape[0]):
k=k+1
I[k]= jpt
J[k]= loc_con[jloc]
for iTr, Tria in enumerate(mesh0['Tria']):
lstiff = eval_local_stiffness(mesh0,iTr)
lmass = eval_local_mass(mesh0,iTr)
for iEntry,irow in enumerate(Tria):
loc_con = connect[irow].tolist()[:]
loc_con.append(irow)
loc_con = np.sort(loc_con)
for jEntry,jcol in enumerate(Tria):
indexEntry = k0[irow]+np.where(loc_con==jcol)[0]
VM[indexEntry] = VM[indexEntry]+lmass[iEntry,jEntry]
VS[indexEntry] = VS[indexEntry]+lstiff[iEntry,jEntry]
return(I,J,VM,VS)
def compute_global_sparse_matrices(mesh0):
I,J,VM,VS = compute_vectors_sparse_matrices(mesh0)
npt = mesh0['Pts'].shape[0]
MASS = coo_matrix((VM,(I,J)),shape=(npt,npt))
STIFF = coo_matrix((VS,(I,J)),shape=(npt,npt))
return(MASS,STIFF)
def compute_global_sparse_tensors(mesh0):
I,J,VM,VS = compute_vectors_sparse_matrices(mesh0)
npt = mesh0['Pts'].shape[0]
indices = np.hstack([I[:,np.newaxis], J[:,np.newaxis]])
MASS = tf.sparse.SparseTensor(indices=indices, values=VM.astype(np.float32), dense_shape=[npt, npt])
STIFF = tf.sparse.SparseTensor(indices=indices, values=VS.astype(np.float32), dense_shape=[npt, npt])
return(MASS,STIFF)
def compute_matrices_scipy(mesh0):
MASS,STIFF = compute_global_sparse_matrices(mesh0)
return(MASS,STIFF)
def compute_matrices_tensorflow(mesh0):
MASS,STIFF = compute_global_sparse_tensors(mesh0)
return(MASS,STIFF)
def conjgrad_scipy(A,b,x0,niter=100,toll=1.e-5):
x = np.copy(x0)
r = b - A * x
p = np.copy(r)
rsold = np.dot(r,r)
for it in range(niter):
Ap = A * p
alpha = rsold /np.dot(p,Ap)
x += alpha * p
r -= alpha * Ap
rsnew = np.dot(r,r)
if (np.sqrt(rsnew) < toll):
break
p = r + (rsnew / rsold) * p
rsold = rsnew
return([x,it,np.sqrt(rsnew)])
def conjgrad_tensorflow(A,b,x0,niter=100,toll=1.e-5):
x = x0
r = b - tf.sparse.sparse_dense_matmul(A,x)
p = r
rsold = tf.reduce_sum(tf.multiply(r, r))
for it in range(niter):
Ap = tf.sparse.sparse_dense_matmul(A,p)
alpha = rsold /tf.reduce_sum(tf.multiply(p, Ap))
x += alpha * p
r -= alpha * Ap
rsnew = tf.reduce_sum(tf.multiply(r, r))
if (tf.sqrt(rsnew) < toll):
break
p = r + (rsnew / rsold) * p
rsold = rsnew
return([x,it,tf.sqrt(rsnew)])
mesh = create_mesh(Lx=10,Ly=10,Nx=100,Ny=100)
x0 = tf.constant( (mesh['Pts'][:,0]<5 ).astype(np.float32) )
nit_time = 10
dcoef = 1.0
maxit = x0.shape[0]//2
stoll = 1.e-6
print('nb of nodes:\t{}'.format(mesh['Pts'].shape[0]))
print('nb of trias:\t{}'.format(mesh['Tria'].shape[0]))
t0 = time()
MASS0,STIFF0 = compute_matrices_scipy(mesh)
elapsed_scipy=time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed_scipy))
A = MASS0+dcoef*STIFF0
x = np.copy(np.squeeze(x0.numpy()) )
t0 = time()
for jt in range(nit_time):
b = MASS0*x
x1,it,tol=conjgrad_scipy(A,b,x,niter=maxit,toll=stoll)
x=np.copy(x1)
print('time {}; iters {}; resid: {:3.2f}'.format(1+jt,it,tol) )
elapsed_scipy=time()-t0
print('elapsed, scipy: {:3.5f} s'.format(elapsed_scipy))
t0 = time()
MASS,STIFF =compute_matrices_tensorflow(mesh)
elapsed=time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed))
x = None
x1 = None
A = tf.sparse.add(MASS,tf.sparse.map_values(tf.multiply, STIFF, dcoef))
x = tf.expand_dims(tf.identity(x0),axis=1)
t0 = time()
for jt in range(nit_time):
b = tf.sparse.sparse_dense_matmul(MASS,x)
x1,it,tol=conjgrad_tensorflow(A,b,x,niter=maxit,toll=stoll)
x = x1
print('time {}; iters {}; resid: {:3.2f}'.format(1+jt,it,tol) )
elapsed_tf=time()-t0
print('elapsed, tf: {:3.2f} s'.format(elapsed_tf))
print('elapsed times:')
print('scipy: {:3.2f} s\ttf: {:3.2f} s'.format(elapsed_scipy,elapsed_tf))
I've been trying to solve the water hammer PDEs from the Maple example linked below in Python (NumPy/SciPy). I'm getting very unstable results. Can anyone see my mistake? I'm guessing something is wrong with the boundary conditions.
https://www.maplesoft.com/support/help/view.aspx?path=applications/WaterHammer
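For reference, the one-dimensional water hammer equations I am trying to discretize (from the Maple page, as I understand them) are

\[ \frac{\partial V}{\partial t} = -\frac{1}{\rho}\frac{\partial P}{\partial x} - \frac{f(V)\,V\,\lvert V\rvert}{2D}, \qquad \frac{\partial P}{\partial t} = -K_s\,\frac{\partial V}{\partial x}, \]

where V is the velocity, P the pressure, \(\rho\) the density, f the Darcy-Weisbach friction factor, D the pipe diameter, and K_s the effective bulk modulus (Ks in the code).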
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
## Parameters
Dia = 0.1
V = 14.19058741 # Steady state
p = 1000 # Liquid density
u = 0.001 # Viscosity
L = 25
e = 0.0001 # Roughness
Psource = 0.510E6
thick = 0.001
E= 7010*10**9
K=20010E6
Vsteady= 14.19058741
Ks = 1/((1/K)+(Dia/E*thick))
# Darcy-Weisbach
def Friction(V):
Rey = ((Dia*V*p)/u)
fL = 64/Rey
fT = 1/((1.8*np.log10((6.9/Rey) + (e/(3.7*Dia))**1.11))**2)
if Rey >= 0 and Rey < 2000:
return fL
if Rey >= 2000 and Rey<4000:
return fL + ((fT-fL)*(Rey-2000))/(4000-2000)
if Rey >= 4000:
return fT
return 0
def model(D, t):
V = D[:N]
P = D[N:]
dVdt = np.zeros(N)
for i in range(1, len(dVdt)-1):
dVdt[i] = -(1/p)*((P[i+1]-P[i-1])/2*dx)-((Friction(np.abs(V[i]))*(np.abs(V[i])**2))/(2*Dia))
dPdt = np.zeros(N)
for i in range(1, len(dPdt)-1):
dPdt[i] = -((V[i+1]-V[i-1])/(2*dx))*Ks
if t < 2:
dVdt[29] = 0
else:
dVdt[29] = -1
dPdt[29] = 0
dVdt[0] = dVdt[1]
return np.append(dVdt,dPdt)
N = 30
x = np.linspace(0, L, N)
dx = x[1] - x[0]
## Initial conditions
Vi_0 = np.ones(N)*Vsteady
Pi_0 = np.arange(N)
for i in Pi_0:
Pi_0[i] = Psource - (i*dx/L)*Psource
# initial condition
y0 = np.append(Vi_0, Pi_0)
# time points
t = np.linspace(0,3,10000)
# solve ODE
y = odeint(model,y0,t)
Vr = y[:,0:N]
Pr = y[:,N:]
plt.plot(t,Pr[:,5])
I have an improved k-means implementation (KPlusPlus) that builds on the class KMeans. DetK is another class that inherits from KPlusPlus.
The purpose of the KPlusPlus class is to find the optimal seeding for the k-means centroids (Source).
DetK calculates the gap statistic to find the optimal number of clusters. I found this code here.
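For reference, the statistic the DetK code computes (following Tibshirani et al.'s gap statistic, as I understand the source it came from) is

\[ W_k = \sum_{r=1}^{k} \frac{1}{2 n_r} \sum_{x_i \in C_r} \lVert x_i - \mu_r \rVert^2, \qquad \mathrm{Gap}(k) = \frac{1}{B} \sum_{b=1}^{B} \log W_k^{(b)} - \log W_k, \]

where the \(W_k^{(b)}\) come from B reference data sets drawn uniformly over the bounding box of the data, and the spread of the \(\log W_k^{(b)}\) gives the s_k used in the stopping criterion.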
# kmeans class
class KMeans():
def __init__(self, K, X=None, N=0):
self.K = K
if X == None:
if N == 0:
raise Exception("If no data is provided, \
a parameter N (number of points) is needed")
else:
self.N = N
self.X = self._init_board_gauss(N, K)
else:
self.X = X
self.N = len(X)
self.mu = None
self.clusters = None
self.method = None
def _init_board_gauss(self, N, k):
n = float(N)/k
X = []
for i in range(k):
c = (random.uniform(-1,1), random.uniform(-1,1))
s = random.uniform(0.05,0.15)
x = []
while len(x) < n:
a,b = np.array([np.random.normal(c[0],s),np.random.normal(c[1],s)])
# Continue drawing points from the distribution in the range [-1,1]
if abs(a) and abs(b)<1:
x.append([a,b])
X.extend(x)
X = np.array(X)[:N]
return X
def plot_board(self):
X = self.X
fig = plt.figure(figsize=(5,5))
plt.xlim(-1,1)
plt.ylim(-1,1)
if self.mu and self.clusters:
mu = self.mu
clus = self.clusters
K = self.K
for m, clu in clus.items():
cs = cm.spectral(1.*m/self.K)
plt.plot(mu[m][0], mu[m][1], 'o', marker='*', \
markersize=12, color=cs)
plt.plot(zip(*clus[m])[0], zip(*clus[m])[1], '.', \
markersize=8, color=cs, alpha=0.5)
else:
plt.plot(zip(*X)[0], zip(*X)[1], '.', alpha=0.5)
if self.method == '++':
tit = 'K-means++'
else:
tit = 'K-means with random initialization'
pars = 'N=%s, K=%s' % (str(self.N), str(self.K))
plt.title('\n'.join([pars, tit]), fontsize=16)
plt.savefig('kpp_N%s_K%s.png' % (str(self.N), str(self.K)), \
bbox_inches='tight', dpi=200)
def _cluster_points(self):
mu = self.mu
clusters = {}
for x in self.X:
bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) \
for i in enumerate(mu)], key=lambda t:t[1])[0]
try:
clusters[bestmukey].append(x)
except KeyError:
clusters[bestmukey] = [x]
self.clusters = clusters
def _reevaluate_centers(self):
clusters = self.clusters
newmu = []
keys = sorted(self.clusters.keys())
for k in keys:
newmu.append(np.mean(clusters[k], axis = 0))
self.mu = newmu
def _has_converged(self):
K = len(self.oldmu)
return(set([tuple(a) for a in self.mu]) == \
set([tuple(a) for a in self.oldmu])\
and len(set([tuple(a) for a in self.mu])) == K)
def find_centers(self,K, method='random'):
self.method = method
X = self.X
K = self.K
self.oldmu = random.sample(X, K)
if method != '++':
# Initialize to K random centers
self.mu = random.sample(X, K)
while not self._has_converged():
self.oldmu = self.mu
# Assign all points in X to clusters
self._cluster_points()
# Reevaluate centers
self._reevaluate_centers()
The KPlusPlus class inherits from kmeans to find the optimal seeding
class KPlusPlus(KMeans):
def _dist_from_centers(self):
cent = self.mu
X = self.X
D2 = np.array([min([np.linalg.norm(x-c)**2 for c in cent]) for x in X])
self.D2 = D2
def _choose_next_center(self):
self.probs = self.D2/self.D2.sum()
self.cumprobs = self.probs.cumsum()
r = random.random()
ind = np.where(self.cumprobs >= r)[0][0]
return(self.X[ind])
def init_centers(self,K):
self.K = K
self.mu = random.sample(self.X, 1)
while len(self.mu) < self.K:
self._dist_from_centers()
self.mu.append(self._choose_next_center())
def plot_init_centers(self):
X = self.X
fig = plt.figure(figsize=(5,5))
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.plot(zip(*X)[0], zip(*X)[1], '.', alpha=0.5)
plt.plot(zip(*self.mu)[0], zip(*self.mu)[1], 'ro')
plt.savefig('kpp_init_N%s_K%s.png' % (str(self.N),str(self.K)), \
bbox_inches='tight', dpi=200)
The class Detk inherits from KPlusPlus to find the optmal number of clusters based on gap statistic
class DetK(KPlusPlus):
def fK(self, thisk, Skm1=0):
X = self.X
Nd = len(X[0])
a = lambda k, Nd: 1 - 3/(4*Nd) if k == 2 else a(k-1, Nd) + (1-a(k-1, Nd))/6
self.find_centers(thisk, method='++')
mu, clusters = self.mu, self.clusters
Sk = sum([np.linalg.norm(mu[i]-c)**2 \
for i in range(thisk) for c in clusters[i]])
if thisk == 1:
fs = 1
elif Skm1 == 0:
fs = 1
else:
fs = Sk/(a(thisk,Nd)*Skm1)
return fs, Sk
def _bounding_box(self):
X = self.X
xmin, xmax = min(X,key=lambda a:a[0])[0], max(X,key=lambda a:a[0])[0]
ymin, ymax = min(X,key=lambda a:a[1])[1], max(X,key=lambda a:a[1])[1]
return (xmin,xmax), (ymin,ymax)
def gap(self, thisk):
X = self.X
(xmin,xmax), (ymin,ymax) = self._bounding_box()
self.init_centers(thisk)
self.find_centers(thisk, method='++')
mu, clusters = self.mu, self.clusters
Wk = np.log(sum([np.linalg.norm(mu[i]-c)**2/(2*len(c)) \
for i in range(thisk) for c in clusters[i]]))
# Create B reference datasets
B = 10
BWkbs = zeros(B)
for i in range(B):
Xb = []
for n in range(len(X)):
Xb.append([random.uniform(xmin,xmax), \
random.uniform(ymin,ymax)])
Xb = np.array(Xb)
kb = DetK(thisk, X=Xb)
kb.init_centers(thisk)
kb.find_centers(thisk, method='++')
ms, cs = kb.mu, kb.clusters
BWkbs[i] = np.log(sum([np.linalg.norm(ms[j]-c)**2/(2*len(c)) \
for j in range(thisk) for c in cs[j]]))
Wkb = sum(BWkbs)/B
sk = np.sqrt(sum((BWkbs-Wkb)**2)/float(B))*np.sqrt(1+1/B)
return Wk, Wkb, sk
def run(self, maxk, which='both'):
ks = range(1,maxk)
fs = zeros(len(ks))
Wks,Wkbs,sks = zeros(len(ks)+1),zeros(len(ks)+1),zeros(len(ks)+1)
# Special case K=1
self.init_centers(1)
if which == 'f':
fs[0], Sk = self.fK(1)
elif which == 'gap':
Wks[0], Wkbs[0], sks[0] = self.gap(1)
else:
fs[0], Sk = self.fK(1)
Wks[0], Wkbs[0], sks[0] = self.gap(1)
# Rest of Ks
for k in ks[1:]:
self.init_centers(k)
if which == 'f':
fs[k-1], Sk = self.fK(k, Skm1=Sk)
elif which == 'gap':
Wks[k-1], Wkbs[k-1], sks[k-1] = self.gap(k)
else:
fs[k-1], Sk = self.fK(k, Skm1=Sk)
Wks[k-1], Wkbs[k-1], sks[k-1] = self.gap(k)
if which == 'f':
self.fs = fs
elif which == 'gap':
G = []
for i in range(len(ks)):
G.append((Wkbs-Wks)[i] - ((Wkbs-Wks)[i+1]-sks[i+1]))
self.G = np.array(G)
else:
self.fs = fs
G = []
for i in range(len(ks)):
G.append((Wkbs-Wks)[i] - ((Wkbs-Wks)[i+1]-sks[i+1]))
self.G = np.array(G)
When I try to run the following program on a given number of points (locArray)
locArray = np.array(locArrayMaster[counter])
kmeanscluster = DetK(2, X = locArray)
kmeanscluster.run(5)
noClusters[counter] = np.where(kmeanscluster.fs == min(kmeanscluster.fs))[0][0]+ 1
it returns the following error:
File "C:\Users\Anaconda2\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 714, in runfile
execfile(filename, namespace)
File "C:\Users\Anaconda2\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 74, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "C:/Users/Documents/SUMOTraffic/kplusplus.py", line 355, in <module>
kmeanscluster.run(5)
File "C:/Users/Documents/SUMOTraffic/kplusplus.py", line 217, in run
Wks[0], Wkbs[0], sks[0] = self.gap(1)
File "C:/Users/Documents/SUMOTraffic/kplusplus.py", line 200, in gap
for j in range(thisk) for c in cs[j]]))
TypeError: 'NoneType' object has no attribute '__getitem__'
Thanks for any help.
The error is due to the k-means algorithm failing to find the cluster centres when the number of clusters is just 1, so the clusters dictionary is never created in that case. So I added an extra check in the class DetK: if the clusters attribute is not a dict (i.e. it is still None), the cluster centres are recalculated until it is.
class DetK(KPlusPlus):
def fK(self, thisk, Skm1=0):
X = self.X
Nd = len(X[0])
a = lambda k, Nd: 1 - 3/(4*Nd) if k == 2 else a(k-1, Nd) + (1-a(k-1, Nd))/6
self.find_centers(thisk, method='++')
while type(self.clusters) is not dict:
self.find_centers(thisk, method = '++')
mu, clusters = self.mu, self.clusters
Sk = sum([np.linalg.norm(mu[i]-c)**2 \
for i in range(thisk) for c in clusters[i]])
if thisk == 1:
fs = 1
elif Skm1 == 0:
fs = 1
else:
fs = Sk/(a(thisk,Nd)*Skm1)
return fs, Sk
I have been using matplotlib in Python to animate the 1D wave equation, but I'm having trouble making the animation. I want the image of the wave to change with time, which means I need a loop that produces a different picture of the wave at each time step. However, time does not seem to enter the plotted function, so the images do not change at all. Please help me find the mistake I made.
Here is the code I wrote (part of it comes from the book "Python Scripting for Computational Science"):
from numpy import zeros,linspace,sin,pi
import matplotlib.pyplot as mpl
def I(x):
return sin(2*x*pi/L)
def f(x,t):
return sin(x*t)
def solver0(I,f,c,L,n,dt,tstop):
# f is a function of x and t, I is a function of x
x = linspace(0,L,n+1)
dx = L/float(n)
if dt <= 0:
dt = dx/float(c)
C2 = (c*dt/dx)**2
dt2 = dt*dt
up = zeros(n+1)
u = up.copy()
um = up.copy()
t = 0.0
for i in range(0,n):
u[i] = I(x[i])
for i in range(1,n-1):
um[i] = u[i]+0.5*C2*(u[i-1] - 2*u[i] + u[i+1]) + dt2*f(x[i],t)
um[0] = 0
um[n] = 0
while t <= tstop:
t_old = t
t += dt
#update all inner points:
for i in range(1,n-1):
up[i] = -um[i] + 2*u[i] + C2*(u[i-1] - 2*u[i] + u[i+1]) + dt2*f(x[i],t_old)
#insert boundary conditions:
up[0] = 0
up[n] = 0
#update data structures for next step
um = u.copy()
u = up.copy()
return u
c = 3.0 #given by myself
L = 10
n = 100
dt = 0
tstart = 0
tstop = 6
x = linspace(0,L,n+1)
t_values = linspace(tstart,tstop,31)
mpl.ion()
y = solver0(I, f, c, L, n, dt, tstop)
lines = mpl.plot(x,y)
mpl.axis([x[0], x[-1], -1.0, 1.0])
mpl.xlabel('x')
mpl.ylabel('y')
counter = 0
for t in t_values:
y = solver0(I,f,c,L,n,dt,tstop)
lines[0].set_ydata(y)
mpl.draw()
mpl.legend(['t=%4.1f' % t])
mpl.savefig('sea_%04d.png' %counter)
counter += 1
Maybe this is what you need: inside the plotting loop, pass the current time t as the stop time instead of the fixed tstop, so each frame shows the wave at a different time:
y = solver0(I,f,c,L,n,dt,t)
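A minimal sketch of the adjusted loop, keeping your solver0 and the rest of the setup unchanged (recomputing from t = 0 for every frame is wasteful but matches the structure you already have):

counter = 0
for t in t_values:
    y = solver0(I, f, c, L, n, dt, t)   # integrate only up to the current frame time
    lines[0].set_ydata(y)
    mpl.draw()
    mpl.legend(['t=%4.1f' % t])
    mpl.savefig('sea_%04d.png' % counter)
    counter += 1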
This question is a follow-up to my previous question here: Assistance, tips and guidelines for converting Matlab code to Python
I have converted the Matlab code manually. I am on macOS and run Python from the terminal. How do I run the code below for some value of N, where N is an even number? I should get a graph (produced by the plot call), but when I run the file as is, I get nothing.
My code is below:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def Array(N):
K00 = np.logspace(0,3,101,10)
len1 = len(K00)
y0 = [0]*(3*N/2+3)
S = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
KS = [np.zeros((len1,1)) for kkkk in range(N/2)]
PS = [np.zeros((len1,1)) for kkkk in range(N/2)]
Splot = [np.zeros((len1,1)) for kkkk in range(N/2+1)]
KSplot = [np.zeros((len1,1)) for kkkk in range(N/2)]
PSplot = [np.zeros((len1,1)) for kkkk in range(N/2)]
Kplot = np.zeros((len1,1))
Pplot = np.zeros((len1,1))
for series in range(0,len1):
K0 = K00[series]
Q = 10
r1 = 0.0001
r2 = 0.001
d = 0.001
a = 0.001
k = 0.999
P0 = 1
S10 = 1e5
tf = 1e10
time = np.linspace(0,tf,len1)
y0[0] = S10
y0[3*N/2+1] = K0
y0[3*N/2+2] = P0
for i in range(1,3*N/2+1):
y0[i] = 0
[t,y] = odeint(EqnsArray,y0,time, mxstep = 5000)
for alpha in range(0,(N/2+1)):
S[alpha] = y[:,alpha]
for beta in range((N/2)+1,N+1):
KS[beta-N/2-1] = y[:,beta]
for gamma in range(N+1,3*N/2+1):
PS[gamma-N-1] = y[:,gamma]
for alpha in range(0,(N/2+1)):
Splot[alpha][series] = y[len1-1,alpha]
for beta in range((N/2)+1,N+1):
KSplot[beta-N/2-1][series] = y[len1-1,beta]
for gamma in range(N+1,3*N/2+1):
PSplot[gamma-N-1][series] = y[len1-1,gamma]
for alpha in range(0,(N/2+1)):
u1 = u1 + Splot[alpha]
for beta in range((N/2)+1,N+1):
u2 = u2 + KSplot[beta-N/2-1]
for gamma in range(N+1,3*N/2+1):
u3 = u3 + PSplot[gamma-N-1]
K = soln[:,3*N/2+1]
P = soln[:,3*N/2+2]
Kplot[series] = soln[len1-1,3*N/2+1]
Pplot[series] = soln[len1-1,3*N/2+2]
utot = u1+u2+u3
#Plot
plt.plot(np.log10(K00),utot)
plt.show()
def EqnsArray(y,t):
for alpha in range(0,(N/2+1)):
S[alpha] = y[alpha]
for beta in range((N/2)+1,N+1):
KS[beta-N/2-1] = y[beta]
for gamma in range(N+1,3*N/2+1):
PS[gamma-N-1] = y[gamma]
K = y[3*N/2+1]
P = y[3*N/2+2]
# The model equations
ydot = np.zeros((3*N/2+3,1))
B = range((N/2)+1,N+1)
G = range(N+1,3*N/2+1)
runsumPS = 0
runsum1 = 0
runsumKS = 0
runsum2 = 0
for m in range(0,N/2):
runsumPS = runsumPS + PS[m]
runsum1 = runsum1 + S[m+1]
runsumKS = runsumKS + KS[m]
runsum2 = runsum2 + S[m]
ydot[B[m]] = a*K*S[m]-(d+k+r1)*KS[m]
for i in range(0,N/2-1):
ydot[G[i]] = a*P*S[i+1]-(d+k+r1)*PS[i]
for p in range(1,N/2):
ydot[p] = -S[p]*(r1+a*K+a*P)+k*KS[p-1]+d*(PS[p-1]+KS[p])
ydot[0] = Q-(r1+a*K)*S[0]+d*KS[0]+k*runsumPS
ydot[N/2] = k*KS[N/2-1]-(r2+a*P)*S[N/2]+d*PS[N/2-1]
ydot[G[N/2-1]] = a*P*S[N/2]-(d+k+r2)*PS[N/2-1]
ydot[3*N/2+1] = (d+k+r1)*runsumKS-a*K*runsum2
ydot[3*N/2+2] = (d+k+r1)*(runsumPS-PS[N/2-1])- \
a*P*runsum1+(d+k+r2)*PS[N/2-1]
ydot_new = []
for j in range(0,3*N/2+3):
ydot_new.extend(ydot[j])
return ydot_new
You have to call your function, for example:
Array(12)
Add this call at the end of your file. As written, the script only defines Array and EqnsArray and never calls them, so nothing is computed or plotted.
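A minimal sketch of what the end of the file could look like (the __main__ guard is optional but keeps the call from running if the file is ever imported as a module; N = 12 is just an example, any even N should work with the indexing above):

if __name__ == "__main__":
    Array(12)   # run the model and show the plot for N = 12 (N must be even)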