Using Givens rotations for QR decomposition - python

I need help defining a function that computes the QR decomposition of a matrix using rotators, with a conditional that checks whether a number is nearly zero (tolerance of 1e-15) before applying a rotator.
import numpy as np

A = np.array([(1, -2, 3), (0, -1, 4), (0, 2, 5)], dtype=float)

def QRrot(A):
    # work on a float copy so the input matrix is not overwritten
    A = A.astype(float).copy()
    m, n = A.shape
    tol = 1e-15  # treat entries below this as already zero
    indexI = np.zeros((m, n))
    indexJ = np.zeros((m, n))
    C = np.zeros((m, n))
    S = np.zeros((m, n))
    Q = np.eye(m)
    for i in range(n):
        for j in range(i + 1, m):
            # skip the rotation if the entry to eliminate is nearly zero
            if abs(A[j, i]) < tol:
                continue
            r = (A[i, i]**2 + A[j, i]**2)**0.5
            c = A[i, i] / r
            s = A[j, i] / r
            # rotate rows i and j; compute both rows from the old values,
            # otherwise the second update would see the already-rotated row i
            Ai = c*A[i, :] + s*A[j, :]
            Aj = -s*A[i, :] + c*A[j, :]
            A[i, :], A[j, :] = Ai, Aj
            # accumulate the transposed rotation into Q (columns i and j)
            Qi = c*Q[:, i] + s*Q[:, j]
            Qj = -s*Q[:, i] + c*Q[:, j]
            Q[:, i], Q[:, j] = Qi, Qj
            indexI[j, i] = i
            indexJ[j, i] = j
            C[j, i] = c
            S[j, i] = s
    R = A
    return (Q, R)
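A quick sanity check (my own usage example, not part of the question): Q should be orthogonal, R upper triangular, and Q @ R should reproduce A.

Q, R = QRrot(A)
print(np.allclose(Q @ R, A))             # True: the factorization reproduces A
print(np.allclose(Q.T @ Q, np.eye(3)))   # True: Q is orthogonal
print(np.allclose(R, np.triu(R)))        # True: R is upper triangular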

Related

Conjugate gradient with tensorflow and sparse tensor runs slower than scipy with sparse matrices

I implemented the conjugate gradient method using TensorFlow to invert a sparse matrix.
The matrix I used to test the method is well-conditioned, as it is the sum of a mass matrix and a stiffness matrix obtained with finite elements.
I compared it with the same method implemented using SciPy on the same data.
The solutions obtained with both methods are the same; however, TensorFlow is about 5 times slower (I tested in a Colab environment): SciPy ran in 0.27 s, while TensorFlow required 1.37 s.
Why is the algorithm so slow under TensorFlow?
I cannot cast to dense matrices, as I want to use the method with matrices of large size (100k × 100k or more).
Thanks,
Cesare
Here is the code I used to test this:
import tensorflow as tf
import numpy as np
from scipy.sparse import coo_matrix, linalg
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from time import time
from scipy.spatial import Delaunay

def create_mesh(Lx=1, Ly=1, Nx=100, Ny=100):
    mesh0 = dict()
    dx = Lx/Nx
    dy = Ly/Ny
    XX, YY = np.meshgrid(np.arange(0, Lx+dx, dx), np.arange(0, Ly+dy, dy))
    points = np.vstack((XX.ravel(), YY.ravel())).T
    #np.random.shuffle(points)
    tri = Delaunay(points)
    mesh0['Pts'] = np.copy(points).astype(np.float32)
    mesh0['Tria'] = np.copy(tri.simplices).astype(int)
    return (mesh0)
def eval_connectivity(mesh0):
    print('computing mesh connectivity')
    npt = mesh0['Pts'].shape[0]
    connectivity = {}
    for jpt in range(npt):
        connectivity[jpt] = []
    for Tria in mesh0['Tria']:
        for ilpt in range(3):
            iglobalPt = Tria[ilpt]
            for jlpt in range(1+ilpt, 3):
                jglobalPt = Tria[jlpt]
                connectivity[iglobalPt].append(jglobalPt)
                connectivity[jglobalPt].append(iglobalPt)
    for key, value in connectivity.items():
        connectivity[key] = np.unique(np.array(value, dtype=int))
    return (connectivity)
def eval_local_mass(mesh0, iTri):
    lmass = np.zeros(shape=(3, 3), dtype=np.float32)
    Tria = mesh0['Tria'][iTri]
    v10 = mesh0['Pts'][Tria[1], :] - mesh0['Pts'][Tria[0], :]
    v20 = mesh0['Pts'][Tria[2], :] - mesh0['Pts'][Tria[0], :]
    N12 = np.cross(v10, v20)
    Tsurf = 0.5*np.linalg.norm(N12)
    for ipt in range(3):
        lmass[ipt, ipt] = 1.0/12.0
        for jpt in range(1+ipt, 3):
            lmass[ipt, jpt] = 1.0/24.0
            lmass[jpt, ipt] = lmass[ipt, jpt]
    lmass = 2.0*Tsurf*lmass
    return (lmass)
def eval_local_stiffness(mesh0, iTri):
    Tria = mesh0['Tria'][iTri]
    v10 = mesh0['Pts'][Tria[1], :] - mesh0['Pts'][Tria[0], :]
    v20 = mesh0['Pts'][Tria[2], :] - mesh0['Pts'][Tria[0], :]
    N12 = np.cross(v10, v20)
    Tsurf = 0.5*np.linalg.norm(N12)
    covbT = np.zeros(shape=(3, 3), dtype=np.float32)
    covbT[0, :2] = v10
    covbT[1, :2] = v20
    covbT[2, 2] = N12/(2*Tsurf)
    contrb = np.linalg.inv(covbT)
    v1 = contrb[:, 0]
    v2 = contrb[:, 1]
    a = np.dot(v1, v1)
    b = np.dot(v1, v2)
    c = np.dot(v2, v2)
    gij_c = np.array([[a, b], [b, c]], dtype=np.float32)
    lgrad = np.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 1.0]], dtype=np.float32)
    lstif = Tsurf*np.matmul(np.matmul(lgrad.T, gij_c), lgrad)
    return (lstif)
def compute_vectors_sparse_matrices(mesh0):
    npt = mesh0['Pts'].shape[0]
    connect = eval_connectivity(mesh0)
    nzero = 0
    for key, value in connect.items():
        nzero += (1 + value.shape[0])
    I = np.zeros(shape=(nzero), dtype=int)
    J = np.zeros(shape=(nzero), dtype=int)
    VM = np.zeros(shape=(nzero), dtype=np.float32)
    VS = np.zeros(shape=(nzero), dtype=np.float32)
    k0 = np.zeros(shape=(npt+1), dtype=int)
    k0[0] = 0
    k = -1
    for jpt in range(npt):
        loc_con = connect[jpt].tolist()[:]
        loc_con.append(jpt)
        loc_con = np.sort(loc_con)
        k0[jpt+1] = k0[jpt] + loc_con.shape[0]
        for jloc in range(loc_con.shape[0]):
            k = k + 1
            I[k] = jpt
            J[k] = loc_con[jloc]
    for iTr, Tria in enumerate(mesh0['Tria']):
        lstiff = eval_local_stiffness(mesh0, iTr)
        lmass = eval_local_mass(mesh0, iTr)
        for iEntry, irow in enumerate(Tria):
            loc_con = connect[irow].tolist()[:]
            loc_con.append(irow)
            loc_con = np.sort(loc_con)
            for jEntry, jcol in enumerate(Tria):
                indexEntry = k0[irow] + np.where(loc_con == jcol)[0]
                VM[indexEntry] = VM[indexEntry] + lmass[iEntry, jEntry]
                VS[indexEntry] = VS[indexEntry] + lstiff[iEntry, jEntry]
    return (I, J, VM, VS)
def compute_global_sparse_matrices(mesh0):
    I, J, VM, VS = compute_vectors_sparse_matrices(mesh0)
    npt = mesh0['Pts'].shape[0]
    MASS = coo_matrix((VM, (I, J)), shape=(npt, npt))
    STIFF = coo_matrix((VS, (I, J)), shape=(npt, npt))
    return (MASS, STIFF)

def compute_global_sparse_tensors(mesh0):
    I, J, VM, VS = compute_vectors_sparse_matrices(mesh0)
    npt = mesh0['Pts'].shape[0]
    indices = np.hstack([I[:, np.newaxis], J[:, np.newaxis]])
    MASS = tf.sparse.SparseTensor(indices=indices, values=VM.astype(np.float32), dense_shape=[npt, npt])
    STIFF = tf.sparse.SparseTensor(indices=indices, values=VS.astype(np.float32), dense_shape=[npt, npt])
    return (MASS, STIFF)

def compute_matrices_scipy(mesh0):
    MASS, STIFF = compute_global_sparse_matrices(mesh0)
    return (MASS, STIFF)

def compute_matrices_tensorflow(mesh0):
    MASS, STIFF = compute_global_sparse_tensors(mesh0)
    return (MASS, STIFF)
def conjgrad_scipy(A, b, x0, niter=100, toll=1.e-5):
    x = np.copy(x0)
    r = b - A * x
    p = np.copy(r)
    rsold = np.dot(r, r)
    for it in range(niter):
        Ap = A * p
        alpha = rsold / np.dot(p, Ap)
        x += alpha * p
        r -= alpha * Ap
        rsnew = np.dot(r, r)
        if (np.sqrt(rsnew) < toll):
            break
        p = r + (rsnew / rsold) * p
        rsold = rsnew
    return ([x, it, np.sqrt(rsnew)])

def conjgrad_tensorflow(A, b, x0, niter=100, toll=1.e-5):
    x = x0
    r = b - tf.sparse.sparse_dense_matmul(A, x)
    p = r
    rsold = tf.reduce_sum(tf.multiply(r, r))
    for it in range(niter):
        Ap = tf.sparse.sparse_dense_matmul(A, p)
        alpha = rsold / tf.reduce_sum(tf.multiply(p, Ap))
        x += alpha * p
        r -= alpha * Ap
        rsnew = tf.reduce_sum(tf.multiply(r, r))
        if (tf.sqrt(rsnew) < toll):
            break
        p = r + (rsnew / rsold) * p
        rsold = rsnew
    return ([x, it, tf.sqrt(rsnew)])
mesh = create_mesh(Lx=10, Ly=10, Nx=100, Ny=100)
x0 = tf.constant((mesh['Pts'][:, 0] < 5).astype(np.float32))
nit_time = 10
dcoef = 1.0
maxit = x0.shape[0]//2
stoll = 1.e-6
print('nb of nodes:\t{}'.format(mesh['Pts'].shape[0]))
print('nb of trias:\t{}'.format(mesh['Tria'].shape[0]))
t0 = time()
MASS0, STIFF0 = compute_matrices_scipy(mesh)
elapsed_scipy = time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed_scipy))
A = MASS0 + dcoef*STIFF0
x = np.copy(np.squeeze(x0.numpy()))
t0 = time()
for jt in range(nit_time):
    b = MASS0*x
    x1, it, tol = conjgrad_scipy(A, b, x, niter=maxit, toll=stoll)
    x = np.copy(x1)
    print('time {}; iters {}; resid: {:3.2f}'.format(1+jt, it, tol))
elapsed_scipy = time()-t0
print('elapsed, scipy: {:3.5f} s'.format(elapsed_scipy))
t0 = time()
MASS, STIFF = compute_matrices_tensorflow(mesh)
elapsed = time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed))
x = None
x1 = None
A = tf.sparse.add(MASS, tf.sparse.map_values(tf.multiply, STIFF, dcoef))
x = tf.expand_dims(tf.identity(x0), axis=1)
t0 = time()
for jt in range(nit_time):
    b = tf.sparse.sparse_dense_matmul(MASS, x)
    x1, it, tol = conjgrad_tensorflow(A, b, x, niter=maxit, toll=stoll)
    x = x1
    print('time {}; iters {}; resid: {:3.2f}'.format(1+jt, it, tol))
elapsed_tf = time()-t0
print('elapsed, tf: {:3.2f} s'.format(elapsed_tf))
print('elapsed times:')
print('scipy: {:3.2f} s\ttf: {:3.2f} s'.format(elapsed_scipy, elapsed_tf))
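One plausible contributor (my guess, not something verified in the question) is eager-mode overhead: every operation inside the Python loop is dispatched individually, which dominates when each sparse matvec is cheap. A minimal sketch of compiling one CG iteration with tf.function follows; cg_step is a hypothetical helper, not part of the code above:

@tf.function  # traced once, then each call runs as a single graph
def cg_step(A, x, r, p, rsold):
    # one conjugate-gradient iteration, same algebra as conjgrad_tensorflow
    Ap = tf.sparse.sparse_dense_matmul(A, p)
    alpha = rsold / tf.reduce_sum(p * Ap)
    x = x + alpha * p
    r = r - alpha * Ap
    rsnew = tf.reduce_sum(r * r)
    p = r + (rsnew / rsold) * p
    return x, r, p, rsnew

The Python-level loop then only checks convergence between compiled steps; whether this actually closes the 5x gap would have to be measured.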

SciPy Optimise minimise error - challenge to solve

How do I solve this error?
TypeError: NumPy boolean subtract, the `-` operator, is not supported, use the bitwise_xor, the `^` operator, or the logical_xor function instead.
I have programmed an optimization routine that must minimize the cost of a wall design. The wall is based on three parameters: x, k and m. There are constraints on the sizes of x, k and m as shown. Another constraint is that z (the deflection) must be kept under 100 mm. The equation for the deflection depends on the time t at which the blast wall experiences the blast: if t is below a certain threshold, which is itself a function of x, k and m, one equation applies; if t is above that threshold, the equation for z changes.
Here is the program... please help, many thanks :)
import numpy as np
from numpy import linspace
from math import cos
from math import sin
from scipy.optimize import minimize

#Function for minimising
def calcCost(c):
    k = c[0]
    m = c[1]
    x = c[2]
    Cost = (900 + 825*k**2 - 1725) + (10*m - 200) + ((2400*x**2)/4)
    return Cost
#Objective function
def objective(c):
    return calcCost(c)

#Defining Variables
def calck(c):
    k = c[0]
    k = k
    k.resize(12,)
    return k

def calcm(c):
    m = c[1]
    m = m
    m.resize(12,)
    return m

def calcx(c):
    x = c[2]
    x = x
    x.resize(12,)
    return x
def calcz(c):
    k = c[0]
    x = c[1]
    m = c[2]
    l = linspace(0, 140, 141)
    for t in l:
        if t <= ((20 - 0.12*x**2 + 4.2*x)/1000):
            deflection = ((((1000+9*x**2-183*x)*1000)/k)*(1-cos(t*((k/m)**0.5))) + (((1000+9*x**2-183*x)*1000)/k*((20 - 0.12*x**2 + 4.2*x)/1000))*((sin(t*((k/m)**0.5))/((k/m)**0.5))-t))*1000
        else:
            deflection = ((((1000+9*x**2-183*x)*1000)/(k*((k/m)**0.5)*((20 - 0.12*x**2 + 4.2*x)/1000)))*(sin(((k/m)**0.5)*t))-(sin(((k/m)**0.5)*(t-((20 - 0.12*x**2 + 4.2*x)/1000))))-(((1000+9*x**2-183*x)*1000)/k)*cos(((k/m)**0.5)*t))*1000
    deflection.resize(12,)
    return deflection
#Constraint functions
def kconstraint1(c):
    k = c[0]
    return k-(1*10**6) >= 0

def kconstraint2(c):
    k = c[0]
    return k-(7*10**6) <= 0

def mconstraint1(c):
    m = c[0]
    return m-200 >= 0

def mconstraint2(c):
    m = c[0]
    return m-1200 <= 0

def xconstraint1(c):
    x = c[0]
    return x >= 0

def xconstraint2(c):
    x = c[0]
    return x <= 10

def zconstraint1(c):
    k = c[0]
    x = c[1]
    m = c[2]
    l = linspace(0, 140, 141)
    for t in l:
        if t <= ((20 - 0.12*x**2 + 4.2*x)/1000):
            deflection = ((((1000+9*x**2-183*x)*1000)/k)*(1-cos(t*((k/m)**0.5))) + (((1000+9*x**2-183*x)*1000)/k*((20 - 0.12*x**2 + 4.2*x)/1000))*((sin(t*((k/m)**0.5))/((k/m)**0.5))-t))*1000
        else:
            deflection = ((((1000+9*x**2-183*x)*1000)/(k*((k/m)**0.5)*((20 - 0.12*x**2 + 4.2*x)/1000)))*(sin(((k/m)**0.5)*t))-(sin(((k/m)**0.5)*(t-((20 - 0.12*x**2 + 4.2*x)/1000))))-(((1000+9*x**2-183*x)*1000)/k)*cos(((k/m)**0.5)*t))*1000
    return deflection <= 99.99999999
b = (0.5, 1)
be = (0.5, 10)
bb = (0.1, 2.0)
bnds = (b, be, bb, bb)
con1 = ({'type': 'ineq', 'fun': kconstraint1})
con2 = ({'type': 'ineq', 'fun': kconstraint2})
con3 = ({'type': 'ineq', 'fun': mconstraint1})
con4 = ({'type': 'ineq', 'fun': mconstraint2})
con5 = ({'type': 'ineq', 'fun': xconstraint1})
con6 = ({'type': 'ineq', 'fun': xconstraint2})
con7 = ({'type': 'ineq', 'fun': zconstraint1})
cons = [con1, con2, con3, con4, con5, con6, con7]
xGUESS = 5
kGUESS = 3*10**6
mGUESS = 700
zGUESS = 90
x0 = np.array([xGUESS, kGUESS, mGUESS, zGUESS])
sol = minimize(objective, x0, method='SLSQP', bounds=bnds, constraints=cons, options={'disp': True})
xOpt = sol.x
CostOPT = sol.fun
kOPT = calck(xOpt)
xOPT = calcx(xOpt)
mOPT = calcm(xOpt)
zOPT = calcz(xOpt)
print(str(CostOPT))
print(str(calcx))
print(str(calcm))
print(str(calck))
print(str(calcz))
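For what it's worth, a likely cause of the reported TypeError (my reading, not confirmed above) is that the constraint functions return booleans: an expression like k-(1*10**6) >= 0 evaluates to a numpy bool, and SLSQP finite-differences the constraint functions, which ends up subtracting those booleans. SciPy's 'ineq' constraints expect a number that is non-negative when feasible, so here is a sketch of the first two rewritten with numeric return values (same bounds as above):

def kconstraint1(c):
    return c[0] - 1e6      # feasible when k - 1e6 >= 0

def kconstraint2(c):
    return 7e6 - c[0]      # k <= 7e6 rewritten as 7e6 - k >= 0

The remaining comparison-style constraints, including the boolean return in zconstraint1, would all need the same treatment.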

Problems rewriting code from Matlab to Python

I am new to Python and I am trying to convert and modify the following code from Matlab to Python:
function A = alg_a(RGBimage, n)
    [row, column, d] = size(RGBimage);
    if (d==3)
        HSVimage = rgb2hsv(RGBimage);
        V = HSVimage(:,:,3);
    else
        V = double(RGBimage)/255;
    end
    V = V(:);
    [Vsorted, ix] = sort(V);
    s = (row*column)/n;
    i = 0;
    h = [];
    while (i < n)
        i = i+1;
        z = Vsorted(((floor(s*(i-1))+1)):floor(s*i));
        Vstart = (s*(i-1))/(row*column);
        Vstop = (s*i)/(row*column);
        r = z-z(1);
        f = (1/n)/(r(size(r,1)));
        g = r*f;
        if (isnan(g(1)))
            g = r + Vstop;
        else
            g = g + Vstart;
        end
        h = vertcat(h,g);
    end
    m(ix) = h;
    m = m(:);
    if (d==3)
        HSVimage(:,:,3) = reshape(m,row,column);
        A = hsv2rgb(HSVimage);
    else
        A = reshape(m,row,column);
    end
    return;
end
This function implements the SMQT image preprocessing algorithm, and my problem is that I don't really understand how it works, which is why I'm stuck here:
import cv2
import numpy as np
import math

img = cv2.imread("16.bmp")
n = 8
(row, column, dim) = img.shape
if dim == 3:
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    V = hsv_img[:, :, 2]
else:
    V = img / 255
V = V.flatten(order='F')
Ix = V.argsort()[::-1]
V_sorted = V[Ix]
s = int(row*column/n)
i = 0
h = []
while i < n:
    i += 1
    z = V_sorted[math.floor(s*(i-1))+1:math.floor(s*i)]
    z = np.array(z)
    V_start = (s*(i-1))/(row*column)
    V_stop = (s * i) / (row * column)
    r = z - z[0]
    f = (1 / n) / r[len(r)-1]
    g = r * f
    if math.isnan(g[0]):
        g = r + V_stop
    else:
        g = g + V_start
    if len(h) == 0:
        h = g
    else:
        h = np.vstack((h, g))
M = np.array([])
M[Ix] = h
M = M.flatten(order='F')
if dim == 3:
    hsv_img = np.reshape(M, row, column)
    img_res = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
else:
    img_res = np.reshape(M, row, column)
cv2.imwrite('16_smqt', img_res)
It seems that I wrote code similar in functionality, but it does not work, and I have no idea why. The Matlab code was taken from the article, so I have no doubt that it works. Please help me find the inaccuracies in my translation from Matlab to Python.
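Not a full answer, but a few MATLAB-to-NumPy pitfalls that the translation above trips over; the snippet below is my own minimal illustration, not the question's data:

import numpy as np

v = np.array([0.3, 0.1, 0.2])

# 1) MATLAB's sort() is ascending, so argsort() should not be reversed;
#    the [::-1] in the question flips the order.
ix = v.argsort()

# 2) MATLAB slicing A(a:b) is 1-based and inclusive; in NumPy it is A[a-1:b],
#    so Vsorted(floor(s*(i-1))+1 : floor(s*i)) becomes
#    V_sorted[math.floor(s*(i-1)) : math.floor(s*i)] with no +1.

# 3) m(ix) = h needs a preallocated array; assigning into np.array([]) fails.
m = np.empty_like(v)
m[ix] = np.sort(v)        # scatter the sorted values back to original order

# 4) MATLAB's reshape is column-major, and np.reshape takes a shape tuple:
print(np.reshape(np.arange(6), (2, 3), order='F'))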

Handwriting neural network weights don't change

from struct import unpack
import gzip
import numpy
from numpy import *
import matplotlib.pyplot as plt

learningRate = 0.1

def get_labeled_data(imagefile, labelfile):
    """Read input-vector (image) and target class (label, 0-9) and return
    it as list of tuples.
    """
    # Open the images with gzip in read binary mode
    images = gzip.open(imagefile, 'rb')
    labels = gzip.open(labelfile, 'rb')
    # Read the binary data
    # We have to get big endian unsigned int. So we need '>I'
    # Get metadata for images
    images.read(4)  # skip the magic_number
    number_of_images = images.read(4)
    number_of_images = unpack('>I', number_of_images)[0]
    rows = images.read(4)
    rows = unpack('>I', rows)[0]
    cols = images.read(4)
    cols = unpack('>I', cols)[0]
    # Get metadata for labels
    labels.read(4)  # skip the magic_number
    N = labels.read(4)
    N = unpack('>I', N)[0]
    if number_of_images != N:
        raise Exception('number of labels did not match the number of images')
    # Get the data
    x = zeros((N, rows, cols), dtype="float32")  # Initialize numpy array
    y = zeros((N, 1), dtype="uint8")  # Initialize numpy array
    for i in range(N):
        if i % 1000 == 0:
            print("i: %i" % i)
        for row in range(rows):
            for col in range(cols):
                tmp_pixel = images.read(1)  # Just a single byte
                tmp_pixel = unpack('>B', tmp_pixel)[0]
                x[i][row][col] = tmp_pixel
        tmp_label = labels.read(1)
        y[i] = unpack('>B', tmp_label)[0]
    return (x, y)
ld = get_labeled_data("C:/Users/XBGFD/Desktop/Programming/NeuralNetworks/HRR/train-images-idx3-ubyte.gz", "C:/Users/XBGFD/Desktop/Programming/NeuralNetworks/HRR/train-labels-idx1-ubyte.gz")

def sigmoid(x):
    return 1/(1+numpy.exp(-x))

def sigmoid_P(x):
    return sigmoid(x) * (1 - sigmoid(x))

def cost(i, t):
    return (i - t) ** 2

def cost_P(i, t):
    return 2 * (i - t)

# 10x28x28 - number x row x column
weights = numpy.random.random((10, 28, 28))
biases = numpy.random.random((10, 28, 28))
dr = 0
da = 0
for loopi in range(10000):
    r = numpy.random.randint(0, len(ld[0][0]))
    targets = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    image = ld[0][r]
    label = ld[1][r][0]
    # weighted 3D Matrix of [number][row][column]
    predictions = []
    wPredictions = []
    # average of predictions for each number
    avgPred = []
    avgPred2 = []
    img = list(image)
    for i in range(10):
        x = []
        y = []
        for b, w in zip(biases[i], weights[i]):
            x.append(sigmoid(numpy.dot(w, image) + b))
            y.append(numpy.dot(w, image) + b)
        predictions.append(x)
        avgPred.append(numpy.average(list(x)))
        avgPred2.append(numpy.average(list(y)))
    for i in range(10):
        sqError = cost(avgPred[i], targets[i])
        # derivative of the cost with respect to each of the weights and biases
        dc_dp = cost_P(avgPred[i], targets[i])
        dp_dz = sigmoid_P(avgPred2[i])
        #for b, w in zip(biases[i], weights[i]):
        for imgRow in range(28):
            for imgCol in range(28):
                dz_dw = image[imgRow][imgCol]
                dz_db = 1
                print("dc_dp: " + str(dc_dp) + "\ndp_dz: " + str(dp_dz) + "\ndz_dw: " + str(dz_dw))
                dc_dw = dc_dp * dp_dz * dz_dw
                dc_db = dc_dp * dp_dz * dz_db
                dr = dc_dw
                weights[i][imgRow][imgCol] -= learningRate * dc_dw
                da = weights[i][imgRow][imgCol]
                biases[i][imgRow][imgCol] -= learningRate * dc_db
while True:
    big = 0
    intid = int(input())
    imag = ld[0][intid]
    for l in range(10):
        papa = []
        for b, w in zip(biases[i], weights[i]):
            papa.append(sigmoid(numpy.dot(w, imag) + b))
        lol = numpy.average(papa)
        if (lol > big):
            big = l
    print(str(dr) + " " + str(da))
    print(big)
The weights aren't changing because dp_dz is always 0, and I'm not sure what's causing that. I don't mean that they change by only a very small amount; they're literally not changing at all. I believe it has to do with my approach in general, but I'm not sure how else I could approach this problem, as I'm very new to neural networks. Any help would be greatly appreciated!
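A plausible explanation (my diagnosis from the code above, worth verifying): dp_dz = sigmoid_P(avgPred2[i]) is evaluated at the raw pre-activation average, and with pixel values in 0-255 and weights drawn from [0, 1) that average is enormous, so the sigmoid saturates and its derivative underflows to exactly 0. A minimal sketch of the effect, and of the usual remedy of scaling the inputs and shrinking the initial weights:

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_P(x):
    return sigmoid(x) * (1 - sigmoid(x))

# Raw pixels (0..255) dotted with weights in [0, 1): z lands in the tens of
# thousands, sigmoid(z) is exactly 1.0 in float64, and the derivative is 0.
z = np.dot(np.random.random(784), np.random.random(784) * 255.0)
print(sigmoid_P(z))                    # 0.0 -> no weight update at all

# Scaled inputs and small, centered initial weights keep the gradient alive:
x = np.random.random(784)              # stands in for image / 255.0
w = (np.random.random(784) - 0.5) * 0.1
print(sigmoid_P(np.dot(w, x)))         # small but non-zero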

Using matplotlib.pyplot to make the animation of the 1D wave equation

I have been using matplotlib in Python to show an animation of the 1D wave equation, but I have a problem making the animation. I want the image of the wave to change with time, which means I need a loop that produces a different picture of the wave for each time step. But it seems that the time cannot be passed into the wave function, so the images do not change at all. Please help me find the mistake that I made.
Here are the codes that I wrote (part of them comes from the book "Python Scripting for Computational Science"):
from numpy import zeros, linspace, sin, pi
import matplotlib.pyplot as mpl

def I(x):
    return sin(2*x*pi/L)

def f(x, t):
    return sin(x*t)

def solver0(I, f, c, L, n, dt, tstop):
    # f is a function of x and t, I is a function of x
    x = linspace(0, L, n+1)
    dx = L/float(n)
    if dt <= 0:
        dt = dx/float(c)
    C2 = (c*dt/dx)**2
    dt2 = dt*dt
    up = zeros(n+1)
    u = up.copy()
    um = up.copy()
    t = 0.0
    for i in range(0, n):
        u[i] = I(x[i])
    for i in range(1, n-1):
        um[i] = u[i] + 0.5*C2*(u[i-1] - 2*u[i] + u[i+1]) + dt2*f(x[i], t)
    um[0] = 0
    um[n] = 0
    while t <= tstop:
        t_old = t
        t += dt
        # update all inner points:
        for i in range(1, n-1):
            up[i] = -um[i] + 2*u[i] + C2*(u[i-1] - 2*u[i] + u[i+1]) + dt2*f(x[i], t_old)
        # insert boundary conditions:
        up[0] = 0
        up[n] = 0
        # update data structures for next step
        um = u.copy()
        u = up.copy()
    return u

c = 3.0  # given by myself
L = 10
n = 100
dt = 0
tstart = 0
tstop = 6
x = linspace(0, L, n+1)
t_values = linspace(tstart, tstop, 31)
mpl.ion()
y = solver0(I, f, c, L, n, dt, tstop)
lines = mpl.plot(x, y)
mpl.axis([x[0], x[-1], -1.0, 1.0])
mpl.xlabel('x')
mpl.ylabel('y')
counter = 0
for t in t_values:
    y = solver0(I, f, c, L, n, dt, tstop)
    lines[0].set_ydata(y)
    mpl.draw()
    mpl.legend(['t=%4.1f' % t])
    mpl.savefig('sea_%04d.png' % counter)
    counter += 1
Maybe that's what you need?
y = solver0(I,f,c,L,n,dt,t)
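Expanding on that suggestion: the frames are identical because every call integrates all the way to tstop, so each saved image shows the same final state. Passing the current frame time t as the stop time makes each frame different. A sketch of the loop, assuming the definitions from the question above:

for counter, t in enumerate(t_values):
    y = solver0(I, f, c, L, n, dt, t)   # integrate only up to this frame's time
    lines[0].set_ydata(y)
    mpl.legend(['t=%4.1f' % t])
    mpl.draw()
    mpl.savefig('sea_%04d.png' % counter)

Re-solving from t=0 for every frame is wasteful; returning the whole time history from solver0, or stepping it frame by frame, would avoid the repeated work.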
