I have been trying to code up the neural network for recognizing MNIST digits given by Michael Nielsen here: http://neuralnetworksanddeeplearning.com/chap1.html
I believe the original was written for Python 2.7; I'm using Python 3. The network does go through the training examples and updates the weights and biases, but it doesn't learn: it gets around 10% of the test examples right, so no better than random guessing.
I have also tried simply copying the code from the site and running it in Python 2.7, and it works as it should (reaching about 95% accuracy). The only significant differences between the nets are the data set (I'm using the one downloaded directly from MNIST two days ago) and the two places where I switched np.dot to np.outer, just to make it easier to keep track of the array shapes (I tried sticking to (N,) instead of (N,1)). That part seems to be fine, since the layer sizes differ and the multiplications go through. I am also using the same learning rate and layer sizes as in the example.
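For reference, a quick shape check (with arbitrary sizes) shows why the np.outer swap should be equivalent to the (N,1)-style np.dot from the book:
import numpy as np
delta = np.random.randn(30)        # (N,) error vector for a layer of 30 neurons
activation = np.random.randn(784)  # (M,) activation of the previous layer
# Nielsen's version: (30,1) dot (1,784) -> (30,784)
col = np.dot(delta.reshape(30, 1), activation.reshape(1, 784))
# Flat-vector version: outer product of two 1-D arrays -> (30,784)
flat = np.outer(delta, activation)
print(np.allclose(col, flat))  # True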
I cannot see what could be throwing the net off. If anyone has tried doing the same, or has some insight into this, I would greatly appreciate it.
Thanks!
The code:
import matplotlib.pyplot as plt
import numpy as np
import idx2numpy
import random
### LOAD DATASET ###
train = idx2numpy.convert_from_file("mnist/train-images.idx3-ubyte")
train_labels = idx2numpy.convert_from_file("mnist/train-labels.idx1-ubyte")
test = idx2numpy.convert_from_file("mnist/t10k-images.idx3-ubyte")
test_labels = idx2numpy.convert_from_file("mnist/t10k-labels.idx1-ubyte")
def vectorize(x):
e = np.zeros(10)
e[x] = 1.0
return e
training_images = [np.reshape(i, (784))/255 for i in train]
training_labels = [vectorize(i) for i in train_labels]
training_set = list(zip(training_images,training_labels))
test_images = [np.reshape(i, (784))/255 for i in test]
test_set = list(zip(training_images,test_labels))
### NETWORK CLASS ###
class myNet():
    def __init__(self, sizes):
        self.sizes = sizes
        self.N = len(sizes)
        self.w = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
        self.b = [np.random.randn(i) for i in sizes[1:]]
    def sigmoid(self, z):
        return 1.0/(1.0 + np.exp(-z))
    def sigmoid_prime(self, z):
        return self.sigmoid(z)*(1 - self.sigmoid(z))
    def cost_derivative(self, output_activations, y):
        return (output_activations - y)
    def feedforward(self, a):
        for bb, ww in zip(self.b, self.w):
            a = self.sigmoid(np.dot(ww, a) + bb)
        return a
    def backprop(self, x, y):
        nabla_b = [np.zeros(bb.shape) for bb in self.b]
        nabla_w = [np.zeros(ww.shape) for ww in self.w]
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for bb, ww in zip(self.b, self.w):
            z = np.dot(ww, activation) + bb
            zs.append(z)
            activation = self.sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * self.sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.outer(delta, activations[-2])
        # Note that the variable l in the loop below is used a little
        # differently from the book's notation: l = 2 is the second-to-last
        # layer, l = 3 the one before that, and so on.
        for l in range(2, self.N):
            z = zs[-l]
            sp = self.sigmoid_prime(z)
            delta = np.dot(self.w[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.outer(delta, activations[-l-1])
        return (nabla_b, nabla_w)
    def update(self, mini_batch, eta):
        nabla_b = [np.zeros(bb.shape) for bb in self.b]
        nabla_w = [np.zeros(ww.shape) for ww in self.w]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.w = [ww - (eta/len(mini_batch))*nw
                  for ww, nw in zip(self.w, nabla_w)]
        self.b = [bb - (eta/len(mini_batch))*nb
                  for bb, nb in zip(self.b, nabla_b)]
        return
    def gradient_descent(self, training_data, epochs, mini_batch_size, eta, test_data):
        i = 0
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update(mini_batch, eta)
            print("Epoch {0}: {1}".format(
                j, self.evaluate(test_data)))
        return
    def evaluate(self, test_data):
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
sizes = [28*28, 30, 10]
net = myNet(sizes)
net.gradient_descent(training_set, 30, 10, 3.0, test_set)
I found the mistake: I had zipped the training images with the test labels to form the test set, which is clearly not what it should be. Now that the test set is formed properly, everything works and reaches around 95% accuracy. Here is the full corrected code for completeness (usable in Python 3):
import matplotlib.pyplot as plt
import numpy as np
import idx2numpy
import random
def vectorize(x):
e = np.zeros(10)
e[x] = 1.0
return e
### LOAD DATASET ###
train_images = idx2numpy.convert_from_file("mnist/train-images.idx3-ubyte")/255
train_labels = idx2numpy.convert_from_file("mnist/train-labels.idx1-ubyte")
train_images = [np.reshape(x,(784)).astype('float32') for x in train_images]
train_labels = [vectorize(i) for i in train_labels]
test_images = idx2numpy.convert_from_file("mnist/t10k-images.idx3-ubyte")/255
test_labels = idx2numpy.convert_from_file("mnist/t10k-labels.idx1-ubyte")
test_images = [np.reshape(x,(784)).astype('float32') for x in test_images]
training_set = list(zip(train_images,train_labels))
test_set = list(zip(test_images,test_labels)) ## THIS IS WHERE I MESSED UP
### NETWORK CLASS ###
class myNet():
    def __init__(self, sizes):
        self.sizes = sizes
        self.N = len(sizes)
        self.w = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
        self.b = [np.random.randn(i) for i in sizes[1:]]
    def sigmoid(self, z):
        return 1.0/(1.0 + np.exp(-z))
    def sigmoid_prime(self, z):
        return self.sigmoid(z)*(1 - self.sigmoid(z))
    def cost_derivative(self, output_activations, y):
        return (output_activations - y)
    def feedforward(self, a):
        for bb, ww in zip(self.b, self.w):
            a = self.sigmoid(np.dot(ww, a) + bb)
        return a
    def backprop(self, x, y):
        nabla_b = [np.zeros(bb.shape) for bb in self.b]
        nabla_w = [np.zeros(ww.shape) for ww in self.w]
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for bb, ww in zip(self.b, self.w):
            z = np.dot(ww, activation) + bb
            zs.append(z)
            activation = self.sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * self.sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.outer(delta, activations[-2])
        # Note that the variable l in the loop below is used a little
        # differently from the book's notation: l = 2 is the second-to-last
        # layer, l = 3 the one before that, and so on.
        for l in range(2, self.N):
            z = zs[-l]
            sp = self.sigmoid_prime(z)
            delta = np.dot(self.w[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.outer(delta, activations[-l-1])
        return (nabla_b, nabla_w)
    def update(self, mini_batch, eta):
        nabla_b = [np.zeros(bb.shape) for bb in self.b]
        nabla_w = [np.zeros(ww.shape) for ww in self.w]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.w = [ww - (eta/len(mini_batch))*nw
                  for ww, nw in zip(self.w, nabla_w)]
        self.b = [bb - (eta/len(mini_batch))*nb
                  for bb, nb in zip(self.b, nabla_b)]
        return
    def gradient_descent(self, training_data, epochs, mini_batch_size, eta, test_data):
        i = 0
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update(mini_batch, eta)
            print("Epoch {0}: {1}".format(
                j, self.evaluate(test_data)))
        return
    def evaluate(self, test_data):
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
sizes = [28*28, 30, 10]
net = myNet(sizes)
net.gradient_descent(training_set, 30, 10, 3.0, test_set)
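As a side note, a quick sanity check on the (image, label) pairing would have caught the original mix-up immediately; something along these lines (just a sketch using the variables defined above):
# lengths must match, and a random sample should look like its label
assert len(test_images) == len(test_labels) == 10000
idx = random.randrange(len(test_set))
img, lbl = test_set[idx]
plt.imshow(img.reshape(28, 28), cmap="gray")
plt.title("label: {}".format(lbl))
plt.show()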
Related
I'm new to machine learning. My perceptron code works, but only for the first training example. Also, b_final is an n-dimensional array instead of a scalar. Can you help me with these issues? X.shape = (150,4) and y.shape = (100,).
The dataset is the Iris dataset.
m, n = X.shape
def weighted_sum(X,w,b):
for i in range(m):
f_Xwb = np.dot(X[i],w) + b
return f_Xwb
def Prediction(X,initial_w,initial_b):
return np.where(weighted_sum(X,initial_w,initial_b) > 0 , 1 , -1)
def weights(X , y , w , b , epochs , eta):
for i in range(epochs):
for xi,yi in zip(X,y):
w = w - eta * ((Prediction(xi,initial_w,initial_b)-yi) * xi)
b = b - eta * (Prediction(xi,initial_w,initial_b)-yi)
return w , b
initial_w = np.ones(n)
initial_b = 1
w_final , b_final = weights(X,y,initial_w,initial_b,50,0.3)
print(w_final,b_final)
y_prediction = Prediction(X,w_final,b_final)
y_prediction
Also, y_prediction is an n-dimensional array instead of a scalar.
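For reference, here is a minimal sketch (with made-up data, not the Iris set) of the shapes involved: the weighted sum of every row of X against a single weight vector is itself a vector with one entry per sample, which is why a prediction over all of X comes out as an array:
import numpy as np
X_demo = np.random.rand(5, 4)               # 5 samples, 4 features
w_demo = np.ones(4)
b_demo = 1.0
scores = np.dot(X_demo, w_demo) + b_demo    # shape (5,): one score per sample
preds = np.where(scores > 0, 1, -1)         # shape (5,): one prediction per sample
print(scores.shape, preds.shape)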
I am currently going through the first chapter of Michael Nielsen's neural network book.
I am running into an anomaly where the exact same code is producing wildly different results, with the only difference between the code being where it is copied from. One set of code is the one which I wrote as I went through the chapter, and the other set is from copying Nielsen's source code from github. Naturally, his works and mine doesn't. Nonetheless, if you plug both blocks of code into a text comparing tool, you can see that they're the exact same, save for a few comments.
The code is building a basic neural network that seeks to identify handwritten digits. The code runs epochs in the neural network, and then outputs how many numbers are correctly identified in each epoch.
Given the current parameters, each epoch should produce roughly 8,000 to 9,500 out of 10,000 correct results, and this result is printed to the terminal. This is what you consistently observe when Nielsen's code is run.
Conversely, when the same code is run, but copied from my file, the results are consistently ~50-450 out of 10,000.
Perhaps this has something to do with VS Code? I'm really at a loss for why the same code to the letter is producing different results. I was hoping someone smarter than me would know why it matters where you ctrl-c ctrl-v from. Thanks!
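For what it's worth, many text-compare tools normalize or hide whitespace, so an indentation-only difference can slip through. A quick way to diff the two files including whitespace (a sketch, assuming they are saved as mine.py and his.py):
import difflib
with open("mine.py") as f1, open("his.py") as f2:
    mine, his = f1.readlines(), f2.readlines()
# unified_diff is whitespace-sensitive, so indentation-only changes show up
for line in difflib.unified_diff(mine, his, fromfile="mine.py", tofile="his.py"):
    print(line, end="")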
Here is the code I wrote:
#mine
import random
import numpy as np
class Network(object):
def __init__(self, sizes):
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward(self, a):
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
if test_data: n_test = len(test_data)
n = len(training_data)
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in range(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print("Epoch {0}: {1} / {2}".format(
j, self.evaluate(test_data), n_test))
else:
print("Epoch {0} complete".format(j))
def update_mini_batch(self, mini_batch, eta):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
return (output_activations-y)
### Non Network functions
def sigmoid(z):
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
return sigmoid(z)*(1-sigmoid(z))
And here is the code that he wrote:
#his
import random
import numpy as np
class Network(object):
def __init__(self, sizes):
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward(self, a):
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
if test_data: n_test = len(test_data)
n = len(training_data)
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in range(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print("Epoch {0}: {1} / {2}".format(
j, self.evaluate(test_data), n_test))
else:
print("Epoch {0} complete".format(j))
def update_mini_batch(self, mini_batch, eta):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
return sigmoid(z)*(1-sigmoid(z))
Below is the helper file and the main file. The number image data used can be downloaded here.
helper:
"""
mnist_loader
~~~~~~~~~~~~~~~~
A library used to load mnist data.
"""
#### Libraries
# Standard library
import pickle
import gzip
# Third party libraries
import numpy as np
def load_data():
"""Returns the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('./data/mnist.pkl.gz', 'rb')
u = pickle._Unpickler( f )
u.encoding = 'latin1'
training_data, validation_data, test_data = u.load()
# training_data, validation_data, test_data = pickle.load(f)
f.close()
return(training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
numpy.ndarry containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
code."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = list(zip(training_inputs, training_results))
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = list(zip(validation_inputs, va_d[1]))
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = list(zip(test_inputs, te_d[1]))
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth position
    and zeroes elsewhere. This converts a digit (0...9) into a
corresponding desired output from the neural network.
"""
e = np.zeros((10, 1))
e[j] = 1.0
return e
main:
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
import test #change this to test2 to check nielsens code
net = test.Network([784, 30, 10]) # change here too
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
So they weren't "eXacTLy tHe SaME" after all. The difference was in the for loop of the feedforward function: I was returning inside the loop, on every iteration, which was incorrect.
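In other words, the difference was an indentation slip along these lines (a sketch of the mistake, not the literal file contents; these are methods of the Network class):
# Broken: the return sits inside the loop, so only the first layer is applied.
def feedforward(self, a):
    for b, w in zip(self.biases, self.weights):
        a = sigmoid(np.dot(w, a)+b)
        return a
# Correct: return only after all layers have been applied.
def feedforward(self, a):
    for b, w in zip(self.biases, self.weights):
        a = sigmoid(np.dot(w, a)+b)
    return a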
I am trying to create a denoising autoencoder for 1D cyclic signals like cos(x), etc.
To create the dataset, I pass a list of cyclic functions, and for each generated example random coefficients are rolled for each function in the list, so every generated signal is different yet cyclic, e.g. 0.856cos(x) - 1.3cos(0.1x).
Then I add noise and normalize the signal to be between [0, 1).
Next, I train my autoencoder on it, but it learns to output a constant (usually 0.5). My guess is that this happens because 0.5 is roughly the mean value of the normalized signals, but that is not the result I'm aiming for at all.
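A quick way to test that guess (a sketch, using the generator and model referenced in the code below) is to compare the network's output against the actual mean of the normalized targets:
# If the model has collapsed to predicting the dataset mean, these numbers should be close.
_, clean_check, noisy_check = util.lincomb_generate_data(batch_size, B, K, functions, noise_type="gaussian")
print("mean of normalized targets:", clean_check.float().mean().item())
print("mean of model output:", lin_model(noisy_check.float()).mean().item())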
I am providing the code I wrote for the autoencoder, the data generator, and the training loop, as well as two pictures depicting the problem I'm having.
first example:
second example:
Linear autoencoder:
class LinAutoencoder(nn.Module):
def __init__(self, in_channels, K, B, z_dim, out_channels):
super(LinAutoencoder, self).__init__()
self.in_channels = in_channels
self.K = K # number of samples per 2pi interval
self.B = B # how many intervals
self.out_channels = out_channels
encoder_layers = []
decoder_layers = []
encoder_layers += [
nn.Linear(in_channels * K * B, 2*z_dim, bias=True),
nn.ReLU(),
nn.Linear(2*z_dim, z_dim, bias=True),
nn.ReLU(),
nn.Linear(z_dim, z_dim, bias=True),
nn.ReLU()
]
decoder_layers += [
nn.Linear(z_dim, z_dim, bias=True),
nn.ReLU(),
nn.Linear(z_dim, 2*z_dim, bias=True),
nn.ReLU(),
nn.Linear(2*z_dim, out_channels * K * B, bias=True),
nn.Tanh()
]
self.encoder = nn.Sequential(*encoder_layers)
self.decoder = nn.Sequential(*decoder_layers)
def forward(self, x):
batch_size = x.shape[0]
x_flat = torch.flatten(x, start_dim=1)
enc = self.encoder(x_flat)
dec = self.decoder(enc)
res = dec.view((batch_size, self.out_channels, self.K * self.B))
return res
The data generator:
def lincomb_generate_data(batch_size, intervals, sample_length, functions, noise_type="gaussian", **kwargs)->torch.tensor:
channels = 1
mul_term = 2 * np.pi / sample_length
positions = np.arange(0, sample_length * intervals)
x_axis = positions * mul_term
X = np.tile(x_axis, (channels, 1))
y = X
Y = np.repeat(y[np.newaxis, :], batch_size, axis=0)
if noise_type == "gaussian":
# defaults to 0, 0.4
noise_mean = kwargs.get("noise_mean", 0)
noise_std = kwargs.get("noise_std", 0.4)
noise = np.random.normal(noise_mean, noise_std, Y.shape)
if noise_type == "uniform":
# defaults to 0, 1
noise_low = kwargs.get("noise_low", 0)
noise_high = kwargs.get("noise_high", 1)
noise = np.random.uniform(noise_low, noise_high, Y.shape)
coef_lo = -2
coef_hi = 2
coef_mat = np.random.uniform(coef_lo, coef_hi, (batch_size, len(functions))) # creating a matrix of coefficients
coef_mat = np.where(np.abs(coef_mat) < 10**-1, 0, coef_mat)
for i in range(batch_size):
curr_res = np.zeros((channels, sample_length * intervals))
for func_id, function in enumerate(functions):
curr_func = functions[func_id]
curr_coef = coef_mat[i][func_id]
curr_res += curr_coef * curr_func(Y[i, :, :])
Y[i, :, :] = curr_res
clean = Y
noisy = clean + noise
# Normalizing
    clean -= clean.min(axis=2, keepdims=True)
    clean /= clean.max(axis=2, keepdims=True) + 1e-5  # avoiding zero division
    noisy -= noisy.min(axis=2, keepdims=True)
    noisy /= noisy.max(axis=2, keepdims=True) + 1e-5  # avoiding zero division
clean = torch.from_numpy(clean)
noisy = torch.from_numpy(noisy)
return x_axis, clean, noisy
Training loop:
functions = [lambda x: np.cos(0.1*x),
lambda x: np.cos(x),
lambda x: np.cos(3*x)]
num_epochs = 200
lin_loss_list = []
criterion = torch.nn.MSELoss()
lin_optimizer = torch.optim.SGD(lin_model.parameters(), lr=0.01, momentum=0.9)
_, val_clean, val_noisy = util.lincomb_generate_data(batch_size, B, K, functions, noise_type="gaussian")
print("STARTED TRAINING")
for epoch in range(num_epochs):
# generate data returns the x-axis used for plotting as well as the clean and noisy data
_, t_clean, t_noisy = util.lincomb_generate_data(batch_size, B, K, functions, noise_type="gaussian")
# ===================forward=====================
lin_output = lin_model(t_noisy.float())
lin_loss = criterion(lin_output.float(), t_clean.float())
lin_loss_list.append(lin_loss.data)
# ===================backward====================
lin_optimizer.zero_grad()
lin_loss.backward()
lin_optimizer.step()
val_lin_loss = F.mse_loss(lin_model(val_noisy.float()), val_clean.float())
print("DONE TRAINING")
Edit: here are the parameters that were requested:
L = 1
K = 512
B = 2
batch_size = 64
z_dim = 64
noise_mean = 0
noise_std = 0.4
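For completeness, lin_model is built from these parameters; the exact construction isn't shown above, so treat this as a sketch of the instantiation:
lin_model = LinAutoencoder(in_channels=L, K=K, B=B, z_dim=z_dim, out_channels=L)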
The problem was that I didn't use nn.BatchNorm1d in my model, so I guess something went wrong during training (probably vanishing gradients).
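For reference, a minimal sketch of the change in the encoder (one reasonable placement of the BatchNorm1d layers; the decoder is modified the same way, and the exact arrangement may differ from what I ended up with):
encoder_layers = [
    nn.Linear(in_channels * K * B, 2*z_dim, bias=True),
    nn.BatchNorm1d(2*z_dim),
    nn.ReLU(),
    nn.Linear(2*z_dim, z_dim, bias=True),
    nn.BatchNorm1d(z_dim),
    nn.ReLU(),
    nn.Linear(z_dim, z_dim, bias=True),
    nn.BatchNorm1d(z_dim),
    nn.ReLU()
]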
I am trying to reproduce the results generated by TensorFlow's LSTMCell, to be sure that I understand what it does.
Here is my TensorFlow code:
num_units = 3
lstm = tf.nn.rnn_cell.LSTMCell(num_units = num_units)
timesteps = 7
num_input = 4
X = tf.placeholder("float", [None, timesteps, num_input])
x = tf.unstack(X, timesteps, 1)
outputs, states = tf.contrib.rnn.static_rnn(lstm, x, dtype=tf.float32)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
x_val = np.random.normal(size = (1, 7, num_input))
res = sess.run(outputs, feed_dict = {X:x_val})
for e in res:
print e
Here is its output:
[[-0.13285545 -0.13569424 -0.23993783]]
[[-0.04818152 0.05927373 0.2558436 ]]
[[-0.13818116 -0.13837864 -0.15348436]]
[[-0.232219 0.08512601 0.05254192]]
[[-0.20371495 -0.14795329 -0.2261929 ]]
[[-0.10371902 -0.0263292 -0.0914975 ]]
[[0.00286371 0.16377522 0.059478 ]]
And here is my own implementation:
n_steps, _ = X.shape
h = np.zeros(shape = self.hid_dim)
c = np.zeros(shape = self.hid_dim)
for i in range(n_steps):
x = X[i,:]
vec = np.concatenate([x, h])
#vec = np.concatenate([h, x])
gs = np.dot(vec, self.kernel) + self.bias
g1 = gs[0*self.hid_dim : 1*self.hid_dim]
g2 = gs[1*self.hid_dim : 2*self.hid_dim]
g3 = gs[2*self.hid_dim : 3*self.hid_dim]
g4 = gs[3*self.hid_dim : 4*self.hid_dim]
I = vsigmoid(g1)
N = np.tanh(g2)
F = vsigmoid(g3)
O = vsigmoid(g4)
c = c*F + I*N
h = O * np.tanh(c)
print h
And here is its output:
[-0.13285543 -0.13569425 -0.23993781]
[-0.01461723 0.08060743 0.30876374]
[-0.13142865 -0.14921292 -0.16898363]
[-0.09892188 0.11739943 0.08772941]
[-0.15569218 -0.15165766 -0.21918869]
[-0.0480604 -0.00918626 -0.06084118]
[0.0963612 0.1876516 0.11888081]
As you might notice I was able to reproduce the first hidden vector, but the second one and all the following ones are different. What am I missing?
I examined this link, and your code is almost perfect, but you forgot to add the forget_bias value (default 1.0) in this line: F = vsigmoid(g3). It should actually be F = vsigmoid(g3 + self.forget_bias), or in your case, since it is 1, F = vsigmoid(g3 + 1).
here is my imp with numpy:
import numpy as np
import tensorflow as tf
num_units = 3
lstm = tf.nn.rnn_cell.LSTMCell(num_units = num_units)
batch=1
timesteps = 7
num_input = 4
X = tf.placeholder("float", [batch, timesteps, num_input])
x = tf.unstack(X, timesteps, 1)
outputs, states = tf.contrib.rnn.static_rnn(lstm, x, dtype=tf.float32)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
x_val = np.reshape(range(28),[batch, timesteps, num_input])
res = sess.run(outputs, feed_dict = {X:x_val})
for e in res:
print(e)
print("\nmy imp\n")
#my impl
def sigmoid(x):
    return 1/(1 + np.exp(-x))
# pull the trained kernel/bias out of the TF cell so the numpy loop uses
# exactly the same parameters
kernel, bias = sess.run([lstm._kernel, lstm._bias])
f_b_ = lstm._forget_bias
c, h = np.zeros([batch, num_units]), np.zeros([batch, num_units])  # cell and hidden state
for step in range(timesteps):
    inpt = np.split(x_val, timesteps, 1)[step][0]
    lstm_mtrx = np.matmul(np.concatenate([inpt, h], 1), kernel) + bias
    # gate order in the TF kernel: i = input, j = new input, f = forget, o = output
    i, j, f, o = np.split(lstm_mtrx, 4, 1)
    c = sigmoid(f + f_b_)*c + sigmoid(i)*np.tanh(j)
    h = sigmoid(o)*np.tanh(c)
    print(h)
output:
[[ 0.06964055 -0.06541953 -0.00682676]]
[[ 0.005264 -0.03234607 0.00014838]]
[[ 1.617855e-04 -1.316892e-02 8.596722e-06]]
[[ 3.9425286e-06 -5.1347450e-03 7.5078127e-08]]
[[ 8.7508155e-08 -1.9560163e-03 6.3853928e-10]]
[[ 1.8867894e-09 -7.3784427e-04 5.8551406e-12]]
[[ 4.0385355e-11 -2.7728223e-04 5.3957669e-14]]
my imp
[[ 0.06964057 -0.06541953 -0.00682676]]
[[ 0.005264 -0.03234607 0.00014838]]
[[ 1.61785520e-04 -1.31689185e-02 8.59672610e-06]]
[[ 3.94252745e-06 -5.13474567e-03 7.50781122e-08]]
[[ 8.75080644e-08 -1.95601574e-03 6.38539112e-10]]
[[ 1.88678843e-09 -7.37844070e-04 5.85513438e-12]]
[[ 4.03853841e-11 -2.77282006e-04 5.39576024e-14]]
TensorFlow uses the glorot_uniform() function to initialize the LSTM kernel, which samples weights from a random uniform distribution. We need to fix the kernel's value to get reproducible results:
import tensorflow as tf
import numpy as np
np.random.seed(0)
timesteps = 7
num_input = 4
x_val = np.random.normal(size = (1, timesteps, num_input))
num_units = 3
def glorot_uniform(shape):
limit = np.sqrt(6.0 / (shape[0] + shape[1]))
return np.random.uniform(low=-limit, high=limit, size=shape)
kernel_init = glorot_uniform((num_input + num_units, 4 * num_units))
My implementation of the LSTMCell (well, actually it's just TensorFlow's code, slightly rewritten):
def sigmoid(x):
return 1. / (1 + np.exp(-x))
class LSTMCell():
"""Long short-term memory unit (LSTM) recurrent network cell.
"""
def __init__(self, num_units, initializer=glorot_uniform,
forget_bias=1.0, activation=np.tanh):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
initializer: The initializer to use for the kernel matrix. Default: glorot_uniform
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states. Default: np.tanh.
"""
# Inputs must be 2-dimensional.
self._num_units = num_units
self._forget_bias = forget_bias
self._activation = activation
self._initializer = initializer
def build(self, inputs_shape):
input_depth = inputs_shape[-1]
h_depth = self._num_units
self._kernel = self._initializer(shape=(input_depth + h_depth, 4 * self._num_units))
self._bias = np.zeros(shape=(4 * self._num_units))
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input numpy array, must be 2-D, `[batch, input_size]`.
state: a tuple of numpy arrays, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, numpy array representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is equal to num_units.
- Numpy array(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
"""
num_proj = self._num_units
(c_prev, m_prev) = state
input_size = inputs.shape[-1]
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = np.hstack([inputs, m_prev]).dot(self._kernel)
lstm_matrix += self._bias
i, j, f, o = np.split(lstm_matrix, indices_or_sections=4, axis=0)
# Diagonal connections
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
m = sigmoid(o) * self._activation(c)
new_state = (c, m)
return m, new_state
X = x_val.reshape(x_val.shape[1:])
cell = LSTMCell(num_units, initializer=lambda shape: kernel_init)
cell.build(X.shape)
state = (np.zeros(num_units), np.zeros(num_units))
for i in range(timesteps):
x = X[i,:]
output, state = cell.call(x, state)
print(output)
Produces output:
[-0.21386017 -0.08401277 -0.25431477]
[-0.22243588 -0.25817422 -0.1612211 ]
[-0.2282134 -0.14207162 -0.35017249]
[-0.23286737 -0.17129192 -0.2706512 ]
[-0.11768674 -0.20717363 -0.13339118]
[-0.0599215 -0.17756104 -0.2028935 ]
[ 0.11437953 -0.19484555 0.05371994]
While your Tensorflow code, if you replace the second line with
lstm = tf.nn.rnn_cell.LSTMCell(num_units = num_units, initializer = tf.constant_initializer(kernel_init))
returns:
[[-0.2138602 -0.08401276 -0.25431478]]
[[-0.22243595 -0.25817424 -0.16122109]]
[[-0.22821338 -0.1420716 -0.35017252]]
[[-0.23286738 -0.1712919 -0.27065122]]
[[-0.1176867 -0.2071736 -0.13339119]]
[[-0.05992149 -0.177561 -0.2028935 ]]
[[ 0.11437953 -0.19484554 0.05371996]]
Here is a blog that will answer conceptual questions related to LSTMs. It seems there is a lot that goes into building an LSTM from scratch!
Of course, this answer doesn't solve your question; it just gives a direction.
From a linear algebra standpoint, there could be a dimension mismatch in the multiplication I*N (red circle) that affects the output, since an n x m matrix times an m x p matrix yields an n x p result.
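To illustrate the shape rule being referred to (a small standalone example, not the question's code):
import numpy as np
A = np.random.randn(3, 4)   # n x m
B = np.random.randn(4, 2)   # m x p
print(np.dot(A, B).shape)   # (3, 2): n x p; mismatched inner dimensions would raise an error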
I am following a tutorial on RNNs in TensorFlow, but I have a question concerning the input formats.
They take raw_x (a one-hot vector) and basically first cut it up into pieces of length 200 (batch_size) to form data_x. That is good.
Then they further cut up data_x into pieces of length 5 (num_steps, or graph width) with:
for i in range(epoch_size):
x = data_x[:, i * num_steps:(i + 1) * num_steps]
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yield (x, y)
However, if I look in the data, the slices of x do not match data_x. The first one does, but then they diverge.
Am I misunderstanding the above code? I would like to understand how x is being created or what it is supposed to look like.
I had expected the second item to be 0 1 0 1 0.
Also, I thought an epoch is when you go through the data completely; from this it seems that they split the data into 1000 parts (epoch_size)?
If it helps, this is my full code. I am trying to figure out what is going on with x, at line 48:
import numpy as np
import tensorflow as tf
# %matplotlib inline
import matplotlib.pyplot as plt
# Global config variables
num_steps = 5 # number of truncated backprop steps ('n' in the discussion above)
batch_size = 200
num_classes = 2
state_size = 4
learning_rate = 0.1
def gen_data(size=1000000):
print('generating data');
X = np.array(np.random.choice(2, size=(size,)))
Y = []
for i in range(size):
threshold = 0.5
if X[i-3] == 1:
threshold += 0.5
if X[i-8] == 1:
threshold -= 0.25
if np.random.rand() > threshold:
Y.append(0)
else:
Y.append(1)
return X, np.array(Y)
# adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py
def gen_batch(raw_data, batch_size, num_steps):
print('generating batches');
raw_x, raw_y = raw_data
data_length = len(raw_x)
# partition raw data into batches and stack them vertically in a data matrix
batch_partition_length = data_length // batch_size
data_x = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
data_y = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
for i in range(batch_size):
data_x[i] = raw_x[batch_partition_length * i:batch_partition_length * (i + 1)]
data_y[i] = raw_y[batch_partition_length * i:batch_partition_length * (i + 1)]
# further divide batch partitions into num_steps for truncated backprop
epoch_size = batch_partition_length // num_steps
for i in range(epoch_size):
x = data_x[:, i * num_steps:(i + 1) * num_steps]
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yield (x, y)
def gen_epochs(n, num_steps):
for i in range(n):
yield gen_batch(gen_data(), batch_size, num_steps)
"""
Placeholders
"""
x = tf.placeholder(tf.int32, [batch_size, num_steps], name='input_placeholder')
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='labels_placeholder')
init_state = tf.zeros([batch_size, state_size])
"""
RNN Inputs
"""
# Turn our x placeholder into a list of one-hot tensors:
# rnn_inputs is a list of num_steps tensors with shape [batch_size, num_classes]
x_one_hot = tf.one_hot(x, num_classes)
rnn_inputs = tf.unstack(x_one_hot, axis=1)
"""
Definition of rnn_cell
This is very similar to the __call__ method on Tensorflow's BasicRNNCell. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn_cell.py
"""
with tf.variable_scope('rnn_cell'):
W = tf.get_variable('W', [num_classes + state_size, state_size])
b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
def rnn_cell(rnn_input, state):
with tf.variable_scope('rnn_cell', reuse=True):
W = tf.get_variable('W', [num_classes + state_size, state_size])
b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
return tf.tanh(tf.matmul(tf.concat(axis=1, values=[rnn_input, state]), W) + b)
"""
Adding rnn_cells to graph
This is a simplified version of the "rnn" function from Tensorflow's api. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn.py
"""
state = init_state
rnn_outputs = []
for rnn_input in rnn_inputs:
state = rnn_cell(rnn_input, state)
rnn_outputs.append(state)
final_state = rnn_outputs[-1]
"""
Predictions, loss, training step
Losses and total_loss are simlar to the "sequence_loss_by_example" and "sequence_loss"
functions, respectively, from Tensorflow's api. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/seq2seq.py
"""
#logits and predictions
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, num_classes])
b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
logits = [tf.matmul(rnn_output, W) + b for rnn_output in rnn_outputs]
predictions = [tf.nn.softmax(logit) for logit in logits]
# Turn our y placeholder into a list labels
y_as_list = [tf.squeeze(i, axis=[1]) for i in tf.split(axis=1, num_or_size_splits=num_steps, value=y)]
#losses and train_step
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit,labels=label) for \
logit, label in zip(logits, y_as_list)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
"""
Function to train the network
"""
def train_network(num_epochs, num_steps, state_size=4, verbose=True):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
training_losses = []
for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
training_loss = 0
training_state = np.zeros((batch_size, state_size))
if verbose:
print("\nEPOCH", idx)
for step, (X, Y) in enumerate(epoch):
tr_losses, training_loss_, training_state, _ = \
sess.run([losses,
total_loss,
final_state,
train_step],
feed_dict={x:X, y:Y, init_state:training_state})
training_loss += training_loss_
if step % 100 == 0 and step > 0:
if verbose:
print("Average loss at step", step,
"for last 250 steps:", training_loss/100)
training_losses.append(training_loss/100)
training_loss = 0
return training_losses
training_losses = train_network(1,num_steps)
plt.plot(training_losses)
It seems the batches are effectively transposed.
So the first row of the x matrix (200 x 5) matches the first 5 elements of raw_x.
Only on the next iteration do the next elements of raw_x (indices 5-10) appear, again in the first row of x.
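A small worked example (a sketch with a tiny batch_size and num_steps, not the tutorial's values) makes the layout easier to see:
import numpy as np
raw_x = np.arange(20)                                # pretend data: 0..19
batch_size, num_steps = 2, 3
batch_partition_length = len(raw_x) // batch_size    # 10
data_x = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
for i in range(batch_size):
    data_x[i] = raw_x[batch_partition_length * i:batch_partition_length * (i + 1)]
# data_x row 0 holds raw_x[0:10], row 1 holds raw_x[10:20]
x0 = data_x[:, 0:num_steps]              # first yielded x: [[0, 1, 2], [10, 11, 12]]
x1 = data_x[:, num_steps:2*num_steps]    # second yielded x: [[3, 4, 5], [13, 14, 15]]
print(x0)
print(x1)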