Say that, for some reason, I want to fit a linear regression using PyTorch, as illustrated below.
How could I compute the Hessian matrix of the model to, ultimately, compute the standard error for my parameter estimates?
import torch
import torch.nn as nn

# set seed
torch.manual_seed(42)

# define the model
class OLS_pytorch(nn.Module):
    def __init__(self, X, Y):
        super(OLS_pytorch, self).__init__()
        self.X = X
        self.Y = Y
        self.beta = nn.Parameter(torch.ones(X.shape[1], 1, requires_grad=True))
        self.intercept = nn.Parameter(torch.ones(1, requires_grad=True))
        self.loss = nn.MSELoss()

    def forward(self):
        return self.X @ self.beta + self.intercept

    def fit(self, lr=0.01, epochs=1000):
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        for epoch in range(epochs):
            optimizer.zero_grad()
            loss = self.loss(self.forward(), self.Y)
            loss.backward()
            optimizer.step()
            if epoch % 10 == 0:
                print(f"Epoch {epoch} loss: {loss.item()}")
        return self
Generating some data and using the model:
# Generate some data
X = torch.randn(100, 1)
Y = 2 * X + 3 + torch.randn(100, 1)
# fit the model
model = OLS_pytorch(X, Y)
model.fit()
#extract parameters
model.beta, model.intercept
#Epoch 980 loss: 0.7803605794906616
#Epoch 990 loss: 0.7803605794906616
#(Parameter containing:
# tensor([[2.0118]], requires_grad=True),
# Parameter containing:
# tensor([3.0357], requires_grad=True))
For instance, in R, using the same data and the lm() function, I recover the same parameters, but I am also able to recover the variance-covariance matrix (which comes from the Hessian), and then I am able to compute standard errors.
ols <- lm(Y ~ X, data = xy)
ols$coefficients
#(Intercept) X
# 3.035674 2.011811
vcov(ols)
# (Intercept) X
# (Intercept) 0.0079923921 -0.0004940884
# X -0.0004940884 0.0082671053
summary(ols)
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 3.03567 0.08940 33.96 <2e-16 ***
# X 2.01181 0.09092 22.13 <2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
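(As a cross-check from Python, the same quantities can be obtained in closed form with statsmodels; a minimal sketch, assuming the X and Y tensors generated above:)

# a sketch: cross-check lm()'s coefficients and standard errors with statsmodels
import statsmodels.api as sm

ols_sm = sm.OLS(Y.numpy(), sm.add_constant(X.numpy())).fit()
print(ols_sm.params)  # intercept and slope, as in R
print(ols_sm.bse)     # standard errors, as in summary(ols)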
UPDATE: Using the answer from @cherrywoods,
here is how you would match the standard errors produced by lm() in R:
# predict
y_pred = model.X @ model.beta + model.intercept
N = model.X.shape[0]  # number of observations
sigma_hat = torch.sum((y_pred - model.Y)**2) / (N - 2)  # 2 is the number of estimated parameters
from torch.autograd.functional import hessian
def loss(beta, intercept):
    y_pred = model.X @ beta + intercept
    return model.loss(y_pred, model.Y)
# hessian() returns one block per pair of inputs; assemble them into a 2x2 matrix
blocks = hessian(loss, (model.beta, model.intercept))
H = torch.stack([torch.stack([blocks[0][0].reshape(()), blocks[0][1].reshape(())]),
                 torch.stack([blocks[1][0].reshape(()), blocks[1][1].reshape(())])])
# MSELoss averages over N, so H = (2/N) X'X; dividing inverse(H/2) by N recovers sigma_hat * (X'X)^-1
se = torch.sqrt(torch.diag(sigma_hat * torch.inverse(H / 2) / N))
print(se)
# tensor([0.0909, 0.0894], grad_fn=<SqrtBackward0>)
You can compute the Hessian using torch.autograd.functional.hessian.
from torch.autograd.functional import hessian

def loss(beta, intercept):
    y_pred = model.X @ beta + intercept
    return model.loss(y_pred, model.Y)

H = hessian(loss, (model.beta, model.intercept))
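Note that when you pass a tuple of inputs, hessian returns a nested tuple of blocks rather than a single matrix; block H[i][j] holds the second derivatives with respect to the i-th and j-th inputs, with shape inputs[i].shape + inputs[j].shape. A quick sketch of what comes back for the two parameters above:

# H[i][j] has shape inputs[i].shape + inputs[j].shape
print(H[0][0].shape)  # torch.Size([1, 1, 1, 1]) - d2 loss / d beta d beta
print(H[0][1].shape)  # torch.Size([1, 1, 1])    - d2 loss / d beta d intercept
print(H[1][1].shape)  # torch.Size([1, 1])       - d2 loss / d intercept d intercept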
I created a model class which is a subclass of keras.Model. While training the model, I want to change the weights of the loss functions after some epochs. In order to do that, I created boolean variables on my model indicating that the model should start training with the additional loss function. I've added pseudocode that shows what I am trying to achieve.
class MyModel(keras.Model):
    self.start_loss_2 = False

    def train_step(self):
        # Check if training with loss_2 started
        weight_loss_2 = 0.0
        if self.start_loss_2:
            weight_loss_2 = 0.5

        # Pass the data through model
        # Calculate two loss values
        total_loss = loss_1 + weight_loss_2 * loss_2
        # Calculate gradients with tf.Tape
        # Update variables

    # This is called via Callback after each epoch
    def epoch_finised(epoch_num):
        if epoch_num > START_LOSS_2:
            self.start_loss_2 = True
My question is:
Is it valid to use an if-else statement whose condition changes after some time? If it is not, how can I achieve this?
Yes. You can create a tf.Variable and then assign a new value to it based on some training criteria.
Example:
import numpy as np
import tensorflow as tf
# simple toy network
x_in = tf.keras.Input((10))
x = tf.keras.layers.Dense(25)(x_in)
x_out = tf.keras.layers.Dense(1)(x)
# model
m = tf.keras.Model(x_in, x_out)
# fake data
X = tf.random.normal((100, 10))
y0 = tf.random.normal((100, ))
y1 = tf.random.normal((100, ))
# optimizer
m_opt = tf.keras.optimizers.Adam(1e-2)
# prep data
ds = tf.data.Dataset.from_tensor_slices((X, y0, y1))
ds = ds.repeat().batch(5)
train_iter = iter(ds)
# toy loss function that uses a weight
def loss_fn(y_true0, y_true1, y_pred, weight):
    mse = tf.keras.losses.MSE
    mse_0 = tf.math.reduce_mean(mse(y_true0, y_pred))
    mse_1 = tf.math.reduce_mean(mse(y_true1, y_pred))
    return mse_0 + weight * mse_1
NUM_EPOCHS = 4
NUM_BATCHES_PER_EPOCH = 10
START_NEW_LOSS_AT_GLOBAL_STEP = 20
# the weight variable set to 0 initially and then
# will be changed after a certain number of steps
# (or some other training criteria)
w = tf.Variable(0.0, trainable=False)
for epoch in range(NUM_EPOCHS):
    losses = []
    for batch in range(NUM_BATCHES_PER_EPOCH):
        X_train, y0_train, y1_train = next(train_iter)
        with tf.GradientTape() as tape:
            y_hat = m(X_train)
            loss = loss_fn(y0_train, y1_train, y_hat, w)
        losses.append(loss)
        m_vars = m.trainable_variables
        m_grads = tape.gradient(loss, m_vars)
        m_opt.apply_gradients(zip(m_grads, m_vars))

    print(f"epoch: {epoch}\tloss: {np.mean(losses):.4f}")
    losses = []

    # if the criteria is met assign a huge number to see if the
    # loss spikes up
    if (epoch + 1) * (batch + 1) >= START_NEW_LOSS_AT_GLOBAL_STEP:
        w.assign(10000.0)
# epoch: 0 loss: 1.8226
# epoch: 1 loss: 1.1143
# epoch: 2 loss: 8788.2227 <= looks like assign worked
# epoch: 3 loss: 10999.5449
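Since the question routes the switch through a Callback, the same idea also works with model.fit: keep the weight in a tf.Variable and flip it from on_epoch_end. A minimal sketch, where the names (LossWeightSwitch, weight_var, start_epoch) are illustrative only, assuming the model's loss reads the variable:

import tensorflow as tf

# a sketch: flip the loss weight from a Keras callback
class LossWeightSwitch(tf.keras.callbacks.Callback):
    def __init__(self, weight_var, start_epoch, new_value=0.5):
        super().__init__()
        self.weight_var = weight_var    # tf.Variable read inside train_step/loss
        self.start_epoch = start_epoch  # epoch after which loss_2 is enabled
        self.new_value = new_value

    def on_epoch_end(self, epoch, logs=None):
        if epoch + 1 >= self.start_epoch:
            self.weight_var.assign(self.new_value)

# usage (assuming a model whose loss reads `w`):
# w = tf.Variable(0.0, trainable=False)
# model.fit(ds, epochs=10, callbacks=[LossWeightSwitch(w, start_epoch=5)])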
I'm doing a hands-on exercise for learning and have created a model in Python using numpy that's being trained on the breast cancer dataset from the sklearn library. The model runs without any error and gives me train and test accuracies of 92.48826291079813% and 90.9090909090909% respectively. However, somehow I'm not able to complete the hands-on, since (probably) my result is different than expected. I don't know where the problem is, because I don't know the right answer, and I don't see any error either.
Would request someone to help me with this. Code is given below.
#Import numpy as np and pandas as pd
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
**Define method initialiseNetwork() to initialise weights with zeros of shape (num_features, 1) and bias b to zero
parameters: num_features (number of input features)
returns: dictionary of weight vector and bias**
def initialiseNetwork(num_features):
    W = np.zeros((num_features, 1))
    b = 0
    parameters = {"W": W, "b": b}
    return parameters
** Define function sigmoid for the input z.
parameters: z
returns: $1/(1+e^{(-z)})$ **
def sigmoid(z):
    a = 1/(1 + np.exp(-z))
    return a
** Define method forwardPropagation() which implements forward propagation defined as Z = (W.T dot_product X) + b, A = sigmoid(Z)
parameters: X, parameters
returns: A **
def forwardPropagation(X, parameters):
    W = parameters["W"]
    b = parameters["b"]
    Z = np.dot(W.T, X) + b
    A = sigmoid(Z)
    return A
** Define function cost() which calculates the cost given by −(sum(Y*log(A)+(1−Y)*log(1−A)))/num_samples, where * is the elementwise product
parameters: A, Y, num_samples (number of samples)
returns: cost **
def cost(A, Y, num_samples):
    cost = -1/num_samples * np.sum(Y*np.log(A) + (1-Y)*(np.log(1-A)))
    return cost
** Define method backPropagration() to get the derivatives of weights and bias
parameters: X, Y, A, num_samples
returns: dW, db **
def backPropagration(X, Y, A, num_samples):
    dZ = A - Y
    dW = (np.dot(X, dZ.T))/num_samples  # (X dot_product dZ.T)/num_samples
    db = np.sum(dZ)/num_samples         # sum(dZ)/num_samples
    return dW, db
** Define function updateParameters() to update current parameters with their derivatives
w = w - learning_rate * dw
b = b - learning_rate * db
parameters: parameters, dW, db, learning_rate
returns: dictionary of updated parameters **
def updateParameters(parameters, dW, db, learning_rate):
    W = parameters["W"] - (learning_rate * dW)
    b = parameters["b"] - (learning_rate * db)
    return {"W": W, "b": b}
** Define the model for forward propagation
parameters: X,Y, num_iter(number of iterations), learning_rate
returns: parameters(dictionary of updated weights and bias) **
def model(X, Y, num_iter, learning_rate):
    num_features = X.shape[0]
    num_samples = X.shape[1]
    parameters = initialiseNetwork(num_features)  # call initialiseNetwork()
    for i in range(num_iter):
        A = forwardPropagation(X, parameters)  # calculate final output A from forwardPropagation()
        if i % 100 == 0:
            print("cost after {} iteration: {}".format(i, cost(A, Y, num_samples)))
        dW, db = backPropagration(X, Y, A, num_samples)  # calculate derivatives from backpropagation
        parameters = updateParameters(parameters, dW, db, learning_rate)  # update parameters
    return parameters
** Run the below cell to define the function to predict the output. It takes updated parameters and input data as function parameters and returns the predicted output **
def predict(X, parameters):
    W = parameters["W"]
    b = parameters["b"]
    b = b.reshape(b.shape[0], 1)
    Z = np.dot(W.T, X) + b
    Y = np.array([1 if y > 0.5 else 0 for y in sigmoid(Z[0])]).reshape(1, len(Z[0]))
    return Y
** The code in the below cell loads the breast cancer data set from sklearn.
The input variable (X_cancer) describes the dimensions of tumor cells and the target variable (y_cancer) classifies a tumor as malignant(0) or benign(1) **
(X_cancer, y_cancer) = load_breast_cancer(return_X_y = True)
** Split the data into train and test set using train_test_split(). Set the random state to 25. Refer to the code snippet in topic 4 **
X_train, X_test, y_train, y_test = train_test_split(X_cancer, y_cancer,
random_state = 25)
** Since the dimensions of the tumors are not uniform, you need to normalize the data before feeding it to the network.
The below function is used to normalize the input data. **
def normalize(data):
    col_max = np.max(data, axis = 0)
    col_min = np.min(data, axis = 0)
    return np.divide(data - col_min, col_max - col_min)
** Normalize X_train and X_test and assign it to X_train_n and X_test_n respectively **
X_train_n = normalize(X_train)
X_test_n = normalize(X_test)
** Transpose X_train_n and X_test_n so that rows represent features and columns represent the samples.
Reshape y_train and y_test into row vectors whose length is equal to the number of samples. Use np.reshape() **
X_trainT = X_train_n.T
#print(X_trainT.shape)
X_testT = X_test_n.T
#print(X_testT.shape)
y_trainT = y_train.reshape(1,X_trainT.shape[1])
y_testT = y_test.reshape(1,X_testT.shape[1])
** Train the network using X_trainT,y_trainT with number of iterations 4000 and learning rate 0.75 **
parameters = model(X_trainT, y_trainT, 4000, 0.75)  # call the model() function with parameters mentioned in the above cell
** Predict the output of test and train data using X_trainT and X_testT with the predict() method. Use the parameters returned from the trained model **
yPredTrain = predict(X_trainT, parameters)  # pass weights and bias from parameters dictionary and X_trainT as input to the function
yPredTest = predict(X_testT, parameters) # pass the same parameters but X_testT as input data
** Run the below cell to print the accuracy of the model on train and test data. **
accuracy_train = 100 - np.mean(np.abs(yPredTrain - y_trainT)) * 100
accuracy_test = 100 - np.mean(np.abs(yPredTest - y_testT)) * 100
print("train accuracy: {} %".format(accuracy_train))
print("test accuracy: {} %".format(accuracy_test))
My Output:
train accuracy: 92.48826291079813 %
test accuracy: 90.9090909090909 %
I figured out where the problem was. It was the third line in the predict function, where I was reshaping the bias, which was not necessary at all.
def predict(X, parameters):
    W = parameters["W"]
    b = parameters["b"]
    # b = b.reshape(b.shape[0],1)   <-- removed this line
    Z = np.dot(W.T, X) + b
    Y = np.array([1 if y > 0.5 else 0 for y in sigmoid(Z[0])]).reshape(1, len(Z[0]))
    return Y
Also, the third line in the back-propagation function needed to be corrected to np.sum(dZ)/num_samples; with Python's built-in sum, db comes out as a vector rather than a scalar, so the bias broadcasts incorrectly (which is what made the reshape above seem necessary in the first place).
def backPropagration(X, Y, A, num_samples):
    dZ = A - Y
    dW = (np.dot(X, dZ.T))/num_samples
    db = np.sum(dZ)/num_samples   # <-- corrected from sum(dZ)/num_samples
    return dW, db
After I corrected both functions, the model gave me a train accuracy of 98.59154929577464% and a test accuracy of 93.00699300699301%.
I am trying to complete an implementation of a neural network class that uses PyTorch, but the update step is causing an error related to NoneType.
I am using the PyTorch package with Python 3.7.3 in a Jupyter Notebook.
The problem is in the step where I have to take the weight update step and then zero the gradient values.
class NNet(torch.nn.Module):

    def __init__(self, n_inputs, n_hiddens_per_layer, n_outputs, act_func='tanh'):
        super().__init__()  # call parent class (torch.nn.Module) constructor

        # Set self.n_hiddens_per_layer to [] if argument is 0, [], or [0]
        if n_hiddens_per_layer == 0 or n_hiddens_per_layer == [] or n_hiddens_per_layer == [0]:
            self.n_hiddens_per_layer = []
        else:
            self.n_hiddens_per_layer = n_hiddens_per_layer

        self.hidden_layers = torch.nn.ModuleList()  # necessary for model.to('cuda')

        for nh in self.n_hiddens_per_layer:
            self.hidden_layers.append( torch.nn.Sequential(
                torch.nn.Linear(n_inputs, nh),
                torch.nn.Tanh() if act_func == 'tanh' else torch.nn.ReLU()))
            n_inputs = nh

        self.output_layer = torch.nn.Linear(n_inputs, n_outputs)

        self.Xmeans = None
        self.Xstds = None
        self.Tmeans = None
        self.Tstds = None

        self.error_trace = []

    def forward(self, X):
        Y = X
        for hidden_layer in self.hidden_layers:
            Y = hidden_layer(Y)
        Y = self.output_layer(Y)
        return Y

    def train(self, X, T, n_epochs, learning_rate, verbose=True):

        # Set data matrices to torch.tensors if not already.
        if not isinstance(X, torch.Tensor):
            X = torch.from_numpy(X).float()
        if not isinstance(T, torch.Tensor):
            T = torch.from_numpy(T).float()
        W = torch.zeros((2, 1), requires_grad=True)
        print(W.requires_grad)

        # Calculate standardization parameters if not already calculated
        if self.Xmeans is None:
            self.Xmeans = X.mean(0)
            self.Xstds = X.std(0)
            self.Xstds[self.Xstds == 0] = 1
            self.Tmeans = T.mean(0)
            self.Tstds = T.std(0)
            self.Tstds[self.Tstds == 0] = 1

        # Standardize inputs and targets
        X = (X - self.Xmeans) / self.Xstds
        T = (T - self.Tmeans) / self.Tstds

        # Set optimizer to Adam and loss function to MSELoss
        optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
        mse_func = torch.nn.MSELoss()

        # For each epoch:
        #   Do forward pass to calculate output Y.
        #   Calculate mean squared error loss, mse.
        #   Calculate gradient of mse with respect to all weights by calling mse.backward().
        #   Take weight update step, then zero the gradient values.
        #   Unstandardize the mse error and save in self.error_trace
        #   Print epoch+1 and unstandardized error if verbose is True and
        #     (epoch+1 is n_epochs or epoch+1 % (n_epochs // 10) == 0)
        for epoch in range(n_epochs):
            # Do forward pass to calculate output Y.
            Y = self.forward(X)
            print("Y = \n", Y)
            # Calculate mean squared error loss, mse.
            mse = ((T - Y)**2).mean()
            #mse = torch.mean((T - Y[-1]) ** 2)
            print("Y shape = \n", Y.shape)
            print("Tshape = \n", T.shape)
            print("MSE = \n", mse)
            # Calculate gradient of mse with respect to all weights by calling mse.backward().
            #W.retain_grad()
            mse.backward(torch.ones(100))
            #print("mse.backward(torch.ones(100))", mse.backward(torch.ones(100)))
            # Take weight update step, then zero the gradient values.
            #print("W.grad = ", W.grad())
            with torch.no_grad():
                W = learning_rate*W.grad()
                print("kuttu", W.requires_grad)
                W -= learning_rate * W.grad()
                W.grad.zero_()
            # Unstandardize the mse error and save in self.error_trace
            self.error_trace = mse * self.Tstds
            #. . .

    def use(self, X):
        # Set input matrix to torch.tensors if not already.
        if not isinstance(X, torch.Tensor):
            X = torch.from_numpy(X).float()
        # Standardize X
        print("here=\n", type(X))
        X = (X - torch.mean(X)) / self.Xstds
        # Do forward pass and unstandardize resulting output. Assign to variable Y.
        # Return output Y after detaching from computation graph and converting to numpy
        return Y.detach().numpy()
<ipython-input-20-6e1e577f866d> in train(self, X, T, n_epochs, learning_rate, verbose)
     86             # Take weight update step, then zero the gradient values.
     87             with torch.no_grad():
---> 88                 W = learning_rate*W.grad()
     89                 print("w", W.requires_grad)
     90                 W -= learning_rate * W.grad()

TypeError: 'NoneType' object is not callable
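Two things are going wrong in that line: grad is an attribute, not a method, so W.grad() attempts to call the value of W.grad, and that value is None in the first place because W never participates in the computation of mse. Since an Adam optimizer over self.parameters() is already constructed above, the manual W tensor is not needed at all; a minimal sketch of the epoch loop under that assumption:

# a sketch of the usual optimizer-driven update, assuming the `optimizer`
# and `mse_func` defined above; no hand-rolled W tensor is needed
for epoch in range(n_epochs):
    Y = self.forward(X)
    mse = mse_func(Y, T)       # scalar loss
    optimizer.zero_grad()      # clear gradients from the previous step
    mse.backward()             # no gradient argument: mse is a scalar
    optimizer.step()           # update all parameters registered in __init__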
I'm trying to build a neural network on the Mnist dataset for a HW assignment. I'm not asking anyone to DO the assignment for me, I'm just having trouble figuring out why the Training accuracy and Test Accuracy seem to be static for every epoch?
It's as if my way of updating weights is not working.
Epoch: 0, Train Accuracy: 10.22%, Train Cost: 3.86, Test Accuracy: 10.1%
Epoch: 1, Train Accuracy: 10.22%, Train Cost: 3.86, Test Accuracy: 10.1%
Epoch: 2, Train Accuracy: 10.22%, Train Cost: 3.86, Test Accuracy: 10.1%
Epoch: 3, Train Accuracy: 10.22%, Train Cost: 3.86, Test Accuracy: 10.1%
.
.
.
However, when I run the actual forward and backprop lines in a loop without any of the 'fluff' of classes or methods, the cost goes down. I just can't seem to get it working in the current class setup.
I've tried building my own methods that pass the weights and biases between the backprop and feed-forward methods explicitly, however, those changes haven't done anything to fix this gradient descent issue.
I'm pretty sure it has to do with the definition of the backprop method in the NeuralNetwork class below. I've been struggling to find a way to update the weights by accessing the weight and bias variables in the main training loop.
def backward(self, Y_hat, Y):
    '''
    Backward pass through network. Update parameters

    INPUT
        Y_hat: Network predicted
            shape: (?, 10)
        Y: Correct target
            shape: (?, 10)

    RETURN
        cost: calculate J for errors
            type: (float)
    '''
    #Naked Backprop
    dJ_dZ2 = Y_hat - Y
    dJ_dW2 = np.matmul(np.transpose(X2), dJ_dZ2)
    dJ_db2 = Y_hat - Y
    dJ_dX2 = np.matmul(dJ_db2, np.transpose(NeuralNetwork.W2))
    dJ_dZ1 = dJ_dX2 * d_sigmoid(Z1)
    inner_mat = np.matmul(Y-Y_hat, np.transpose(NeuralNetwork.W2))
    dJ_dW1 = np.matmul(np.transpose(X), inner_mat) * d_sigmoid(Z1)
    dJ_db1 = np.matmul(Y - Y_hat, np.transpose(NeuralNetwork.W2)) * d_sigmoid(Z1)

    lr = 0.1
    # weight updates here
    #just line 'em up and do lr * the dJ_.. vars you found above
    NeuralNetwork.W2 = NeuralNetwork.W2 - lr * dJ_dW2
    NeuralNetwork.b2 = NeuralNetwork.b2 - lr * dJ_db2
    NeuralNetwork.W1 = NeuralNetwork.W1 - lr * dJ_dW1
    NeuralNetwork.b1 = NeuralNetwork.b1 - lr * dJ_db1

    # calculate the cost
    cost = -1 * np.sum(Y * np.log(Y_hat))
    return cost  #, W1, W2, b1, b2
I'm really at a loss here, any help is appreciated!
Full code is shown here...
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
np.random.seed(0)
"""### Load MNIST Dataset"""
(x_train, y_train), (x_test, y_test) = mnist.load_data()
X = x_train[0].reshape(1,-1)/255.; Y = y_train[0]
zeros = np.zeros(10); zeros[Y] = 1
Y = zeros
#Here we implement the forward pass for the network using the single example, $X$, from above
### Initialize weights and Biases
num_hidden_nodes = 200
num_classes = 10
# init weights
#first set of weights (these are what the input matrix is multiplied by)
W1 = np.random.uniform(-1e-3,1e-3,size=(784,num_hidden_nodes))
#this is the first bias layer and i think it's a 200 dimensional vector of the biases that go into each neuron before the sigmoid function.
b1 = np.zeros((1,num_hidden_nodes))
#again this are the weights for the 2nd layer that are multiplied by the activation output of the 1st layer
W2 = np.random.uniform(-1e-3,1e-3,size=(num_hidden_nodes,num_classes))
#these are the biases that are added to each neuron before the final softmax activation.
b2 = np.zeros((1,num_classes))
# multiply input with weights
Z1 = np.add(np.matmul(X,W1), b1)
def sigmoid(z):
    return 1 / (1 + np.exp(- z))

def d_sigmoid(g):
    return sigmoid(g) * (1. - sigmoid(g))
# activation function of Z1
X2 = sigmoid(Z1)
Z2 = np.add(np.matmul(X2,W2), b2)
# softmax
def softmax(z):
    # subtracting the max adds numerical stability
    shiftx = z - np.max(z)
    exps = np.exp(shiftx)
    return exps / np.sum(exps)

def d_softmax(Y_hat, Y):
    return Y_hat - Y
# the hypothesis,
Y_hat = softmax(Z2)
"""Initially the network guesses all categories equally. As we perform backprop the network will get better at discerning images and their categories."""
"""### Calculate Cost"""
cost = -1 * np.sum(Y * np.log(Y_hat))
#so i think the main thing here is like a nested chain rule thing, where we find the change in the cost with respec to each
# set of matrix weights and biases?
#here is probably the order of how we do things based on whats in math below...
'''
1. find the partial deriv of the cost function with respect to the output of the second layer, without the softmax it looks like for some reason?
2. find the partial deriv of the cost function with respect to the weights of the second layer, which is dope cause we can re-use the partial deriv from step 1
3. this one I know intuitively we're looking for the parial deriv of cost with respect to the bias term of the second layer, but how TF does that math translate into
numpy? is that the same y_hat - Y from the first step? where is there anyother Y_hat - y?
4. This is also confusing cause I know where to get the weights for layer 2 from and how to transpose them, but again, where is the Y_hat - Y?
5. Here we take the missing partial deriv from step 4 and multiply it by the d_sigmoid function of the first layer outputs before activations.
6. In this step we multiply the first layer weights (transposed) by the var from 5
7. And this is weird too, this just seems like the same step as number 5 repeated for some reason but with y-y_hat instead of y_hat-y
'''
#look at tutorials like this https://www.youtube.com/watch?v=7qYtIveJ6hU
#I think the most backprop layer steps are fine without biases but how do we find the bias derivatives
#maybe just the hypothesis matrix minus the actual y matrix?
dJ_dZ2 = Y_hat - Y
#find partial deriv of cost w respect to 2nd layer weights
dJ_dW2 = np.matmul(np.transpose(X2), dJ_dZ2)
#finding the partial deriv of cost with respect to the 2nd layer biases
#I'm still not 100% sure why this is here and why it works out to Y_hat - Y
dJ_db2 = Y_hat - Y
#finding the partial deriv of cost with respect to 2nd layer inputs
dJ_dX2 = np.matmul(dJ_db2, np.transpose(W2))
#finding the partial deriv of cost with respect to Activation of layer 1
dJ_dZ1 = dJ_dX2 * d_sigmoid(Z1)
#y-yhat matmul 2nd layer weights
#I added the transpose to the W2 var because the matrices were not compaible sizes without it
inner_mat = np.matmul(Y-Y_hat,np.transpose(W2))
dJ_dW1 = np.matmul(np.transpose(X),inner_mat) * d_sigmoid(Z1)
class NeuralNetwork:
    # set learning rate
    lr = 0.01

    # init weights
    W1 = np.random.uniform(-1e-3, 1e-3, size=(784, num_hidden_nodes))
    b1 = np.zeros((1, num_hidden_nodes))
    W2 = np.random.uniform(-1e-3, 1e-3, size=(num_hidden_nodes, num_classes))
    b2 = np.zeros((1, num_classes))

    def __init__(self, num_hidden_nodes, num_classes, lr=0.01):
        '''
        # set learning rate
        lr = lr
        # init weights
        W1 = np.random.uniform(-1e-3,1e-3,size=(784,num_hidden_nodes))
        b1 = np.zeros((1,num_hidden_nodes))
        W2 = np.random.uniform(-1e-3,1e-3,size=(num_hidden_nodes,num_classes))
        b2 = np.zeros((1,num_classes))
        '''

    def forward(self, X1):
        '''
        Forward pass through the network

        INPUT
            X: input to network
                shape: (?, 784)

        RETURN
            Y_hat: prediction from output of network
                shape: (?, 10)
        '''
        Z1 = np.add(np.matmul(X, W1), b1)
        X2 = sigmoid(Z1)  # activation function of Z1
        Z2 = np.add(np.matmul(X2, W2), b2)
        Y_hat = softmax(Z2)
        #return the hypothesis
        return Y_hat
        # store input for backward pass
        # you can basically copy and past what you did in the forward pass above here
        # think about what you need to store for the backward pass
        return

    def backward(self, Y_hat, Y):
        '''
        Backward pass through network. Update parameters

        INPUT
            Y_hat: Network predicted
                shape: (?, 10)
            Y: Correct target
                shape: (?, 10)

        RETURN
            cost: calculate J for errors
                type: (float)
        '''
        #Naked Backprop
        dJ_dZ2 = Y_hat - Y
        dJ_dW2 = np.matmul(np.transpose(X2), dJ_dZ2)
        dJ_db2 = Y_hat - Y
        dJ_dX2 = np.matmul(dJ_db2, np.transpose(NeuralNetwork.W2))
        dJ_dZ1 = dJ_dX2 * d_sigmoid(Z1)
        inner_mat = np.matmul(Y-Y_hat, np.transpose(NeuralNetwork.W2))
        dJ_dW1 = np.matmul(np.transpose(X), inner_mat) * d_sigmoid(Z1)
        dJ_db1 = np.matmul(Y - Y_hat, np.transpose(NeuralNetwork.W2)) * d_sigmoid(Z1)

        lr = 0.1
        # weight updates here
        #just line 'em up and do lr * the dJ_.. vars you found above
        NeuralNetwork.W2 = NeuralNetwork.W2 - lr * dJ_dW2
        NeuralNetwork.b2 = NeuralNetwork.b2 - lr * dJ_db2
        NeuralNetwork.W1 = NeuralNetwork.W1 - lr * dJ_dW1
        NeuralNetwork.b1 = NeuralNetwork.b1 - lr * dJ_db1

        # calculate the cost
        cost = -1 * np.sum(Y * np.log(Y_hat))
        return cost  #, W1, W2, b1, b2
nn = NeuralNetwork(200,10,lr=.01)
num_train = float(len(x_train))
num_test = float(len(x_test))
for epoch in range(10):
    train_correct = 0; train_cost = 0
    # training loop
    for i in range(len(x_train)):
        x = x_train[i]; y = y_train[i]
        # standardizing input to range 0 to 1
        X = x.reshape(1, 784) / 255.
        # forward pass through network
        Y_hat = nn.forward(X)
        # get pred number
        pred_num = np.argmax(Y_hat)
        # check if prediction was accurate
        if pred_num == y:
            train_correct += 1
        # make a one hot categorical vector; same as keras.utils.to_categorical()
        zeros = np.zeros(10); zeros[y] = 1
        Y = zeros
        # compute gradients and update weights
        train_cost += nn.backward(Y_hat, Y)

    test_correct = 0
    # validation loop
    for i in range(len(x_test)):
        x = x_test[i]; y = y_test[i]
        # standardizing input to range 0 to 1
        X = x.reshape(1, 784) / 255.
        # forward pass
        Y_hat = nn.forward(X)
        # get pred number
        pred_num = np.argmax(Y_hat)
        # check if prediction was correct
        if pred_num == y:
            test_correct += 1
        # no backward pass here!

    # compute average metrics for train and test
    train_correct = round(100*(train_correct/num_train), 2)
    test_correct = round(100*(test_correct/num_test), 2)
    train_cost = round(train_cost/num_train, 2)

    # print status message every epoch
    log_message = 'Epoch: {epoch}, Train Accuracy: {train_acc}%, Train Cost: {train_cost}, Test Accuracy: {test_acc}%'.format(
        epoch=epoch,
        train_acc=train_correct,
        train_cost=train_cost,
        test_acc=test_correct)
    print(log_message)
Also, the project is in this colab & ipynb notebook.
I believe this is pretty clear; look at this part of your loop:
for epoch in range(10):
    train_correct = 0; train_cost = 0
    # training loop
    for i in range(len(x_train)):
        x = x_train[i]; y = y_train[i]
        # standardizing input to range 0 to 1
        X = x.reshape(1, 784) / 255.
        # forward pass through network
        Y_hat = nn.forward(X)
        # get pred number
        pred_num = np.argmax(Y_hat)
        # check if prediction was accurate
        if pred_num == y:
            train_correct += 1
        # make a one hot categorical vector; same as keras.utils.to_categorical()
        zeros = np.zeros(10); zeros[y] = 1
        Y = zeros
        # compute gradients and update weights
        train_cost += nn.backward(Y_hat, Y)

    test_correct = 0
    # validation loop
    for i in range(len(x_test)):
        x = x_test[i]; y = y_test[i]
        # standardizing input to range 0 to 1
        X = x.reshape(1, 784) / 255.
        # forward pass
        Y_hat = nn.forward(X)
        # get pred number
        pred_num = np.argmax(Y_hat)
        # check if prediction was correct
        if pred_num == y:
            test_correct += 1
        # no backward pass here!

    # compute average metrics for train and test
    train_correct = round(100*(train_correct/num_train), 2)
    test_correct = round(100*(test_correct/num_test), 2)
    train_cost = round(train_cost/num_train, 2)

    # print status message every epoch
    log_message = 'Epoch: {epoch}, Train Accuracy: {train_acc}%, Train Cost: {train_cost}, Test Accuracy: {test_acc}%'.format(
        epoch=epoch,
        train_acc=train_correct,
        train_cost=train_cost,
        test_acc=test_correct)
    print(log_message)
For every one of the 10 epochs in your loop, you are setting train_correct and train_cost back to 0, hence there is no updating after each epoch.
Hi, I am trying to make a NN model that satisfies a simple formula.
y = X1^2 + X2^2
But when I use CrossEntropyLoss as the loss function, I get two different error messages.
First, when I set the code like this:
x = torch.randn(batch_size, 2)
y_hat = model(x)
y = answer(x).long()
optimizer.zero_grad()
loss = loss_func(y_hat, y)
loss.backward()
optimizer.step()
I get this message:
RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at c:\programdata\miniconda3\conda-bld\pytorch_1533090623466\work\aten\src\thnn\generic/ClassNLLCriterion.c:93
Second, when I change the code like this:
x = torch.randn(batch_size, 2)
y_hat = model(x)
y = answer(x).long().view(batch_size,1,1)
optimizer.zero_grad()
loss = loss_func(y_hat, y)
loss.backward()
optimizer.step()
then I get a message like:
RuntimeError: multi-target not supported at c:\programdata\miniconda3\conda-bld\pytorch_1533090623466\work\aten\src\thnn\generic/ClassNLLCriterion.c:21
How can I solve this problem? Thanks. (Sorry for my English.)
This is my code:
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

def answer(x):
    y = x[:,0].pow(2) + x[:,1].pow(2)
    return y

class Model(nn.Module):
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(input_size, 10)
        self.linear2 = nn.Linear(10, 1)

    def forward(self, x):
        y = F.relu(self.linear1(x))
        y = F.relu(self.linear2(y))
        return y

model = Model(2, 1)
print(model, '\n')

loss_func = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr = 0.001)

batch_size = 3
epoch_n = 100
iter_n = 100

for epoch in range(epoch_n):
    loss_avg = 0
    for i in range(iter_n):
        x = torch.randn(batch_size, 2)
        y_hat = model(x)
        y = answer(x).long().view(batch_size, 1, 1)

        optimizer.zero_grad()
        loss = loss_func(y_hat, y)
        loss.backward()
        optimizer.step()

        loss_avg += loss

    loss_avg = loss_avg / iter_n
    if epoch % 10 == 0:
        print(loss_avg)
    if loss_avg < 0.001:
        break
Can I make this dataset using a DataLoader in PyTorch? Thanks for your help.
You are using the wrong loss function. CrossEntropyLoss is used for classification problems, whereas your problem is one of regression, so you should use a loss meant for regression-like tasks, such as Mean Squared Error Loss or L1 Loss. Take a look at this, this, this and this.
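For instance, a minimal sketch of how the training snippet above changes, reusing the Model, answer(), and optimizer from the question: keep the targets as floats (no .long() cast) and match the output shape.

# a sketch: regression setup for y = x1^2 + x2^2, reusing the question's code
loss_func = nn.MSELoss()

x = torch.randn(batch_size, 2)
y_hat = model(x)                   # shape (batch_size, 1)
y = answer(x).view(batch_size, 1)  # float targets, same shape as y_hat
optimizer.zero_grad()
loss = loss_func(y_hat, y)
loss.backward()
optimizer.step()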