I am working on a PyTorch LSTM model that detects patterns in sequences of N variables that lead to a good vs. bad outcome.
I tested it with a simple test case that has 12 training examples.
I see that using a batch size of 1 (i.e., computing the loss, calling .backward(), and calling .step() after every sample) separates the 1s and 0s much more than updating the loss once with all samples. The difference is shown below.
Can someone give me the intuition behind this behavior?
Is there a rule of thumb for batch size based on training data size? A smaller batch size makes things slow, but I assume there is also a higher chance that training may not converge?
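For reference, the only structural difference between the two runs is how often the optimizer steps: with batch size 1 the weights are updated after every sample (12 times per epoch here), while with a full batch they are updated once per epoch on the averaged loss. A toy sketch of the two regimes (a stand-in model, not the LSTM below):

import torch
import torch.nn as nn

# Hypothetical toy setup just to contrast the two update schedules.
torch.manual_seed(0)
X = torch.randn(12, 4)                       # 12 samples, 4 features
y = torch.randint(0, 2, (12, 1)).float()     # binary targets
model = nn.Sequential(nn.Linear(4, 1), nn.Sigmoid())
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
loss_fn = nn.BCELoss()

# Regime 1: batch size 1 -> 12 optimizer steps per epoch.
for xb, yb in zip(X, y):
    opt.zero_grad()
    loss_fn(model(xb.unsqueeze(0)), yb.unsqueeze(0)).backward()
    opt.step()

# Regime 2: full batch -> a single optimizer step per epoch on the averaged loss.
opt.zero_grad()
loss_fn(model(X), y).backward()
opt.step()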
Final predictions with batch size 1
Train set targets: [[0.0]], predictions:[0.24779687821865082]
Train set targets: [[1.0]], predictions:[0.9567258954048157]
Train set targets: [[1.0]], predictions:[0.8191764950752258]
Train set targets: [[0.0]], predictions:[0.20435290038585663]
Train set targets: [[0.0]], predictions:[0.1295892596244812]
Train set targets: [[1.0]], predictions:[0.9186112284660339]
Train set targets: [[1.0]], predictions:[0.6797895431518555]
Train set targets: [[1.0]], predictions:[0.9642216563224792]
Train set targets: [[1.0]], predictions:[0.9764360785484314]
Train set targets: [[0.0]], predictions:[0.670409619808197]
Train set targets: [[1.0]], predictions:[0.7026165723800659]
Train set targets: [[1.0]], predictions:[0.8404821157455444]
Test set targets: [[0.0]], predictions:[0.08575308322906494]
Test set targets: [[1.0]], predictions:[0.7602054476737976]
Test set targets: [[1.0]], predictions:[0.7767713069915771]
Final predictions with a batch size that includes all samples
Train set
targets: [[0.0], [1.0], [0.0], [1.0], [1.0], [1.0], [1.0], [0.0], [1.0], [1.0], [0.0], [1.0]],
predictions:[0.6307732462882996, 0.6873687505722046, 0.6007956862449646, 0.7481836080551147, 0.7676156759262085, 0.6568607091903687, 0.7259970307350159, 0.597843587398529, 0.6819412708282471, 0.6660482287406921, 0.5785030126571655, 0.7716434597969055]
Test set
Targets: [[0.0], [1.0], [1.0]],
Predictions:[0.36408719420433044, 0.7265898585319519, 0.7854364514350891]
Code
Model
class LSTMModel(nn.Module):
def __init__(self, input_dim, hidden_dim):
        super(LSTMModel, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
# LSTM
self.lstm = nn.LSTM(input_dim, hidden_dim, batch_first=True)
# Readout layer
self.fc = nn.Linear(hidden_dim, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, inputs):
        # hidden is the final hidden state, with shape (num_layers, batch, hidden_dim)
        lstm_out, (hidden, _) = self.lstm(inputs)
        output = self.fc(hidden)
output = self.sigmoid(output)
return output
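Note that because self.fc is applied to hidden rather than to the last timestep of the sequence output, the model returns a tensor of shape (1, batch, 1); that is why the training code below flattens predictions with .view(-1) and unsqueezes the targets. A quick shape check (a standalone sketch, not part of the original post):

import torch
model = LSTMModel(input_dim=2, hidden_dim=10)
x = torch.randn(4, 3, 2)   # (batch, timesteps, features) because batch_first=True
print(model(x).shape)      # torch.Size([1, 4, 1]) -- (num_layers, batch, 1)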
Dataset code
class LSTMDataset(Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.y)
def __getitem__(self,idx):
inputs = [torch.from_numpy(self.x[idx][ts]).unsqueeze(0) for ts in range(len(self.x[idx]))]
inputs = torch.cat(inputs)
target = torch.Tensor([self.y[idx]])
return inputs, target
Training code
# This function is used to train a single model
def train_lstm(
X_training,
y_training,
X_testing,
y_testing,
hidden_dim=10,
lr=1e-4,
f1_thresh = 0.5,
use_gpu=False,
batch_size=100,
num_epochs=4000,
assym_wt=0, #Equal weights for 0 and 1
):
start_time = time.time()
clf = LSTMModel(len(X_training[0][0]), hidden_dim)
# Move to GPU if available
use_gpu = use_gpu and torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
# Define the loss function and optimizer
optimizer = torch.optim.Adam(clf.parameters(), lr=lr)
clf = clf.to(device)
loss_function = nn.BCELoss()
loss_function = loss_function.to(device)
dataset = LSTMDataset(X_training, y_training)
trainloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
# Run the training loop
# per_epoch_precision = []
# per_epoch_recall = []
train_preds = []
train_targets = []
for epoch in range(0, num_epochs):
# Set current loss value
current_loss = 0.0
# Iterate over the DataLoader for training data
clf.train() # set to train mode
for i, data in enumerate(trainloader):
# Get inputs
inputs, targets = data
# Zero the gradients
optimizer.zero_grad()
# Perform forward pass
outputs = clf(inputs)
# Store predictions/targets in last epoch to compute accuracy stats
if epoch == num_epochs - 1:
train_targets += targets
train_preds += outputs.view(-1).tolist()
print(f'Train set targets: {targets.tolist()}, predictions:{outputs.view(-1).tolist()}')
            # Compute loss
            targets = targets.float().unsqueeze(0)
            # Apply asymmetric weights to handle unbalanced datasets
            if assym_wt > 0:
                loss_function = nn.BCELoss(weight=assym_wt * targets + 1)
                loss_function = loss_function.to(device)
            loss = loss_function(outputs, targets)
# Perform backward pass
loss.backward()
# Perform optimization
optimizer.step()
# Print statistics
current_loss += loss.item()
if (epoch % 250) == 249:
print("Loss after epoch %5d: %.3f" % (epoch + 1, current_loss / 500))
current_loss = 0.0
# Process is complete.
print("Training process has finished.")
train_preds = torch.FloatTensor(train_preds)
train_targets = torch.FloatTensor(train_targets)
clf.eval() # set to eval mode
dataset = LSTMDataset(X_testing, y_testing)
testloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
test_preds = []
test_targets = []
with torch.no_grad():
for i, data in enumerate(testloader):
# Get inputs
inputs, targets = data
# Perform forward pass
preds = clf(inputs)
print(f'Test set targets: {targets.tolist()}, predictions:{preds.view(-1).tolist()}')
# Store predictions and targets so that we can compute stats
test_preds += preds.view(-1).tolist()
test_targets += targets
test_preds = torch.FloatTensor(test_preds)
test_targets = torch.FloatTensor(test_targets)
pr_fig = go.Figure()
pr_fig.update_xaxes(title_text="Recall")
pr_fig.update_yaxes(title_text="Precision")
plot_pr_withtorch(test_targets, test_preds, pr_fig, "Test PR ")
plot_pr_withtorch(train_targets, train_preds, pr_fig, "Train PR ")
pr_fig.show()
train_F1 = get_scores(train_targets, train_preds > f1_thresh, "Train set scores")
test_F1 = get_scores(test_targets, test_preds > f1_thresh, "Test set scores")
print(f"Train time is {time.time() - start_time}")
return train_F1, test_F1
Test case
# test case
train = np.array([[10, 20, 5, 10, 1], [10, 5, 8, 3, 0], [10, 5, 5, 10, 1]])
for ind in range(0, 3):
for spread in [5, 10, 50]:
newrow = spread + train[ind]
newrow[-1] -= spread
# print(newrow)
train = np.vstack([train, newrow])
test = np.array([[0, 8, 3, 7, 1], [19, 8, 12, 3, 0], [1000, 450, 75, 135, 1]])
train_df = pd.DataFrame(train, columns = ['f1_1','f1_2','f2_1','f2_2', 'op'])
test_df = pd.DataFrame(test, columns = ['f1_1','f1_2','f2_1','f2_2', 'op'])
tc_Xtrain = train_df[train_df.columns[~train_df.columns.isin(['op'])]]
tc_ytrain = train_df['op'].astype(np.float32)
tc_Xtest = test_df[test_df.columns[~test_df.columns.isin(['op'])]]
tc_ytest = test_df['op'].astype(np.float32)
#normalize values
scaler = StandardScaler()
tc_Xtrain = scaler.fit_transform(tc_Xtrain)
# print(tc_Xtrain)
# print(f'Type after standard scaler = {type(tc_Xtrain)}')
tc_Xtest = scaler.transform(tc_Xtest)
X_train = np.apply_along_axis(create_ts_features, 1, tc_Xtrain, num_features=2).astype(np.float32)
X_test = np.apply_along_axis(create_ts_features, 1, tc_Xtest, num_features=2).astype(np.float32)
# print(X_train)
# print(tc_ytrain)
train_lstm(X_train, tc_ytrain, X_test, tc_ytest, num_epochs=4000, f1_thresh=0.6, batch_size=100)
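The helper create_ts_features used above is not included in the post; based on how it is called (row-wise over the 4 scaled columns with num_features=2) and on the LSTM expecting a (timesteps, features) array per sample, a plausible stand-in, purely as an assumption, might be:

import numpy as np

def create_ts_features(row, num_features):
    # Hypothetical helper: the column order f1_1, f1_2, f2_1, f2_2 suggests
    # feature index first, then timestep, so reshape to (num_features, timesteps)
    # and transpose to get a (timesteps, num_features) sequence per sample.
    return row.reshape(num_features, -1).T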
Metric computation
def plot_pr_withtorch(target, pred, fig, title):
pr_curve = PrecisionRecallCurve(pos_label=1)
precision, recall, thresholds = pr_curve(pred, target)
print_key_prs(precision, recall, thresholds, title)
N = len(recall)
fig.add_trace(go.Scatter(x=recall[0 : N - 1], y=precision[0 : N - 1], name=title))
def get_scores(y, y_preds, print_label):
print(f"{print_label} summary:")
confusion = confusion_matrix(y, y_preds)
print(f"Confusion matrix: {confusion}")
print("Accuracy: {:.2f}".format(accuracy_score(y, y_preds)))
print("Precision: {:.2f}".format(precision_score(y, y_preds)))
print("Recall: {:.2f}".format(recall_score(y, y_preds)))
F1_score = f1_score(y, y_preds)
print("F1: {:.2f}".format(f1_score(y, y_preds)))
return F1_score
Related
I am building several simple networks to predict bike rentals at 500 stations in the upcoming hour, given rentals at all stations in the previous 24 hours. I am working with two architectures: one with a graph convolution (which amounts to updating each station with a learned linear combination of the other stations, at each hour) followed by an FNN layer for prediction, and a second with a graph convolution -> LSTM -> FNN for prediction.
Before I describe more: I'm getting poorer performance from the model that includes an LSTM unit, which is confusing me.
See these two images for a description of each architecture. For each architecture I also add hourly meta-data (weather, time, etc.) as a variation; it is shown in red in the images and is not relevant to my question. Image links are at the bottom of the post.
[Architecture 1: GCNN + FNN][1]
[Architecture 2: GCNN + LSTM + FNN][2]
Confusingly, the test RMSE for the first model is 3.46; for the second model it's 3.57. Could someone please explain why the second wouldn't be lower, as it seems to run the exact same process, except with an additional LSTM unit?
Here are relevant snippets of my code for the GCNN+FNN model:
def gcnn_ddgf(hidden_layer, node_num, feature_in, horizon, learning_rate, beta, batch_size, early_stop_th, training_epochs, X_training, Y_training, X_val, Y_val, X_test, Y_test, scaler, display_step):
n_output_vec = node_num * horizon # length of output vector at the final layer
early_stop_k = 0 # early stop patience
best_val = 10000
traing_error = 0
test_error = 0
pred_Y = []
tf.reset_default_graph()
batch_size = batch_size
early_stop_th = early_stop_th
training_epochs = training_epochs
# tf Graph input and output
X = tf.placeholder(tf.float32, [None, node_num, feature_in]) # X is the input signal
Y = tf.placeholder(tf.float32, [None, n_output_vec]) # y is the regression output
# define dictionaries to store layers weight & bias
weights_hidden = {}
weights_A = {}
biases = {}
vec_length = feature_in
weights_hidden['h1'] = tf.Variable(tf.random_normal([vec_length, hidden_layer], stddev=0.5))
biases['b1'] = tf.Variable(tf.random_normal([1, hidden_layer], stddev=0.5))
weights_A['A1'] = tf.Variable(tf.random_normal([node_num, node_num], stddev=0.5))
weights_hidden['out'] = tf.Variable(tf.random_normal([hidden_layer, horizon], stddev=0.5))
biases['bout'] = tf.Variable(tf.random_normal([1, horizon], stddev=0.5))
# Construct model
pred= gcn(X, weights_hidden, weights_A, biases, node_num, horizon) #see below
pred = scaler.inverse_transform(pred)
Y_original = scaler.inverse_transform(Y)
cost = tf.sqrt(tf.reduce_mean(tf.pow(pred - Y_original, 2)))
#optimizer = tf.train.RMSPropOptimizer(learning_rate, decay).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(training_epochs):
avg_cost_sq = 0.
num_train = X_training.shape[0]
total_batch = int(num_train/batch_size)
for i in range(total_batch):
_, c = sess.run([optimizer, cost], feed_dict={X: X_training[i*batch_size:(i+1)*batch_size,],
Y: Y_training[i*batch_size:(i+1)*batch_size,]})
avg_cost_sq += np.square(c) * batch_size #/ total_batch
# rest part of training dataset
if total_batch * batch_size != num_train:
_, c = sess.run([optimizer, cost], feed_dict={X: X_training[total_batch*batch_size:num_train,],
Y: Y_training[total_batch*batch_size:num_train,]})
avg_cost_sq += np.square(c) * (num_train - total_batch*batch_size)
avg_cost = np.sqrt(avg_cost_sq / num_train)
# validation
c_val, = sess.run([cost], feed_dict={X: X_val, Y: Y_val})
if c_val < best_val:
# testing
c_tes, preds, Y_true = sess.run([cost, pred, Y_original], feed_dict={X: X_test,Y: Y_test})
best_val = c_val
test_error = c_tes
traing_error = avg_cost
pred_Y = preds
early_stop_k = 0 # reset to 0
# update early stopping patience
if c_val >= best_val:
early_stop_k += 1
# threshold
if early_stop_k == early_stop_th:
break
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch+1), "Training RMSE: ","{:.9f}".format(avg_cost))
print("Validation RMSE: ", c_val)
print("Lowest test RMSE: ", test_error)
print("epoch is ", epoch)
print("training RMSE is ", traing_error)
print("Optimization Finished! the lowest validation RMSE is ", best_val)
print("The test RMSE is ", test_error)
return best_val, pred_Y ,Y_true,test_error
# code that creates the model
def gcn(signal_in, weights_hidden, weights_A, biases, node_num, horizon):
signal_in = tf.transpose(signal_in, [1, 0, 2]) # node_num, batch, feature_in
feature_len = signal_in.shape[2] # feature vector length at the node of the input graph
signal_in = tf.reshape(signal_in, [node_num, -1]) # node_num, batch*feature_in
Adj = 0.5*(weights_A['A1'] + tf.transpose(weights_A['A1']))
Adj = normalize_adj(Adj)
Z = tf.matmul(Adj, signal_in) # node_num, batch*feature_in
Z = tf.reshape(Z, [-1, int(feature_len)]) # node_num * batch, feature_in
signal_output = tf.add(tf.matmul(Z, weights_hidden['h1']), biases['b1'])
signal_output = tf.nn.relu(signal_output) # node_num * batch, hidden_vec
final_output = tf.add(tf.matmul(signal_output, weights_hidden['out']), biases['bout']) # node_num * batch, horizon
# final_output = tf.nn.relu(final_output)
final_output = tf.reshape(final_output, [node_num, -1, horizon]) # node_num, batch, horizon
final_output = tf.transpose(final_output, [1, 0, 2]) # batch, node_num, horizon
final_output = tf.reshape(final_output, [-1, node_num*horizon]) # batch, node_num*horizon
return final_output
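The normalize_adj helper used in gcn (and in gcn_lstm below) is not included in the post; a common choice for a dense learned adjacency, offered here only as a guess at what it might do, is symmetric degree normalization:

def normalize_adj(adj):
    # Hypothetical stand-in for the author's helper: symmetric degree
    # normalization D^(-1/2) * A * D^(-1/2) of the dense learned adjacency.
    adj = tf.nn.relu(adj)                      # keep edge weights non-negative (assumption)
    d = tf.reduce_sum(adj, axis=1)
    d_inv_sqrt = tf.pow(d + 1e-8, -0.5)
    d_mat_inv_sqrt = tf.diag(d_inv_sqrt)
    return tf.matmul(tf.matmul(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)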
And the code for the GCNN+LSTM+FNN model:
def gcnn_ddgf_lstm(node_num, feature_in, learning_rate, beta, batch_size, early_stop_th, training_epochs, X_training,
Y_training, X_val, Y_val, X_test, Y_test, scaler, lstm_layer):
n_output_vec = node_num # length of output vector at the final layer
early_stop_k = 0 # early stop patience
display_step = 1 # frequency of printing results
best_val = 10000
traing_error = 0
test_error = 0
predic_res = []
tf.reset_default_graph()
batch_size = batch_size
early_stop_th = early_stop_th
training_epochs = training_epochs
# tf Graph input and output
X = tf.placeholder(tf.float32, [None, node_num, feature_in]) # X is the input signal
Y = tf.placeholder(tf.float32, [None, n_output_vec]) # y is the regression output
lstm_cell = tf.nn.rnn_cell.LSTMCell(lstm_layer, state_is_tuple=True)
# define dictionaries to store layers weight & bias
weights_hidden = {}
weights_A = {}
biases = {}
weights_A['A1'] = tf.Variable(tf.random_normal([node_num, node_num], stddev=0.5))
weights_hidden['h1'] = tf.Variable(tf.random_normal([lstm_layer, node_num], stddev=0.5))
biases['h1'] = tf.Variable(tf.random_normal([1, node_num], stddev=0.5))
weights_hidden['out'] = tf.Variable(tf.random_normal([node_num, node_num], stddev=0.5))
biases['bout'] = tf.Variable(tf.random_normal([1, node_num], stddev=0.5))
# Construct model
pred= gcn_lstm(X, weights_hidden, weights_A, biases, node_num, lstm_cell)
# pred = scaler.inverse_transform(pred)
# Y_original = scaler.inverse_transform(Y)
cost = tf.sqrt(tf.reduce_mean(tf.pow(pred - Y, 2)))
#optimizer = tf.train.RMSPropOptimizer(learning_rate, decay).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(training_epochs):
avg_cost_sq = 0.
num_train = X_training.shape[0]
total_batch = int(num_train/batch_size)
for i in range(total_batch):
_, c = sess.run([optimizer, cost], feed_dict={X: X_training[i*batch_size:(i+1)*batch_size,],
Y: Y_training[i*batch_size:(i+1)*batch_size,]})
avg_cost_sq += np.square(c) * batch_size #/ total_batch
# rest part of training dataset
if total_batch * batch_size != num_train:
_, c = sess.run([optimizer, cost], feed_dict={X: X_training[total_batch*batch_size:num_train,],
Y: Y_training[total_batch*batch_size:num_train,]})
avg_cost_sq += np.square(c) * (num_train - total_batch*batch_size)
avg_cost = np.sqrt(avg_cost_sq / num_train)
# validation
c_val, = sess.run([cost], feed_dict={X: X_val, Y: Y_val})
if c_val < best_val:
c_tes, preds = sess.run([cost, pred], feed_dict={X: X_test,Y: Y_test})
best_val = c_val
# save model
#saver.save(sess, './bikesharing_gcnn_ddgf')
test_error = c_tes
traing_error = avg_cost
early_stop_k = 0 # reset to 0
# update early stopping patience
if c_val >= best_val:
early_stop_k += 1
# threshold
if early_stop_k == early_stop_th:
pred_Y = scaler.inverse_transform(preds)
Y_true = scaler.inverse_transform(Y_test)
test_err = tf.sqrt(tf.reduce_mean(tf.pow(pred_Y - Y_true, 2)))
break
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch+1), "Training RMSE: ","{:.9f}".format(avg_cost))
print("Validation RMSE: ", c_val)
print("Lowest test RMSE: ", test_error)
print("epoch is ", epoch)
print("training RMSE is ", traing_error)
print("Optimization Finished! the lowest validation RMSE is ", best_val)
print("The scaled test RMSE is ", test_error)
return pred_Y, Y_true
def gcn_lstm(signal_in, weights_hidden, weights_A, biases, node_num, lstm_cell):
signal_in = tf.transpose(signal_in, [1, 0, 2]) # node_num, batch, feature_in
feature_len = signal_in.shape[2] # feature vector length at the node of the input graph
signal_in = tf.reshape(signal_in, [node_num, -1]) # node_num, batch*feature_in
Adj = 0.5*(weights_A['A1'] + tf.transpose(weights_A['A1']))
Adj = normalize_adj(Adj)
Z = tf.matmul(Adj, signal_in) # node_num, batch*feature_in
Z = tf.reshape(Z, [node_num, -1, int(feature_len)]) # node_num, batch, feature_in
Z = tf.transpose(Z,[1,2,0]) # batch, feature_in, node_num
# init_state = cell.zero_state(batch_size, tf.float32)
_, Z = tf.nn.dynamic_rnn(lstm_cell, Z, dtype = tf.float32) # init_state?
dense_output = tf.add(tf.matmul(Z[1], weights_hidden['h1']), biases['h1'])
dense_output = tf.nn.relu(dense_output)
final_output = tf.add(tf.matmul(dense_output, weights_hidden['out']), biases['bout']) # batch, node_num*horizon
return final_output
In particular, should I be wary that _, Z = tf.nn.dynamic_rnn(lstm_cell, Z, dtype = tf.float32) causes my variables defined elsewhere not to train?
Thanks a lot for any help :)
[1]: https://i.stack.imgur.com/MAO2t.png
[2]: https://i.stack.imgur.com/UDjHw.png
I resolved this.
I have three years of bike-use data to make the prediction, and I was using roughly the last three months as my validation/test set. Those last few months were winter, with lower bike use. I got the expected results (GCNN+LSTM outperforms GCNN, though not by much) when I shuffled my training data prior to allocating it to the sets (with the sequences preserved for the LSTM).
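For reference, a minimal sketch of that kind of split (names and shapes are hypothetical): the already-windowed samples are shuffled before being allocated to train/validation/test so that each set sees all seasons, while the 24-hour sequence inside each sample stays intact:

import numpy as np

rng = np.random.default_rng(0)
# Hypothetical windowed data: each sample is a full 24-hour sequence per station.
X = np.random.rand(1000, 24, 500)   # (samples, hours, stations)
Y = np.random.rand(1000, 500)       # next-hour rentals per station

idx = rng.permutation(len(X))       # shuffle samples, not the hours within a sample
n_train, n_val = int(0.8 * len(X)), int(0.1 * len(X))
train_idx = idx[:n_train]
val_idx = idx[n_train:n_train + n_val]
test_idx = idx[n_train + n_val:]
X_train, Y_train = X[train_idx], Y[train_idx]
X_val, Y_val = X[val_idx], Y[val_idx]
X_test, Y_test = X[test_idx], Y[test_idx]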
I have set up custom training and testing functions in my project so I can customise the training process in detail. I use k-fold cross-validation to evaluate my model. For whatever reason, the model trains correctly for the first fold and then, on the second, it throws this error.
tensorflow.python.framework.errors_impl.FailedPreconditionError: Could not find variable _AnonymousVar13. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Resource localhost/_AnonymousVar13/N10tensorflow3VarE does not exist. [[node test_model/dense_2/Tensordot/ReadVariableOp (defined at training_example.py:33) ]] [Op:__inference__train_step_1082]
I have no idea what's happening. I assumed the error arose because of poor initialisation, so I call model.build() with the input shape. I have tried initialising the graph's weights with a blank tensor too, but that didn't work. I have also reset the backend on the last line in case there was a conflict with names, but that doesn't do the trick.
import numpy as np
import sklearn.model_selection
import tensorflow as tf
from tensorflow.python.keras.metrics import Mean, Precision, Recall
from tensorflow.python.keras.optimizer_v2.adam import Adam
n_splits = 5
batch_size = 16
n_epochs = 2
loss_function = tf.keras.losses.BinaryCrossentropy()
optimiser_fn = Adam
metrics = [
Mean(name='loss'),
Precision(name='prec'),
Recall(name='recall'),
]
learning_rate = 1e-2
dense_outputs = [10,10]
activation = 'relu'
class TestModel(tf.keras.Model):
def __init__(self):
super().__init__()
self._dense_ops = [tf.keras.layers.Dense(o) for o in dense_outputs]
self._output = tf.keras.layers.Dense(1)
def call(self, inputs):
hidden = inputs
for l in self._dense_ops:
hidden = l(hidden)
return self._output(hidden)
def _load_fold_sets_for_training(fold, fold_idcs, features, labels, batch_size):
# Get the indices for the sets.
train_idcs, validation_idcs, _ = fold_idcs[fold]
# Get the training data and labels.
training_data = features[train_idcs]
training_labels = labels[train_idcs]
# Load the training, validation and testing sets.
training_set = tf.data.Dataset.from_tensor_slices(
(training_data, training_labels)
)
training_set = training_set.batch(batch_size, drop_remainder=False)
validation_set = tf.data.Dataset.from_tensor_slices(
(features[validation_idcs], labels[validation_idcs])
)
validation_set = validation_set.batch(batch_size, drop_remainder=False)
return training_set, validation_set
@tf.function
def _train_step(batch_samples, batch_labels):
batch_predictions = model(batch_samples, training=True)
loss = loss_function(batch_predictions, batch_labels)
gradients = tf.gradients(loss, model.trainable_variables)
optimiser.apply_gradients(
zip(gradients, model.trainable_variables)
)
batch_predictions = tf.sigmoid(batch_predictions)
metrics[0].update_state(loss)
[m.update_state(batch_labels, batch_predictions) for m in metrics[1:]]
@tf.function
def _inference_step(batch_samples, batch_labels):
batch_predictions = model(batch_samples, training=False)
loss = loss_function(batch_predictions, batch_labels)
batch_predictions = tf.sigmoid(batch_predictions)
metrics[0].update_state(loss)
[m.update_state(batch_labels, batch_predictions) for m in metrics[1:]]
# Generate dataset.
features = np.random.rand(15,1440,1)
labels = np.random.rand(15,1440)
# Set up splits.
kfold = sklearn.model_selection.KFold(n_splits=n_splits, shuffle=True)
splits = []
for train_idcs, test_idcs in kfold.split(features):
train_idcs, val_idcs = sklearn.model_selection.train_test_split(train_idcs)
splits += [[train_idcs, val_idcs, test_idcs]]
fold = 0
while fold < n_splits:
# Load datasets for fold.
training_set, validation_set = _load_fold_sets_for_training(fold, splits, features, labels, batch_size)
# Load model.
model = TestModel()
# Build model.
model.build((1440, 1))
# Initialise Adam optimiser.
optimiser = optimiser_fn(learning_rate)
epoch = 0
while epoch < n_epochs:
epoch += 1
# Training.
for batch_features, batch_labels in training_set: _train_step(batch_features, batch_labels)
print(f'fold {fold}: epoch {epoch}:', ' '.join(f'train_{m.name}: {m.result():0.05f}' for m in metrics))
# Validation.
for batch_features, batch_labels in validation_set: _inference_step(batch_features, batch_labels)
print(f'fold {fold}: epoch {epoch}:', ' '.join(f'val_{m.name}: {m.result():0.05f}' for m in metrics))
tf.keras.backend.clear_session()
fold += 1
Any ideas?
The issue was the placement of _train_step and _inference_step. If the two functions are redefined on every iteration of the fold loop, the error disappears and the model trains. I don't know why they must be redefined for every fold.
import numpy as np
import sklearn.model_selection
import tensorflow as tf
from tensorflow.python.keras.metrics import Mean, Precision, Recall
from tensorflow.python.keras.optimizer_v2.adam import Adam
n_splits = 5
batch_size = 2
n_epochs = 2
loss_function = tf.keras.losses.BinaryCrossentropy()
optimiser_fn = Adam
metrics = [
Mean(name='loss'),
Precision(name='prec'),
Recall(name='recall'),
]
learning_rate = 1e-2
dense_outputs = [10, 10]
activation = 'relu'
class TestModel(tf.keras.Model):
def __init__(self):
super().__init__()
self._dense_ops = [tf.keras.layers.Dense(o) for o in dense_outputs]
self._output = tf.keras.layers.Dense(1)
def call(self, inputs):
hidden = inputs
for l in self._dense_ops:
hidden = l(hidden)
return self._output(hidden)
def _load_fold_sets_for_training(fold, fold_idcs, features, labels, batch_size):
# Get the indices for the sets.
train_idcs, validation_idcs, _ = fold_idcs[fold]
# Get the training data and labels.
training_data = features[train_idcs]
training_labels = labels[train_idcs]
# Load the training, validation and testing sets.
training_set = tf.data.Dataset.from_tensor_slices(
(training_data, training_labels)
)
training_set = training_set.batch(batch_size, drop_remainder=False)
validation_set = tf.data.Dataset.from_tensor_slices(
(features[validation_idcs], labels[validation_idcs])
)
validation_set = validation_set.batch(batch_size, drop_remainder=False)
return training_set, validation_set
# Generate dataset.
features = np.random.rand(15, 1440, 1)
labels = np.random.rand(15, 1440)
# Set up splits.
kfold = sklearn.model_selection.KFold(n_splits=n_splits, shuffle=True)
splits = []
for train_idcs, test_idcs in kfold.split(features):
train_idcs, val_idcs = sklearn.model_selection.train_test_split(train_idcs)
splits += [[train_idcs, val_idcs, test_idcs]]
fold = 0
while fold < n_splits:
    @tf.function
def _train_step(batch_samples, batch_labels):
batch_predictions = model(batch_samples, training=True)
loss = loss_function(batch_predictions, batch_labels)
gradients = tf.gradients(loss, model.trainable_variables)
optimiser.apply_gradients(
zip(gradients, model.trainable_variables)
)
batch_predictions = tf.sigmoid(batch_predictions)
metrics[0].update_state(loss)
[m.update_state(batch_labels, batch_predictions) for m in metrics[1:]]
    @tf.function
def _inference_step(batch_samples, batch_labels):
batch_predictions = model(batch_samples, training=False)
loss = loss_function(batch_predictions, batch_labels)
batch_predictions = tf.sigmoid(batch_predictions)
metrics[0].update_state(loss)
[m.update_state(batch_labels, batch_predictions) for m in metrics[1:]]
# Load datasets for fold.
training_set, validation_set = _load_fold_sets_for_training(fold, splits, features, labels,
batch_size)
# Load model.
model = TestModel()
# Build model.
model.build((1440, 1))
# Initialise Adam optimiser.
optimiser = optimiser_fn(learning_rate)
epoch = 0
while epoch < n_epochs:
epoch += 1
# Training.
        for batch_features, batch_labels in training_set: _train_step(batch_features, batch_labels)
        print(f'fold {fold}: epoch {epoch}:', ' '.join(f'train_{m.name}: {m.result():0.05f}' for m in metrics))
        # Validation.
        for batch_features, batch_labels in validation_set: _inference_step(batch_features, batch_labels)
        print(f'fold {fold}: epoch {epoch}:', ' '.join(f'val_{m.name}: {m.result():0.05f}' for m in metrics))
tf.keras.backend.clear_session()
fold += 1
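An alternative sketch that avoids redefining the functions per fold (assuming the same globals as above, and swapping tf.gradients for a GradientTape): pass the model and optimiser in as arguments, so that the freshly created model in each fold triggers a retrace instead of reusing a graph that still references the previous fold's cleared variables. This is only a sketch, not a confirmed fix.

@tf.function
def train_step(model, optimiser, batch_samples, batch_labels):
    with tf.GradientTape() as tape:
        batch_predictions = model(batch_samples, training=True)
        # note: Keras losses expect (y_true, y_pred)
        loss = loss_function(batch_labels, batch_predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimiser.apply_gradients(zip(gradients, model.trainable_variables))
    return loss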
I have the below code for binary classification, and it works fine, but I would like to modify the nn.Sequential parameters and add a BiLSTM layer:
class BertClassifier(nn.Module):
def __init__(self, freeze_bert=False):
super(BertClassifier, self).__init__()
# Specify hidden size of BERT, hidden size of our classifier, and number of labels
D_in, H, D_out = 768, 50, 2
# Instantiate BERT model
self.bert = BertModel.from_pretrained('bert-base-multilingual-uncased')
        # Instantiate a one-layer feed-forward classifier
self.classifier = nn.Sequential(nn.Linear(D_in, H),nn.ReLU(),nn.Linear(H, D_out))
# Freeze the BERT model
if freeze_bert:
for param in self.bert.parameters():
param.requires_grad = False
def forward(self, input_ids, attention_mask):
# Feed input to BERT
outputs = self.bert(input_ids=input_ids,attention_mask=attention_mask)
# Extract the last hidden state of the token `[CLS]` for classification task
last_hidden_state_cls = outputs[0][:, 0, :]
# Feed input to classifier to compute logits
logits = self.classifier(last_hidden_state_cls)
return logits
I have tried to modify the Sequential like this: self.classifier = nn.Sequential(nn.LSTM(D_in, H, batch_first=True, bidirectional=True), nn.ReLU(), nn.Linear(H, D_out)), but then it throws the error RuntimeError: input must have 3 dimensions, got 2 on the line logits = self.classifier(last_hidden_state_cls). I found that I can use nn.ModuleDict instead of nn.Sequential, and I made the below:
self.classifier = nn.ModuleDict({
'lstm': nn.LSTM(input_size=D_in, hidden_size=H,batch_first=True, bidirectional=True ),
'linear': nn.Linear(in_features=H,out_features=D_out)})
But now I'm having issues computing the forward function with this. Can someone advise how I can properly modify the forward function?
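For what it's worth, one way a forward pass could use that ModuleDict is sketched below: index the dict by key, feed the full (3-D) BERT sequence output to the LSTM, and size the linear layer for 2 * H because the LSTM is bidirectional. This is only a sketch of one option, with the linear layer's in_features changed from the snippet above, not a confirmed solution.

# Sketch only: assumes D_in=768, H=50, D_out=2 and a classifier built as
# nn.ModuleDict({'lstm': nn.LSTM(D_in, H, batch_first=True, bidirectional=True),
#                'linear': nn.Linear(2 * H, D_out)})   # 2*H because bidirectional
def forward(self, input_ids, attention_mask):
    outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
    sequence_output = outputs[0]                              # (batch, seq_len, 768)
    lstm_out, _ = self.classifier['lstm'](sequence_output)    # (batch, seq_len, 2*H)
    logits = self.classifier['linear'](lstm_out[:, -1, :])    # last timestep -> (batch, D_out)
    return logits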
Update: I also installed CUDA, and now when I run the code it returns the error CUDA out of memory. Tried to allocate 16.00 MiB. I tried to lower the batch size, but that doesn't fix the problem. I also tried the below, but that didn't resolve it either. Any advice, please?
import torch, gc
gc.collect()
torch.cuda.empty_cache()
Update with the code:
MAX_LEN = 64
# For fine-tuning BERT, the authors recommend a batch size of 16 or 32.
batch_size = 32
VALID_BATCH_SIZE = 4
file1 = open('MH.txt', 'r')
list_com = []
list_label = []
for line in file1:
possible_labels = 'positive|negative'
label = re.findall(possible_labels, line)
line = re.sub(possible_labels, ' ', line)
line = re.sub('\n', ' ', line)
list_com.append(line)
list_label.append(label[0])
list_tuples = list(zip(list_com, list_label))
file1.close()
labels = ['positive', 'negative']
df = pd.DataFrame(list_tuples, columns=['text', 'label'])
df['label'] = df['label'].map({'positive': 1, 'negative': 0})
for i in range(0,len(df['label'])):
list_label[i] = df['label'][i]
#print(df)
#print(df['label'].value_counts())
X = df.text.values
y = df.label.values
X_train, X_val, y_train, y_val =\
train_test_split(X, y, test_size=0.1, random_state=2020)
def text_preprocessing(text):
# Remove '#name'
text = re.sub(r'(#.*?)[\s]', ' ', text)
    # Replace '&amp;' with '&'
    text = re.sub(r'&amp;', '&', text)
# Remove trailing whitespace
text = re.sub(r'\s+', ' ', text).strip()
return text
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased', do_lower_case=True)
# Create a function to tokenize a set of texts
def preprocessing_for_bert(data):
input_ids = []
attention_masks = []
for sent in data:
encoded_sent = tokenizer.encode_plus(
text=text_preprocessing(sent), # Preprocess sentence
add_special_tokens=True, # Add `[CLS]` and `[SEP]`
max_length=MAX_LEN, # Max length to truncate/pad
pad_to_max_length=True, # Pad sentence to max length
# return_tensors='pt', # Return PyTorch tensor
return_attention_mask=True # Return attention mask
)
# Add the outputs to the lists
input_ids.append(encoded_sent.get('input_ids'))
attention_masks.append(encoded_sent.get('attention_mask'))
# Convert lists to tensors
input_ids = torch.tensor(input_ids)
attention_masks = torch.tensor(attention_masks)
return input_ids, attention_masks
train_inputs, train_masks = preprocessing_for_bert(X_train)
val_inputs, val_masks = preprocessing_for_bert(X_val)
# Convert other data types to torch.Tensor
train_labels = torch.tensor(y_train)
val_labels = torch.tensor(y_val)
# Create the DataLoader for our training set
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
# Create the DataLoader for our validation set
val_data = TensorDataset(val_inputs, val_masks, val_labels)
val_sampler = SequentialSampler(val_data)
val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size)
# Create the BertClassfier class
class BertClassifier(nn.Module):
"""Bert Model for Classification Tasks."""
def __init__(self, freeze_bert=False):
"""
        @param bert: a BertModel object
        @param classifier: a torch.nn.Module classifier
        @param freeze_bert (bool): Set `False` to fine-tune the BERT model
"""
super(BertClassifier, self).__init__()
# Specify hidden size of BERT, hidden size of our classifier, and number of labels
D_in, H, D_out = 768, 50, 2
# Instantiate BERT model
self.bert = BertModel.from_pretrained('bert-base-multilingual-uncased')
        # Instantiate a one-layer feed-forward classifier
self.classifier = nn.ModuleDict({
'lstm': nn.LSTM(input_size=D_in, hidden_size=H, batch_first=True, bidirectional=True),
'linear': nn.Linear(in_features=H, out_features=D_out)})
# Freeze the BERT model
if freeze_bert:
for param in self.bert.parameters():
param.requires_grad = False
def forward(self, input_ids, attention_mask):
outputs = self.bert(input_ids=input_ids,attention_mask=attention_mask)
sequence_output = outputs[0]
sequence_output, _ = self.lstm(sequence_output)
linear_output = self.linear(sequence_output[:, -1])
return linear_output
def initialize_model(epochs=4):
# Instantiate Bert Classifier
bert_classifier = BertClassifier(freeze_bert=False)
print(bert_classifier)
# Tell PyTorch to run the model on GPU
bert_classifier.to(device)
# Create the optimizer
optimizer = AdamW(bert_classifier.parameters(), lr=5e-5)
# Total number of training steps
total_steps = len(train_dataloader) * epochs
# Set up the learning rate scheduler
scheduler = get_linear_schedule_with_warmup(optimizer,num_warmup_steps=0,num_training_steps=total_steps)
return bert_classifier, optimizer, scheduler
# Specify loss function
loss_fn = nn.CrossEntropyLoss()
def set_seed(seed_value=42):
"""Set seed for reproducibility."""
random.seed(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
def train(model, train_dataloader, val_dataloader=None, epochs=4, evaluation=False):
"""Train the BertClassifier model."""
# Start training loop
print("Start training...\n")
for epoch_i in range(epochs):
# Print the header of the result table
print(f"{'Epoch':^7} | {'Batch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Elapsed':^9}")
print("-" * 70)
# Measure the elapsed time of each epoch
t0_epoch, t0_batch = time.time(), time.time()
# Reset tracking variables at the beginning of each epoch
total_loss, batch_loss, batch_counts = 0, 0, 0
# Put the model into the training mode
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
batch_counts += 1
# Load batch to GPU
b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)
# Zero out any previously calculated gradients
model.zero_grad()
# Perform a forward pass. This will return logits.
logits = model(b_input_ids, b_attn_mask)
# Compute loss and accumulate the loss values
loss = loss_fn(logits, b_labels)
batch_loss += loss.item()
total_loss += loss.item()
# Perform a backward pass to calculate gradients
loss.backward()
# Clip the norm of the gradients to 1.0 to prevent "exploding gradients"
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and the learning rate
optimizer.step()
scheduler.step()
# Print the loss values and time elapsed for every 20 batches
if (step % 20 == 0 and step != 0) or (step == len(train_dataloader) - 1):
# Calculate time elapsed for 20 batches
time_elapsed = time.time() - t0_batch
# Print training results
print(
f"{epoch_i + 1:^7} | {step:^7} | {batch_loss / batch_counts:^12.6f} | {'-':^10} | {'-':^9} | {time_elapsed:^9.2f}")
# Reset batch tracking variables
batch_loss, batch_counts = 0, 0
t0_batch = time.time()
# Calculate the average loss over the entire training data
avg_train_loss = total_loss / len(train_dataloader)
print("-" * 70)
#Evaluation
if evaluation == True:
# After the completion of each training epoch, measure the model's performance
# on our validation set.
val_loss, val_accuracy = evaluate(model, val_dataloader)
# Print performance over the entire training data
time_elapsed = time.time() - t0_epoch
print(
f"{epoch_i + 1:^7} | {'-':^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {time_elapsed:^9.2f}")
print("-" * 70)
print("\n")
print("Training complete!")
def evaluate(model, val_dataloader):
"""After the completion of each training epoch, measure the model's performance
on our validation set.
"""
# Put the model into the evaluation mode. The dropout layers are disabled during
# the test time.
model.eval()
# Tracking variables
val_accuracy = []
val_loss = []
# For each batch in our validation set...
for batch in val_dataloader:
# Load batch to GPU
b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)
# Compute logits
with torch.no_grad():
logits = model(b_input_ids, b_attn_mask)
# Compute loss
loss = loss_fn(logits, b_labels)
val_loss.append(loss.item())
# Get the predictions
preds = torch.argmax(logits, dim=1).flatten()
# Calculate the accuracy rate
accuracy = (preds == b_labels).cpu().numpy().mean() * 100
val_accuracy.append(accuracy)
# Compute the average accuracy and loss over the validation set.
val_loss = np.mean(val_loss)
val_accuracy = np.mean(val_accuracy)
return val_loss, val_accuracy
def accuracy(probs, y_true):
"""
- Print AUC and accuracy on the test set
    @params probs (np.array): an array of predicted probabilities with shape (len(y_true), 2)
    @params y_true (np.array): an array of the true values with shape (len(y_true),)
fpr, tpr, threshold = roc_curve(y_true, preds)
roc_auc = auc(fpr, tpr)
print(f'AUC: {roc_auc:.4f}')
"""
preds = probs[:, 1]
# Get accuracy over the test set
y_pred = np.where(preds >= 0.5, 1, 0)
accuracy = accuracy_score(y_true, y_pred)
print(f'Accuracy: {accuracy * 100:.2f}%')
def bert_predict(model, test_dataloader):
"""Perform a forward pass on the trained BERT model to predict probabilities on the test set."""
# Put the model into the evaluation mode. The dropout layers are disabled during the test time.
model.eval()
all_logits = []
# For each batch in our test set...
for batch in test_dataloader:
# Load batch to GPU
b_input_ids, b_attn_mask = tuple(t.to(device) for t in batch)[:2]
# Compute logits
with torch.no_grad():
logits = model(b_input_ids, b_attn_mask)
all_logits.append(logits)
# Concatenate logits from each batch
all_logits = torch.cat(all_logits, dim=0)
# Apply softmax to calculate probabilities
probs = F.softmax(all_logits, dim=1).cpu().numpy()
return probs
set_seed(42) # Set seed for reproducibility
bert_classifier, optimizer, scheduler = initialize_model(epochs=3)
# start training
train(bert_classifier, train_dataloader, val_dataloader, epochs=3, evaluation=True)
# Compute predicted probabilities on the test set
probs = bert_predict(bert_classifier, val_dataloader)
# Evaluate the Bert classifier
accuracy(probs, y_val)
Here is my code. I don't know why my train and validation accuracy increase so slowly. Is that normal? I'm new to deep learning; this is my homework. The train and validation values hardly change until about loop 500. Is that normal? I changed the learning rate, added weight_decay, etc., but I didn't see a difference.
# -*- coding: utf-8 -*-
#Libraries
import torch
import torch.nn.functional as F
from torch import autograd, nn
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms, datasets
from torch.utils import data
"""
Olivetti face dataset
"""
from sklearn.datasets import fetch_olivetti_faces
# Olivetti dataset download
olivetti = fetch_olivetti_faces()
train = olivetti.images
label = olivetti.target
X = train
Y = label
print("\nDownload Ok")
"""
Set for train
"""
train_rate = 0.8
X_train = np.zeros([int(train_rate * X.shape[0]),64,64], dtype=float)
Y_train = np.zeros([int(train_rate * X.shape[0])], dtype=int)
X_val = np.zeros([int((1-train_rate) * X.shape[0]+1),64,64], dtype=float)
Y_val = np.zeros([int((1-train_rate) * X.shape[0]+1)], dtype=int)
#Split data for train and validation
ie=0
iv=0
for i in range(X.shape[0]):
if (i%10)/9 <= train_rate:
X_train[ie] = X[i]
Y_train[ie] = Y[i]
ie += 1
else:
X_val[iv] = X[i]
Y_val[iv] = Y[i]
iv += 1
X_train = X_train.reshape(320,-1,64,64)
X_val = X_val.reshape(80,-1,64,64)
print(Y_train.shape)
X_train = torch.Tensor(X_train)
Y_train = torch.Tensor(Y_train)
X_val = torch.Tensor(X_val)
Y_val = torch.Tensor(Y_val)
batch_size = 16
train_loader = torch.utils.data.DataLoader(X_train,
batch_size=batch_size,
)
val_loader = torch.utils.data.DataLoader(X_val,
batch_size=batch_size,
)
class CNNModule(nn.Module):
def __init__(self):
super(CNNModule, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 13 * 13, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 40)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 13 * 13)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
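# Note: the 16 * 13 * 13 in fc1 comes from the shape arithmetic on the 64x64
# inputs: conv1 (5x5, no padding) -> 60x60, pool -> 30x30, conv2 (5x5) -> 26x26,
# pool -> 13x13, with 16 output channels. A quick standalone check (hypothetical,
# not part of the original script):
#   m = CNNModule(); x = torch.randn(1, 1, 64, 64)
#   m.pool(F.relu(m.conv2(m.pool(F.relu(m.conv1(x)))))).shape  # torch.Size([1, 16, 13, 13])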
def make_train(model,dataset,n_iters,gpu):
# Organize data
X_train,Y_train,X_val,Y_val = dataset
kriter = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(),lr=0.03)
#Arrays to save loss and accuracy
tl=np.zeros(n_iters) #For train loss
ta=np.zeros(n_iters) #For train accuracy
vl=np.zeros(n_iters) #For validation loss
va=np.zeros(n_iters) #For validation accuracy
# Convert labels to long
Y_train = Y_train.long()
Y_val = Y_val.long()
# GPU control
if gpu:
X_train,Y_train = X_train.cuda(),Y_train.cuda()
X_val,Y_val = X_val.cuda(),Y_val.cuda()
model = model.cuda() # Parameters to GPU!
print("Using GPU")
else:
print("Using CPU")
# print(X_train.shape)
# print(Y_train.shape)
for i in range(n_iters):
# train forward
train_out = model.forward(X_train)
train_loss = kriter(train_out,Y_train)
# Backward and optimization
train_loss.backward()
optimizer.step()
optimizer.zero_grad()
# Compute train accuracy
train_predict = train_out.cpu().detach().argmax(dim=1)
train_accuracy = (train_predict.cpu().numpy()==Y_train.cpu().numpy()).mean()
# For validation
val_out = model.forward(X_val)
val_loss = kriter(val_out,Y_val)
# Compute validation accuracy
val_predict = val_out.cpu().detach().argmax(dim=1)
val_accuracy = (val_predict.cpu().numpy()==Y_val.cpu().numpy()).mean()
tl[i] = train_loss.cpu().detach().numpy()
ta[i] = train_accuracy
vl[i] = val_loss.cpu().detach().numpy()
va[i] = val_accuracy
# Show result each 5 loop
if i%5==0:
print("Loop --> ",i)
print("Train Loss :",train_loss.cpu().detach().numpy())
print("Train Accuracy :",train_accuracy)
print("Validation Loss :",val_loss.cpu().detach().numpy())
print("Validation Accuracy :",val_accuracy)
model = model.cpu()
#Print result
plt.subplot(2,2,1)
plt.plot(np.arange(n_iters), tl, 'r-')
plt.subplot(2,2,2)
plt.plot(np.arange(n_iters), ta, 'b--')
plt.subplot(2,2,3)
plt.plot(np.arange(n_iters), vl, 'r-')
plt.subplot(2,2,4)
plt.plot(np.arange(n_iters), va, 'b--')
dataset = X_train,Y_train,X_val,Y_val
gpu = True
gpu = gpu and torch.cuda.is_available()
model = CNNModule()
make_train(model,dataset,1000,gpu)
OUTPUT:
Loop --> 0
Train Loss : 3.6910985
Train Accuracy : 0.025
Validation Loss : 3.6908844
Validation Accuracy : 0.025
Loop --> 5
Loop --> 215
Train Loss : 3.6849258
Train Accuracy : 0.025
Validation Loss : 3.6850574
Validation Accuracy : 0.025
Loop --> 500
Train Loss : 3.4057992
Train Accuracy : 0.103125
Validation Loss : 3.5042462
Validation Accuracy : 0.0875
Loop --> 995
Train Loss : 0.007807272
Train Accuracy : 1.0
Validation Loss : 0.64222467
Validation Accuracy : 0.8375
OUTPUT GRAPH IMAGE:
I don't know if this is the only problem, but please note that you zero the gradient and then do a forward pass over the validation data, which means that new gradients from the validation data are stored in the model before the next iteration. The common practice is to create an evaluation method and use it to make predictions over the validation set without saving the gradients, something like:
def eval_model(model, X_val, Y_val, criterion):
    model.eval()  # set the model to inference mode (matters if you have batchNorm or dropout layers)
    with torch.no_grad():  # tell autograd not to compute gradients
        val_out = model(X_val)
        val_loss = criterion(val_out, Y_val)
        # here put some prints or whatever you want to do
    model.train()  # return the model to training mode
On running the first training epoch of a 3-layer convnet on CIFAR-10, I am able neither to achieve a high enough validation accuracy nor to minimize the objective function.
Specifically, the accuracy varies on the first iteration, and then settles at 8.7% for the following iterations. What's peculiar is that I've also trained a 2-layer, fully-connected network which does substantially better, consistently getting around 43% accuracy on the validation set.
NOTE: The bulk of the code is from a Jupyter notebook designed as an introduction to barebones TensorFlow (and Keras), provided as part of an assignment for Stanford's CS231n Convolutional Neural Networks for Visual Recognition. Although I am neither a student of the course nor of the university, I am doing this purely for experiential purposes and out of my newfound interest in CV / deep learning.
My contribution is only the implementation of the forward pass and the initialization of the network's parameters.
The author of the notebook left a comment stating that, when correctly implemented, this model should achieve above 40% accuracy after the first epoch without any hyperparameter tuning.
Implementation Notes
49,000 / 1000 : train/validation split, batch size = 64
Weights are initialized using Kaiming normalization, bias initialized with 0s
learning rate = 3e-3
Here are each of the layers of convnet in detail:
Convolutional layer (with bias) with 32 5x5 filters, with zero-padding 2
ReLU
Convolutional layer (with bias) with 16 3x3 filters, with zero-padding 1
ReLU
Fully-connected layer (with bias) to compute scores for 10 classes
Code
(My code is written between the 'TODO' comment blocks.)
import tensorflow as tf
import numpy as np
def load_cifar10(num_training=49000, num_validation=1000, num_test=10000):
cifar10 = tf.keras.datasets.cifar10.load_data()
(X_train, y_train), (X_test, y_test) = cifar10
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32).flatten()
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32).flatten()
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
mean_pixel = X_train.mean(axis=(0, 1, 2), keepdims=True)
std_pixel = X_train.std(axis=(0, 1, 2), keepdims=True)
X_train = (X_train - mean_pixel) / std_pixel
X_val = (X_val - mean_pixel) / std_pixel
X_test = (X_test - mean_pixel) / std_pixel
return X_train, y_train, X_val, y_val, X_test, y_test
class Dataset(object):
def __init__(self, X, y, batch_size, shuffle=False):
assert X.shape[0] == y.shape[0], 'Got different numbers of data and labels'
self.X, self.y = X, y
self.batch_size, self.shuffle = batch_size, shuffle
def __iter__(self):
N, B = self.X.shape[0], self.batch_size
idxs = np.arange(N)
if self.shuffle:
np.random.shuffle(idxs)
return iter((self.X[i:i+B], self.y[i:i+B]) for i in range(0, N, B))
def flatten(x):
N = tf.shape(x)[0]
return tf.reshape(x, (N, -1))
def three_layer_convnet(x, params):
conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params
scores = None
############################################################################
# TODO: Implement the forward pass for the three-layer ConvNet. #
############################################################################
h1_conv = tf.nn.conv2d(x,
conv_w1 + conv_b1,
strides=[1, 1, 1, 1],
padding='SAME'
)
h1 = tf.nn.relu(h1_conv)
h2_conv = tf.nn.conv2d(h1,
conv_w2 + conv_b2,
strides=[1, 1, 1, 1],
padding='SAME'
)
h2 = tf.nn.relu(h2_conv)
fc_params = flatten(fc_w + fc_b)
h2 = flatten(h2)
scores = tf.matmul(h2, fc_params)
############################################################################
# END OF YOUR CODE #
############################################################################
return scores
def training_step(scores, y, params, learning_rate):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=scores)
loss = tf.reduce_mean(losses)
grad_params = tf.gradients(loss, params)
new_weights = []
for w, grad_w in zip(params, grad_params):
new_w = tf.assign_sub(w, learning_rate * grad_w)
new_weights.append(new_w)
with tf.control_dependencies(new_weights):
return tf.identity(loss)
def check_accuracy(sess, dset, x, scores, is_training=None):
num_correct, num_samples = 0, 0
for x_batch, y_batch in dset:
feed_dict = {x: x_batch, is_training: 0}
scores_np = sess.run(scores, feed_dict=feed_dict)
y_pred = scores_np.argmax(axis=1)
num_samples += x_batch.shape[0]
num_correct += (y_pred == y_batch).sum()
acc = float(num_correct) / num_samples
print('Got %d / %d correct (%.2f%%)' % (num_correct, num_samples, 100 * acc))
def kaiming_normal(shape):
if len(shape) == 2:
fan_in, fan_out = shape[0], shape[1]
elif len(shape) == 4:
fan_in, fan_out = np.prod(shape[:3]), shape[3]
return tf.random_normal(shape) * np.sqrt(2.0 / fan_in)
def three_layer_convnet_init():
params = None
############################################################################
# TODO: Initialize the parameters of the three-layer network. #
############################################################################
conv_w1 = tf.Variable(kaiming_normal((5, 5, 3, 32)))
conv_b1 = tf.Variable(tf.zeros((32,)))
conv_w2 = tf.Variable(kaiming_normal((3, 3, 32, 16)))
conv_b2 = tf.Variable(tf.zeros((16,)))
fc_w = tf.Variable(kaiming_normal((32 * 32 * 16, 10)))
fc_b = tf.Variable(tf.zeros((10,)))
params = [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b]
############################################################################
# END OF YOUR CODE #
############################################################################
return params
def main():
learning_rate = 3e-3
tf.reset_default_graph()
is_training = tf.placeholder(tf.bool, name='is_training')
X_train, y_train, X_val, y_val, X_test, y_test = load_cifar10()
train_dset = Dataset(X_train, y_train, batch_size=64, shuffle=True)
test_dset = Dataset(X_test, y_test, batch_size=64)
val_dset = Dataset(X_val, y_val, batch_size=64, shuffle=False)
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape, y_train.dtype)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
device = '/cpu:0'
with tf.device(device):
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int32, [None])
params = three_layer_convnet_init()
scores = three_layer_convnet(x, params)
loss = training_step(scores, y, params, learning_rate)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for t, (x_np, y_np) in enumerate(train_dset):
feed_dict = {x: x_np, y: y_np}
loss_np = sess.run(loss, feed_dict=feed_dict)
if t % 100 == 0:
print('Iteration %d, loss = %.4f' % (t, loss_np))
check_accuracy(sess, val_dset, x, scores, is_training)
if __name__=="__main__":
main()
EDIT: removed unnecessary comments and code
The problem is here:
h1_conv = tf.nn.conv2d(x,
conv_w1 + conv_b1,
strides=[1, 1, 1, 1],
padding='SAME'
)
This is wrong: here you are adding the bias values (conv_b1) to the filter conv_w1, but the bias has to be added to the output of the conv layer. The right way would be something like this:
h1_conv = tf.nn.conv2d(x,
conv_w1,
strides=[1, 1, 1, 1],
padding='SAME'
)
h1_bias = tf.nn.bias_add(h1_conv, conv_b1)
h1 = tf.nn.relu(h1_bias)
Correct it for h2 in the same way.
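For completeness, the same fix applied to the second convolution might look like this (a sketch mirroring the snippet above, using the question's variable names):

h2_conv = tf.nn.conv2d(h1,
                       conv_w2,
                       strides=[1, 1, 1, 1],
                       padding='SAME'
                       )
h2_bias = tf.nn.bias_add(h2_conv, conv_b2)
h2 = tf.nn.relu(h2_bias)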