How to interpret the evolution of accuracy and loss? - python

I am training a neural network using PyTorch. Here is the code for my model and training loop.
class AccidentModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(89, 1600)
        self.act1 = nn.ReLU()
        self.fc2 = nn.Linear(1600, 800)
        self.act2 = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)
        self.act3 = nn.Softmax()
        self.fc3 = nn.Linear(800, 2)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act1(x)
        x = self.fc2(x)
        x = self.act2(x)
        x = self.dropout(x)
        x = self.act3(x)
        x = self.fc3(x)
        return x
def train(train_dl, model, epochs, losses, accuracies):
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.0001)
    for epoch in range(epochs):
        with tqdm.tqdm(train_dl, unit="batch") as tepoch:
            for (features, target) in tepoch:
                tepoch.set_description(f"Epoch {epoch}")
                optimizer.zero_grad()
                features, target = features.to(get_device()), target.to(get_device())
                output = model(features.float())
                target = target.view(-1)
                loss = loss_function(output, target)
                loss.backward()
                optimizer.step()
                output = torch.argmax(output, dim=1)
                correct = (output == target).float().sum()
                accuracy = correct / features.shape[0]
                losses.append(loss)
                accuracies.append(accuracy)
                tepoch.set_postfix(loss=loss.item(), accuracy=accuracy.item())
And here is the evolution of the accuracy (orange) and the loss (blue) during training:
My question is: is my model really learning or not, and how should I interpret this graph?
Thanks.

No, it's not learning: for a classification task like this, your loss should be decreasing, and it isn't.
What type of data are you using? Text? Images? It might be good to begin with a "classical" architecture suited to the task.
You may have to delete your third activation, which is unnecessary and/or wrongly placed: nn.CrossEntropyLoss expects raw logits and applies a (log-)softmax internally.
There are a lot of guides for beginners online.
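For reference, here is a minimal sketch (my own, not part of the original answer) of the model with the activations actually applied to x and the misplaced softmax removed, assuming nn.CrossEntropyLoss is kept as the loss:
class AccidentModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(89, 1600)
        self.fc2 = nn.Linear(1600, 800)
        self.fc3 = nn.Linear(800, 2)
        self.act = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        x = self.act(self.fc1(x))
        x = self.dropout(self.act(self.fc2(x)))
        return self.fc3(x)  # raw logits; CrossEntropyLoss applies log-softmax internally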

Related

What it means when your model can't overfit a small batch of data?

I am trying to train an RNN model to classify sentences into 4 classes, but it doesn't seem to work. I tried to overfit 4 examples (blue line), which worked, but even as few as 8 examples (red line) doesn't work, let alone the whole dataset.
I tried different learning rates and sizes of hidden_size and embedding_size, but it doesn't seem to help. What am I missing? I know that if the model is not able to overfit a small batch, its capacity should usually be increased, but in this case increasing the capacity has no effect.
The architecture is as follows:
class RNN(nn.Module):
    def __init__(self, embedding_size=256, hidden_size=128, num_classes=4):
        super().__init__()
        self.embedding = nn.Embedding(len(vocab), embedding_size, 0)
        self.rnn = nn.RNN(embedding_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # x = [batch_size, sequence_length]
        x = self.embedding(x)   # x = [batch_size, sequence_length, embedding_size]
        _, h_n = self.rnn(x)    # h_n = [1, batch_size, hidden_size]
        h_n = h_n.squeeze(0)
        out = self.fc(h_n)      # out = [batch_size, num_classes]
        return out
Input data is tokenized sentences, padded with 0 to the longest sentence in the batch, so as an example one sample would be: [2784, 9544, 1321, 120, 0, 0]. The data is from AG_NEWS dataset from torchtext datasets.
The training code:
model = RNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LR)

model.train()
for epoch in range(NUM_EPOCHS):
    epoch_losses = []
    correct_predictions = []
    for batch_idx, (labels, texts) in enumerate(train_loader):
        scores = model(texts)
        loss = criterion(scores, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        epoch_losses.append(loss.item())
        correct = (scores.max(1).indices == labels).sum()
        correct_predictions.append(correct)
    epoch_avg_loss = sum(epoch_losses) / len(epoch_losses)
    epoch_avg_accuracy = float(sum(correct_predictions)) / float(len(labels))
The issue was due to vanishing gradients, which plain nn.RNN layers are especially prone to on longer sequences.
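One common mitigation (sketched below as my own suggestion, not the poster's confirmed fix) is to replace the vanilla nn.RNN with a gated layer such as nn.LSTM or nn.GRU, whose gating lets gradients survive longer sequences; gradient clipping with torch.nn.utils.clip_grad_norm_ is another standard option:
class RNN(nn.Module):
    def __init__(self, embedding_size=256, hidden_size=128, num_classes=4):
        super().__init__()
        self.embedding = nn.Embedding(len(vocab), embedding_size, 0)
        # LSTM instead of plain RNN to mitigate vanishing gradients
        self.rnn = nn.LSTM(embedding_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        x = self.embedding(x)
        _, (h_n, _) = self.rnn(x)       # LSTM returns (h_n, c_n) as its hidden state
        return self.fc(h_n.squeeze(0))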

Weights not updating on my neural net (Pytorch)

I'm completely new to neural nets, so I tried to roughly follow some tutorials to create a neural net that can distinguish whether a given binary picture contains a white circle or is all black. So I generated 1000 arrays of size 10000, each representing a 100x100 picture, with half of them containing a white circle somewhere. The generation of my dataset looks like this:
for i in range(1000):
    image = [0] * (IMAGE_SIZE * IMAGE_SIZE)
    if random() < 0.5:
        dataset.append([image, [[0]]])
    else:
        # inserts circle in image
        # ...
        dataset.append([image, [[1]]])

np.random.shuffle(dataset)
np.save("testdataset.npy", dataset)
The double list around the classifications is there because the net seemed to give that format as output, so I matched it.
Since I don't really have a precise idea of how PyTorch works, I don't really know which parts of the code are relevant for solving my problem and which aren't. Therefore, I've included the code for the net and the training below and really hope that someone can explain to me where I went wrong. I'm sorry if it's too much code. The code runs without errors, but if I print the parameters before and after training they haven't changed in any way, and the net always just returns a 0 for every image/array.
IMAGE_SIZE = 100
EPOCHS = 3
BATCH_SIZE = 50
VAL_PCT = 0.1

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(IMAGE_SIZE * IMAGE_SIZE, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return F.log_softmax(x, dim=1)

net = Net()
optimizer = optim.Adam(net.parameters(), lr=0.01)
loss_function = nn.MSELoss()

dataset = np.load("testdataset.npy", allow_pickle=True)
X = torch.Tensor([i[0] for i in dataset]).view(-1, 10000)
y = torch.Tensor([i[1] for i in dataset])

val_size = int(len(X) * VAL_PCT)
train_X = X[:-val_size]
train_y = y[:-val_size]
test_X = X[-val_size:]
test_y = y[-val_size:]

for epoch in range(EPOCHS):
    for i in range(0, len(train_X), BATCH_SIZE):
        batch_X = train_X[i:i + BATCH_SIZE].view(-1, 1, 10000)
        batch_y = train_y[i:i + BATCH_SIZE]
        net.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()
Instead of net.zero_grad() I would recommend using optimizer.zero_grad(), as it's more common and the de facto standard. Your training loop should be:
for epoch in range(EPOCHS):
    for i in range(0, len(train_X), BATCH_SIZE):
        batch_X = train_X[i:i + BATCH_SIZE].view(-1, 1, 10000)
        batch_y = train_y[i:i + BATCH_SIZE]
        optimizer.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()
I would also recommend reading a bit about different loss functions. You have a classification problem, so you should use a logits-based loss: BCEWithLogitsLoss for binary classification or cross-entropy for multi-class. I would make the following changes to the network and loss function:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(IMAGE_SIZE * IMAGE_SIZE, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x
loss_function = nn.BCEWithLogitsLoss()
Check the documentation before using it: https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss
Good luck!
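As a follow-up sketch of my own (not part of the original answer): with BCEWithLogitsLoss the targets must be floats with the same shape as the network output, and probabilities are recovered with a sigmoid at inference time. Using the names from the question (net, batch_X, batch_y, IMAGE_SIZE) and the modified Net above that returns raw logits:
loss_function = nn.BCEWithLogitsLoss()

# training step: targets reshaped to (batch_size, 1) floats to match the logits
logits = net(batch_X.view(-1, IMAGE_SIZE * IMAGE_SIZE))
loss = loss_function(logits, batch_y.view(-1, 1).float())

# inference: sigmoid turns logits into probabilities, threshold at 0.5
probs = torch.sigmoid(logits)
preds = (probs > 0.5).long()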
First, it is not ideal to use neural networks to address this kind of problem; they are meant for highly non-linear data. For this example, you could simply use the average intensity of the image to find out whether a white pixel is present or not.
However, a classic logistic regression setup outputs a value from 0 to 1, i.e. a probability.
The softmax function is used when you have multiple classes; it normalizes the outputs so that they sum to 1.
log_softmax computes log( exp(x_i) / exp(x).sum() ). Here, your output layer consists of only 1 neuron, so the softmax of that single logit is always 1 and outputs = net(batch_X) is always log(1) = 0, regardless of the input.
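A quick self-contained check (my own illustration) of why a single-logit log_softmax can never learn:
import torch
import torch.nn.functional as F

logits = torch.randn(4, 1, requires_grad=True)   # batch of 4, one output neuron
out = F.log_softmax(logits, dim=1)
print(out)             # all zeros: softmax over a single element is always 1, log(1) = 0
out.sum().backward()
print(logits.grad)     # all zeros, so no gradient ever reaches the weights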

Why is a simple Binary classification failing in a feedforward neural network?

I am new to Pytorch. I was trying to model a binary classifier on the Kepler dataset. The following was my dataset class.
class KeplerDataset(Dataset):
    def __init__(self, test=False):
        self.dataframe_orig = pd.read_csv(koi_cumm_path)
        if (test == False):
            self.data = df_numeric[(df_numeric.koi_disposition == 1) | (df_numeric.koi_disposition == 0)].values
        else:
            self.data = df_numeric[~((df_numeric.koi_disposition == 1) | (df_numeric.koi_disposition == 0))].values
        self.X_data = torch.FloatTensor(self.data[:, 1:])
        self.y_data = torch.FloatTensor(self.data[:, 0])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.X_data[index], self.y_data[index]
Here, I created a custom classifier class with one hidden layer and a single output unit that produces sigmoidal probability of being in class 1 (planet).
class KOIClassifier(nn.Module):
    def __init__(self, input_dim, out_dim):
        super(KOIClassifier, self).__init__()
        self.linear1 = nn.Linear(input_dim, 32)
        self.linear2 = nn.Linear(32, 32)
        self.linear3 = nn.Linear(32, out_dim)

    def forward(self, xb):
        out = self.linear1(xb)
        out = F.relu(out)
        out = self.linear2(out)
        out = F.relu(out)
        out = self.linear3(out)
        out = torch.sigmoid(out)
        return out
I then created a train_model function to optimize the loss using SGD.
def train_model(X, y):
    criterion = nn.BCELoss()
    optim = torch.optim.SGD(model.parameters(), lr=0.001)
    n_epochs = 100
    losses = []
    for epoch in range(n_epochs):
        y_pred = model.forward(X)
        loss = criterion(y_pred, y)
        losses.append(loss.item())
        optim.zero_grad()
        loss.backward()
        optim.step()

losses = []
for X, y in train_loader:
    losses.append(train_model(X, y))
But after performing the optimization over the train_loader, when I try predicting on the train_loader itself, the prediction values are very poor.
for features, y in train_loader:
    y_pred = model.predict(features)
    break

y_pred
> tensor([[4.5436e-02],
[1.5024e-02],
[2.2579e-01],
[4.2279e-01],
[6.0811e-02],
.....
Why is my model not working properly? Is it a problem with the dataset, or am I doing something wrong in implementing the neural net? I will link my Kaggle notebook, because more context might be helpful. Please help.
You are optimizing many times (100 steps) on the first batch (the first few samples) before moving to the next samples. This means your model will overfit those few samples before ever seeing the next batch, so your training will be very non-smooth, may diverge, and will end up far from the global optimum.
Usually, in a training loop you should:
1. go over all samples (this is one epoch)
2. shuffle your dataset so you visit the samples in a different order (set your PyTorch training loader accordingly)
3. go back to 1. until you reach the max number of epochs
Also, you should not re-define your optimizer (nor your criterion) on every call.
Your training loop should look like this:
criterion = nn.BCELoss()
optim = torch.optim.SGD(model.parameters(), lr=0.001)
n_epochs = 100

def train_model():
    for X, y in train_loader:
        optim.zero_grad()
        y_pred = model.forward(X)
        loss = criterion(y_pred, y)
        loss.backward()
        optim.step()

for epoch in range(n_epochs):
    train_model()
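Once training is fixed, here is a sketch of how evaluation on the training data could look (my own addition, assuming out_dim is 1 and the model keeps the sigmoid output as in the question):
model.eval()
with torch.no_grad():
    correct, total = 0, 0
    for X, y in train_loader:
        probs = model(X).view(-1)         # sigmoid probabilities in [0, 1]
        preds = (probs > 0.5).float()
        correct += (preds == y.view(-1)).sum().item()
        total += y.numel()
print("train accuracy:", correct / total)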

Pytorch simple model not improving

I am making a simple PyTorch neural net to approximate the sine function on x = [0, 2pi]. This is a simple architecture I use with different deep learning libraries to test whether I understand how to use them. The neural net, when untrained, always produces a straight horizontal line, and when trained, produces a straight line at y = 0. In general, it always produces a straight line at y = (the mean of the function). This leads me to believe something is wrong with the forward-prop portion, as the output should not just be a straight line when untrained. Here is the code for the net:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(1, 20),
            nn.Sigmoid(),
            nn.Linear(20, 50),
            nn.Sigmoid(),
            nn.Linear(50, 50),
            nn.Sigmoid(),
            nn.Linear(50, 1)
        )

    def forward(self, x):
        x = self.model(x)
        return x
Here is the training loop:
def train(net, trainloader, valloader, learningrate, n_epochs):
    net = net.train()
    loss = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=learningrate)
    for epoch in range(n_epochs):
        for X, y in trainloader:
            X = X.reshape(-1, 1)
            y = y.view(-1, 1)
            optimizer.zero_grad()
            outputs = net(X)
            error = loss(outputs, y)
            error.backward()
            # net.parameters() net.parameters() * learningrate
            optimizer.step()
        total_loss = 0
        for X, y in valloader:
            X = X.reshape(-1, 1).float()
            y = y.view(-1, 1)
            outputs = net(X)
            error = loss(outputs, y)
            total_loss += error.data
        print('Val loss for epoch', epoch, 'is', total_loss / len(valloader))
It is called as:
net = Net()
losslist = train(net, trainloader, valloader, .0001, n_epochs = 4)
Here trainloader and valloader are the training and validation loaders. Can anyone help me see what's wrong with this? I know it's not the learning rate, since it's the one I use in other frameworks, and I know it's not the fact that I'm using SGD or sigmoid activation functions, although I have a suspicion the error is somewhere in the activation functions.
Does anyone know how to fix this? Thanks.
After a while playing with some hyperparameters, modifying the net, and changing the optimizer (following this excellent recipe), I ended up changing the line optimizer = torch.optim.SGD(net.parameters(), lr = learningrate) to optimizer = torch.optim.Adam(net.parameters()) (the default optimizer parameters were used), running for 100 epochs with a batch size equal to 1.
The following code was used (tested on CPU only):
import torch
import torch.nn as nn
from torch.utils import data
import numpy as np
import matplotlib.pyplot as plt

# for reproducibility
torch.manual_seed(0)
np.random.seed(0)

class Dataset(data.Dataset):
    def __init__(self, init, end, n):
        self.n = n
        self.x = np.random.rand(self.n, 1) * (end - init) + init
        self.y = np.sin(self.x)

    def __len__(self):
        return self.n

    def __getitem__(self, idx):
        x = self.x[idx, np.newaxis]
        y = self.y[idx, np.newaxis]
        return torch.Tensor(x), torch.Tensor(y)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(1, 20),
            nn.Sigmoid(),
            nn.Linear(20, 50),
            nn.Sigmoid(),
            nn.Linear(50, 50),
            nn.Sigmoid(),
            nn.Linear(50, 1)
        )

    def forward(self, x):
        x = self.model(x)
        return x

def train(net, trainloader, valloader, n_epochs):
    loss = nn.MSELoss()
    # Switch the two following lines and run the code
    # optimizer = torch.optim.SGD(net.parameters(), lr = 0.0001)
    optimizer = torch.optim.Adam(net.parameters())
    for epoch in range(n_epochs):
        net.train()
        for x, y in trainloader:
            optimizer.zero_grad()
            outputs = net(x).view(-1)
            error = loss(outputs, y)
            error.backward()
            optimizer.step()
        net.eval()
        total_loss = 0
        for x, y in valloader:
            outputs = net(x)
            error = loss(outputs, y)
            total_loss += error.data
        print('Val loss for epoch', epoch, 'is', total_loss / len(valloader))
    net.eval()
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)

    def plot_result(ax, dataloader):
        out, xx, yy = [], [], []
        for x, y in dataloader:
            out.append(net(x))
            xx.append(x)
            yy.append(y)
        out = torch.cat(out, dim=0).detach().numpy().reshape(-1)
        xx = torch.cat(xx, dim=0).numpy().reshape(-1)
        yy = torch.cat(yy, dim=0).numpy().reshape(-1)
        ax.scatter(xx, yy, facecolor='green')
        ax.scatter(xx, out, facecolor='red')
        xx = np.linspace(0.0, 3.14159*2, 1000)
        ax.plot(xx, np.sin(xx), color='green')

    plot_result(ax1, trainloader)
    plot_result(ax2, valloader)
    plt.show()

train_dataset = Dataset(0.0, 3.14159*2, 100)
val_dataset = Dataset(0.0, 3.14159*2, 30)

params = {'batch_size': 1,
          'shuffle': True,
          'num_workers': 4}

trainloader = data.DataLoader(train_dataset, **params)
valloader = data.DataLoader(val_dataset, **params)

net = Net()
losslist = train(net, trainloader, valloader, n_epochs=100)
Result with Adam optimizer:
Result with SGD optimizer:
In general, it always produces a straight line at y = (The mean of the function).
Usually, this means that the NN has only successfully trained its final layer so far. You need to train it for longer or with better optimization, as ViniciusArruda shows above.
Edit: To explain further: when only the final layer has been trained, the NN is effectively trying to guess the output y with no knowledge of the input X. In that case, the best guess it can make is the mean value, since that minimizes its MSE loss.
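A quick numerical check of that claim (my own illustration): for a constant prediction c over the sine targets, the MSE is minimized exactly when c equals the mean of the targets.
import numpy as np

y = np.sin(np.linspace(0.0, 2 * np.pi, 1000))       # targets
cs = np.linspace(-1.0, 1.0, 2001)                    # candidate constant predictions
mse = np.array([np.mean((y - c) ** 2) for c in cs])
print(cs[np.argmin(mse)], y.mean())                  # both ~0: the best constant is the mean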

Regression loss functions incorrect

I'm trying a basic averaging example, but the validation and loss don't match and the network fails to converge if I increase the training time. I'm training a network with 2 hidden layers, each 500 units wide, on three integers from the range [0, 9], with a learning rate of 1e-1, Adam, a batch size of 1, and dropout, for 3000 iterations, validating every 100 iterations. If the absolute difference between the label and the hypothesis is less than a threshold (here set to 1), I consider it correct. Could someone let me know whether this is an issue with the choice of loss function, something wrong with PyTorch, or something I'm doing? Below are some plots:
val_diff = 1
acc_diff = torch.FloatTensor([val_diff]).expand(self.batch_size)
Loop 100 times during validation:
num_correct += torch.sum(torch.abs(val_h - val_y) < acc_diff)
Append after each validation phase:
validate.append(num_correct / total_val)
Here are some examples of (hypothesis, label) pairs:
[...(-0.7043088674545288, 6.0), (-0.15691305696964264, 2.6666667461395264),
(0.2827358841896057, 3.3333332538604736)]
I tried six of the loss functions in the API that are typically used for regression:
torch.nn.L1Loss(size_average=False)
torch.nn.L1Loss()
torch.nn.MSELoss(size_average=False)
torch.nn.MSELoss()
torch.nn.SmoothL1Loss(size_average=False)
torch.nn.SmoothL1Loss()
Thanks.
Network code:
class Feedforward(nn.Module):
    def __init__(self, topology):
        super(Feedforward, self).__init__()
        self.input_dim = topology['features']
        self.num_hidden = topology['hidden_layers']
        self.hidden_dim = topology['hidden_dim']
        self.output_dim = topology['output_dim']
        self.input_layer = nn.Linear(self.input_dim, self.hidden_dim)
        self.hidden_layer = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.output_layer = nn.Linear(self.hidden_dim, self.output_dim)
        self.dropout_layer = nn.Dropout(p=0.2)

    def forward(self, x):
        batch_size = x.size()[0]
        feat_size = x.size()[1]
        input_size = batch_size * feat_size
        self.input_layer = nn.Linear(input_size, self.hidden_dim).cuda()
        hidden = self.input_layer(x.view(1, input_size)).clamp(min=0)
        for _ in range(self.num_hidden):
            hidden = self.dropout_layer(F.relu(self.hidden_layer(hidden)))
        output_size = batch_size * self.output_dim
        self.output_layer = nn.Linear(self.hidden_dim, output_size).cuda()
        return self.output_layer(hidden).view(output_size)
Training code:
def train(self):
    if self.cuda:
        self.network.cuda()
    dh = DataHandler(self.data)
    # loss_fn = nn.L1Loss(size_average=False)
    # loss_fn = nn.L1Loss()
    # loss_fn = nn.SmoothL1Loss(size_average=False)
    # loss_fn = nn.SmoothL1Loss()
    # loss_fn = nn.MSELoss(size_average=False)
    loss_fn = torch.nn.MSELoss()
    losses = []
    validate = []
    hypos = []
    labels = []
    val_size = 100
    val_diff = 1
    total_val = float(val_size * self.batch_size)

    for i in range(self.iterations):
        x, y = dh.get_batch(self.batch_size)
        x = self.tensor_to_Variable(x)
        y = self.tensor_to_Variable(y)
        self.optimizer.zero_grad()
        loss = loss_fn(self.network(x), y)
        loss.backward()
        self.optimizer.step()
It looks like you've misunderstood how layers in PyTorch work; here are a few tips:
In your forward, when you do nn.Linear(...) you are defining new layers instead of using those you pre-defined in your network's __init__. Therefore, it cannot learn anything, as the weights are constantly reinitialized.
You shouldn't need to call .cuda() inside net.forward(...), since you've already copied the network to the GPU in your train method by calling self.network.cuda().
Ideally, the input to net.forward(...) should directly have the shape expected by the first layer, so you won't have to modify it. Here you should have x.size() == (batch_size, features) to match the first Linear layer.
Your forward should look close to this:
def forward(self, x):
    x = F.relu(self.input_layer(x))
    x = F.dropout(F.relu(self.hidden_layer(x)), training=self.training)
    x = self.output_layer(x)
    return x
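For completeness, a minimal sketch of my own (assuming the input already arrives as (batch_size, features) and reusing the question's topology dict) of the module with every layer defined once in __init__ and never re-created in forward:
class Feedforward(nn.Module):
    def __init__(self, topology):
        super(Feedforward, self).__init__()
        self.num_hidden = topology['hidden_layers']
        self.input_layer = nn.Linear(topology['features'], topology['hidden_dim'])
        self.hidden_layer = nn.Linear(topology['hidden_dim'], topology['hidden_dim'])
        self.output_layer = nn.Linear(topology['hidden_dim'], topology['output_dim'])
        self.dropout_layer = nn.Dropout(p=0.2)

    def forward(self, x):
        # x: (batch_size, features); layers are only applied here, never created
        x = F.relu(self.input_layer(x))
        for _ in range(self.num_hidden):
            x = self.dropout_layer(F.relu(self.hidden_layer(x)))
        return self.output_layer(x)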
