Does a LightningDataModule with Trainer in PyTorch Lightning automatically run the validation loop? - python

I am trying to fight overfitting, which is why I looked through the documentation (https://pytorch-lightning.readthedocs.io/en/stable/common/evaluation_basic.html#train-with-the-validation-loop), where I found that you can pass both a training and a validation dataloader to Trainer.fit. The question is: should I use this method, or can I simply pass the DataModule class to Trainer.fit to prevent overfitting?
My DataModule code:
class ClassifierDataModule(pl.LightningDataModule):
    def __init__(self, train_dataset: pd.DataFrame, val_dataset: pd.DataFrame, batch_size: int):
        super().__init__()
        self.prepare_data_per_node = False
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.batch_size = batch_size

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=os.cpu_count())

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=True, num_workers=os.cpu_count())
data_module_classifier = ClassifierDataModule(train_dataset, val_dataset, BATCH_SIZE)
And here is my Trainer.fit():
model = MulticlassClassificationLIGHT(class_weights)
#trainer.fit(model, data_module_classifier) # SHOULD I USE THIS METHOD TO PREVENT OVERFITTING
trainer.fit(model, data_module_classifier.train_dataloader(),data_module_classifier.val_dataloader() ) # OR THIS ONE ?
My LightningModule just in case:
class MulticlassClassificationLIGHT(pl.LightningModule):
    def __init__(self, class_weights):
        super(MulticlassClassificationLIGHT, self).__init__()
        self.num_feature = 35
        self.num_class = 36
        self.layer_1 = nn.Linear(self.num_feature, 512)
        self.layer_2 = nn.Linear(512, 128)
        self.layer_3 = nn.Linear(128, 64)
        self.layer_out = nn.Linear(64, self.num_class)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.2)
        self.batchnorm1 = nn.BatchNorm1d(512)
        self.batchnorm2 = nn.BatchNorm1d(128)
        self.batchnorm3 = nn.BatchNorm1d(64)
        self.loss = nn.CrossEntropyLoss(weight=class_weights.to(device))

    def forward(self, x):
        x = self.layer_1(x)
        x = self.batchnorm1(x)
        x = self.relu(x)
        x = self.layer_2(x)
        x = self.batchnorm2(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.layer_3(x)
        x = self.batchnorm3(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.layer_out(x)
        return x

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = self.loss(logits, y)
        self.log("train_loss", loss, prog_bar=True, logger=True)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = self.loss(logits, y)
        self.log("val_loss", loss, prog_bar=True, logger=True)  # I ask Trainer to "ModelCheckpoint" this loss
        return loss

Passing a validation data loader during training does not fix overfitting. It lets you measure how much the model is overfitting or underfitting: for a well-fit model, performance on the validation data should be close to performance on the training data.
Regarding the syntax, this should work:
trainer.fit(model=model, train_dataloaders=data_module_classifier.train_dataloader(), val_dataloaders=data_module_classifier.val_dataloader())
Documentation for fit is here - https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-class-api
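Since your ClassifierDataModule already defines both train_dataloader and val_dataloader, you can also hand the DataModule itself to fit. To actually combat overfitting you would typically combine that with callbacks such as EarlyStopping or ModelCheckpoint monitoring val_loss. A minimal sketch, assuming the model and DataModule defined above (the callback settings are illustrative, not from the question):

import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

# Stop training when val_loss stops improving and keep the best checkpoint.
early_stop = EarlyStopping(monitor="val_loss", mode="min", patience=5)
checkpoint = ModelCheckpoint(monitor="val_loss", mode="min", save_top_k=1)

trainer = pl.Trainer(max_epochs=100, callbacks=[early_stop, checkpoint])

# Either form is fine; the datamodule variant uses the train_dataloader()
# and val_dataloader() methods of ClassifierDataModule internally.
trainer.fit(model, datamodule=data_module_classifier)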

Related

How can I build a CNN model to train with multiple labels for image classification?

I need your kind help.
How can I build a CNN model to train with multiple labels for image classification?
My data is a bunch of spectra. Each spectrum has 4 labels. How can I build a CNN to classify those images?
How should I compose the forward function and the initialization function?
What kinds of layers do you recommend?
First, the metadata structure goes like this (screenshot of the metaData.csv table omitted).
Here is my Dataset
class OurDataset(Dataset):
    spectra_dir = f"./data/spectrograms_fm"
    metaData_path = f"./data/FMAudio/metaData.csv"

    def __init__(self):
        self.audio_labels = panda.read_csv(self.metaData_path)
        self.transform = torchvision.transforms.Compose([
            torchvision.transforms.Resize((201, 81)),
            torchvision.transforms.ToTensor()
        ])

    def __len__(self):
        return len(self.audio_labels)

    def __getitem__(self, idx):
        img = PILImage.open(f"./data/spectrograms_fm/mutiplelabels/{self.audio_labels.iloc[idx, 8]}.png").convert("RGB")
        img = self.transform(img)
        label_A = torch.tensor(self.audio_labels.iloc[idx, 4])
        label_Fw = torch.tensor(self.audio_labels.iloc[idx, 5])
        label_P = torch.tensor(self.audio_labels.iloc[idx, 6])
        label_Fi = torch.tensor(self.audio_labels.iloc[idx, 7])
        return img, label_A, label_Fw, label_P, label_Fi
I defined the training function
def train(dataloader, model, loss, optimizer):
    model.train()
    size = len(dataloader.dataset)
    for batch, (img_tensors, Y_A, Y_Fw, Y_P, Y_Fi) in enumerate(dataloader):
        optimizer.zero_grad()
        pred = model(img_tensors.float())
        loss_A = cost(pred, Y_A)
        loss_Fw = cost(pred, Y_Fw)
        loss_P = cost(pred, Y_P)
        loss_Fi = cost(pred, Y_Fi)
        loss = loss_A + loss_Fw + loss_P + loss_Fi
        loss.backward()
        optimizer.step()
I used the official CNN model from the Microsoft PyTorch image-classification tutorial, which is not well suited to my case.
class CNNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(2112, 50)
        self.fc2 = nn.Linear(50, 4)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 4))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 4))
        x = self.flatten(x)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = F.relu(self.fc2(x))
        return F.log_softmax(x, dim=1)
Can you write me a demo?
Cheers
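One common pattern for this kind of multi-output problem (purely a sketch, not from this thread, assuming each of the four labels is its own classification task; the per-label class counts below are placeholders) is to share a convolutional backbone and give each label its own output head, then sum the four cross-entropy losses:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MultiHeadCNN(nn.Module):
    # Assumes 3x201x81 inputs, matching the Resize((201, 81)) transform above,
    # so the flattened feature size (2112) is the same as in the tutorial model.
    def __init__(self, n_A=10, n_Fw=10, n_P=10, n_Fi=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.flatten = nn.Flatten()
        self.fc = nn.Linear(2112, 128)
        # One classification head per label
        self.head_A = nn.Linear(128, n_A)
        self.head_Fw = nn.Linear(128, n_Fw)
        self.head_P = nn.Linear(128, n_P)
        self.head_Fi = nn.Linear(128, n_Fi)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 4))
        x = F.relu(F.max_pool2d(self.conv2(x), 4))
        x = F.relu(self.fc(self.flatten(x)))
        # Return raw logits per label; nn.CrossEntropyLoss applies log-softmax itself
        return self.head_A(x), self.head_Fw(x), self.head_P(x), self.head_Fi(x)

In the training loop each head is then paired with its own label, e.g. pred_A, pred_Fw, pred_P, pred_Fi = model(img_tensors.float()) and loss = cost(pred_A, Y_A) + cost(pred_Fw, Y_Fw) + cost(pred_P, Y_P) + cost(pred_Fi, Y_Fi).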

Trying to compute the loss of an encoder/decoder model

I am attempting to create an encoder/decoder model with mini-batches. I keep encountering an error stating:
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [32, 6]], which is output 0 of AsStridedBackward0, is at version 2; expected version 1 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
The traceback reveals that something is wrong with the y = self.linear(out) line, but I am unsure what exactly. Any help would be greatly appreciated. Below is the model. Thank you.
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
from sliding_window import sliding_window
from training_datasets import get_training_datasets_batch

torch.autograd.set_detect_anomaly(True)

class Encoder(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers=1):
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)

    def forward(self, x):
        flat = x.view(x.shape[0], x.shape[1], self.input_size)
        out, h = self.gru(flat)
        return out, h

class Decoder(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=6, num_layers=1):
        super(Decoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)
        self.ReLU = nn.ReLU()

    def forward(self, x, h):
        x = x.unsqueeze(1)
        out, h = self.gru(x, h)
        out = out.squeeze(1)
        print(out.shape)
        y = self.linear(out)
        print(y.shape)
        y = self.ReLU(y)
        return y, h

class EncoderDecoder(nn.Module):
    def __init__(self, hidden_size, input_size=6, output_size=6):
        super(EncoderDecoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.encoder = Encoder(input_size=input_size, hidden_size=hidden_size)
        self.decoder = Decoder(input_size=input_size, hidden_size=hidden_size, output_size=output_size)

    def train_model(self, ts, epochs, target_len, features, batch_size=64, test_len=288,
                    method='teacher_forcing', tfr=0.5, lr=0.01, dynamic_tf=False):
        X, Y = sliding_window(ts, features=288, target_len=target_len)
        x_train, x_val, x_test, y_train, y_val, y_test = get_training_datasets_batch(X, Y, features, test_len=test_len, batch_size=batch_size)
        losses = np.full(epochs, np.nan)
        optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, self.parameters()), lr=lr)
        criterion = nn.MSELoss()
        for e in range(epochs):
            print('Starting epoch {}'.format(e))
            x_train_data = iter(x_train)
            y_train_data = iter(y_train)
            x_val_data = iter(x_val)
            y_val_data = iter(y_val)
            x_train_shape = list(x_train)[0].shape
            # predicted = torch.zeros(target_len, batch_size, x_train_shape[2])
            # print(predicted.shape)
            loss = 0
            for x_train_in in x_train_data:
                optimizer.zero_grad()
                x_train_in = Variable(x_train_in)
                y_train_in = Variable(next(y_train_data).transpose(0, 1))
                _, enc_h = self.encoder(x_train_in)
                dec_in = x_train_in[:, -1, :]
                dec_h = enc_h
                if method == 'recursive':
                    for t in range(target_len):
                        dec_out, dec_h = self.decoder(dec_in, dec_h)
                        predicted = dec_out
                        dec_in = dec_out
                        loss += criterion(predicted, y_train_in[t])
                loss.backward(retain_graph=True)
                optimizer.step()
The problem in this case was the loss.backward(retain_graph=True). The code started working after adding the line loss = 0 after the optimizer step; the accumulated loss value otherwise keeps growing across batches and needs to be reset:
loss.backward()
optimizer.step()
loss=0
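Folded back into the inner batch loop of train_model, the fix looks roughly like this (a sketch based on the code above, keeping only the 'recursive' branch):

for x_train_in in x_train_data:
    optimizer.zero_grad()
    x_train_in = Variable(x_train_in)
    y_train_in = Variable(next(y_train_data).transpose(0, 1))

    _, enc_h = self.encoder(x_train_in)
    dec_in = x_train_in[:, -1, :]
    dec_h = enc_h

    loss = 0  # reset the accumulated loss for every batch
    if method == 'recursive':
        for t in range(target_len):
            dec_out, dec_h = self.decoder(dec_in, dec_h)
            dec_in = dec_out
            loss += criterion(dec_out, y_train_in[t])

    loss.backward()  # retain_graph=True is no longer needed once the loss is rebuilt each batch
    optimizer.step()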

Confusion matrix in Pytorch Lightning

I am running Alexnet on CIFAR10 dataset using Pytorch Lightning, here is my model:
class SelfSupervisedModel(pl.LightningModule):
    def __init__(self, hparams=None, num_classes=10, batch_size=128):
        super(SelfSupervisedModel, self).__init__()
        self.batch_size = batch_size
        self.loss_fn = nn.CrossEntropyLoss()
        self.hparams["lr"] = ModelHelper.Hyperparam.Learning_rate
        self.model = torchvision.models.alexnet(pretrained=False)

    def forward(self, x):
        return self.model(x)

    def training_step(self, train_batch, batch_idx):
        inputs, targets = train_batch
        predictions = self(inputs)
        loss = self.loss_fn(predictions, targets)
        return {'loss': loss}

    def validation_step(self, test_batch, batch_idx):
        inputs, targets = test_batch
        predictions = self(inputs)
        val_loss = self.loss_fn(predictions, targets)
        _, preds = tf.max(predictions, 1)
        acc = tf.sum(preds == targets.data) / (targets.shape[0] * 1.0)
        return {'val_loss': val_loss, 'val_acc': acc, 'target': targets, 'preds': predictions}

    def validation_epoch_end(self, outputs):
        avg_loss = tf.stack([x['val_loss'] for x in outputs]).mean()
        avg_acc = tf.stack([x['val_acc'].float() for x in outputs]).mean()
        logs = {'val_loss': avg_loss, 'val_acc': avg_acc}
        print(f'validation_epoch_end logs => {logs}')
        OutputMatrix.predictions = tf.cat([tmp['preds'] for tmp in outputs])
        OutputMatrix.targets = tf.cat([tmp['target'] for tmp in outputs])
        return {'progress_bar': logs}

    def configure_optimizers(self):
        return tf.optim.SGD(self.parameters(), lr=self.hparams["lr"], momentum=0.9)
I am storing the predicted and true values in OutputMatrix.predictions and OutputMatrix.targets, which are used to generate the confusion matrix (the plot itself is not reproduced here).
I'm pretty sure this should not be the output, though, and I cannot find where the mistake is. Any help would be appreciated.
I would suggest using TorchMetrics and the internal log method, so the code could look like:
class MyModule(LightningModule):
    def __init__(self):
        ...
        self.train_acc = torchmetrics.Accuracy()
        self.valid_acc = torchmetrics.Accuracy()

    def training_step(self, batch, batch_idx):
        x, y = batch
        preds = self(x)
        ...
        self.train_acc(preds, y)
        self.log('train_acc', self.train_acc, on_step=True, on_epoch=False)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        ...
        self.valid_acc(logits, y)
        self.log('valid_acc', self.valid_acc, on_step=True, on_epoch=True)
as you can also find in the docs related to PL integration.
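Since the end goal here is a confusion matrix, torchmetrics also provides a ConfusionMatrix metric that can be accumulated the same way instead of collecting predictions by hand. A minimal sketch (the task argument follows newer torchmetrics releases, older versions take only num_classes; older Lightning versions use validation_epoch_end instead of on_validation_epoch_end):

import torchmetrics

class MyModule(LightningModule):
    def __init__(self, num_classes=10):
        super().__init__()
        ...
        # Accumulates a num_classes x num_classes matrix across validation batches
        self.confmat = torchmetrics.ConfusionMatrix(task="multiclass", num_classes=num_classes)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        self.confmat.update(logits.argmax(dim=1), y)

    def on_validation_epoch_end(self):
        print(self.confmat.compute())  # the full confusion matrix for this epoch
        self.confmat.reset()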

Pytorch: 1D target tensor expected, multi-target not supported

I want to train a 1D CNN on time series. I get the following error message: 1D target tensor expected, multi-target not supported.
Here is the code, with simulated data matching the structure of my data, as well as the error message.
import torch
from torch.utils.data import DataLoader
import torch.utils.data as data
import torch.nn as nn
import numpy as np
import random
from tqdm.notebook import tqdm

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

train_dataset = []
n_item = 20
for i in range(0, n_item):
    train_data = np.random.uniform(-10, 10, 500)
    train_dataset.append(train_data)
train_dataset = np.asarray(train_dataset)
train_dataset.shape
ecg_train = torch.from_numpy(train_dataset).float()
labels_train = np.random.randint(2, size=n_item)
labels_train = torch.from_numpy(labels_train).long()

val_dataset = []
n_item = 10
for i in range(0, n_item):
    val_data = np.random.uniform(-10, 10, 500)
    val_dataset.append(val_data)
val_dataset = np.asarray(val_dataset)
val_dataset.shape
ecg_validation = torch.from_numpy(val_dataset).float()
labels_validation = np.random.randint(2, size=n_item)
labels_validation = torch.from_numpy(labels_validation).long()

class ECGNet(data.Dataset):
    """ImageNet Limited dataset."""

    def __init__(self, ecgs, labls, transform=None):
        self.ecg = ecgs
        self.target = labls
        self.transform = transform

    def __getitem__(self, idx):
        ecgVec = self.ecg[idx]  # .reshape(10, -1)
        labelID = self.target[idx].reshape(1)
        return ecgVec, labelID

    def __len__(self):
        return len(self.ecg)

train_data = ECGNet(ecg_train,
                    labels_train,
                    )
print("size of Training dataset: {}".format(len(train_data)))
validation_data = ECGNet(ecg_validation,
                         labels_validation,
                         )
print("size of Training dataset: {}".format(len(validation_data)))

batch_size = 1
train_dataloader = DataLoader(dataset=train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)
val_dataloader = DataLoader(dataset=validation_data,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=0)

def train_epoch(model, train_dataloader, optimizer, loss_fn):
    losses = []
    correct_predictions = 0
    # Iterate mini batches over training dataset
    for images, labels in tqdm(train_dataloader):
        images = images.to(device)
        # labels = labels.squeeze_()
        labels = labels.to(device)
        # labels = labels.to(device=device, dtype=torch.int64)
        # Run predictions
        output = model(images)
        # Set gradients to zero
        optimizer.zero_grad()
        # Compute loss
        loss = loss_fn(output, labels)
        # Backpropagate (compute gradients)
        loss.backward()
        # Make an optimization step (update parameters)
        optimizer.step()
        # Log metrics
        losses.append(loss.item())
        predicted_labels = output.argmax(dim=1)
        correct_predictions += (predicted_labels == labels).sum().item()
    accuracy = 100.0 * correct_predictions / len(train_dataloader.dataset)
    # Return loss values for each iteration and accuracy
    mean_loss = np.array(losses).mean()
    return mean_loss, accuracy

def evaluate(model, dataloader, loss_fn):
    losses = []
    correct_predictions = 0
    with torch.no_grad():
        for images, labels in dataloader:
            images = images.to(device)
            # labels = labels.squeeze_()
            labels = labels.to(device=device, dtype=torch.int64)
            # Run predictions
            output = model(images)
            # Compute loss
            loss = loss_fn(output, labels)
            # Save metrics
            predicted_labels = output.argmax(dim=1)
            correct_predictions += (predicted_labels == labels).sum().item()
            losses.append(loss.item())
    mean_loss = np.array(losses).mean()
    accuracy = 100.0 * correct_predictions / len(dataloader.dataset)
    # Return mean loss and accuracy
    return mean_loss, accuracy

def train(model, train_dataloader, val_dataloader, optimizer, n_epochs, loss_function):
    # We will monitor loss functions as the training progresses
    train_losses = []
    val_losses = []
    train_accuracies = []
    val_accuracies = []
    for epoch in range(n_epochs):
        model.train()
        train_loss, train_accuracy = train_epoch(model, train_dataloader, optimizer, loss_fn)
        model.eval()
        val_loss, val_accuracy = evaluate(model, val_dataloader, loss_fn)
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_accuracies.append(train_accuracy)
        val_accuracies.append(val_accuracy)
        print('Epoch {}/{}: train_loss: {:.4f}, train_accuracy: {:.4f}, val_loss: {:.4f}, val_accuracy: {:.4f}'.format(
            epoch + 1, n_epochs,
            train_losses[-1],
            train_accuracies[-1],
            val_losses[-1],
            val_accuracies[-1]))
    return train_losses, val_losses, train_accuracies, val_accuracies

class Simple1DCNN(torch.nn.Module):
    def __init__(self):
        super(Simple1DCNN, self).__init__()
        self.layer1 = torch.nn.Conv1d(in_channels=50,
                                      out_channels=20,
                                      kernel_size=5,
                                      stride=2)
        self.act1 = torch.nn.ReLU()
        self.layer2 = torch.nn.Conv1d(in_channels=20,
                                      out_channels=10,
                                      kernel_size=1)
        self.fc1 = nn.Linear(10 * 3, 2)

    def forward(self, x):
        print(x.shape)
        x = x.view(1, 50, -1)
        print(x.shape)
        x = self.layer1(x)
        print(x.shape)
        x = self.act1(x)
        print(x.shape)
        x = self.layer2(x)
        print(x.shape)
        x = x.view(1, -1)
        print(x.shape)
        x = self.fc1(x)
        print(x.shape)
        print(x)
        return x

model_a = Simple1DCNN()
model_a = model_a.to(device)
criterion = nn.CrossEntropyLoss()
loss_fn = torch.nn.CrossEntropyLoss()

n_epochs_a = 50
learning_rate_a = 0.01
alpha_a = 1e-5
momentum_a = 0.9
optimizer = torch.optim.SGD(model_a.parameters(),
                            momentum=momentum_a,
                            nesterov=True,
                            weight_decay=alpha_a,
                            lr=learning_rate_a)

train_losses_a, val_losses_a, train_acc_a, val_acc_a = train(model_a,
                                                             train_dataloader,
                                                             val_dataloader,
                                                             optimizer,
                                                             n_epochs_a,
                                                             loss_fn
                                                             )
Error message:
cpu
size of Training dataset: 20
size of Training dataset: 10
0%| | 0/20 [00:00<?, ?it/s]
torch.Size([1, 500])
torch.Size([1, 50, 10])
torch.Size([1, 20, 3])
torch.Size([1, 20, 3])
torch.Size([1, 10, 3])
torch.Size([1, 30])
torch.Size([1, 2])
tensor([[ 0.5785, -1.0169]], grad_fn=<AddmmBackward>)
Traceback (most recent call last):
File "SO_question.py", line 219, in <module>
train_losses_a, val_losses_a, train_acc_a, val_acc_a = train(model_a,
File "SO_question.py", line 137, in train
train_loss, train_accuracy = train_epoch(model, train_dataloader, optimizer, loss_fn)
File "SO_question.py", line 93, in train_epoch
loss = loss_fn(output, labels)
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/modules/loss.py", line 961, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/functional.py", line 2468, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "/Users/mymac/Documents/programming/python/mainenv/lib/python3.8/site-packages/torch/nn/functional.py", line 2264, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: 1D target tensor expected, multi-target not supported
What am I doing wrong?
You are using nn.CrossEntropyLoss as the criterion for your training. You correctly passed the labels as indices of the ground truth class: 0s and 1s. However, as the error message suggests, it needs to be a 1D tensor!
Simply remove the reshape in ECGNet's __getitem__:
def __getitem__(self, idx):
    ecgVec = self.ecg[idx]
    labelID = self.target[idx]
    return ecgVec, labelID
Edit
I want to increase the batch_size to 8. But now I get the error [...]
You are doing a lot of reshaping (flattening), which will certainly affect the batch size. As a general rule of thumb, never fiddle with axis=0. For instance, if you have an input shape of (8, 500), you immediately have a problem when doing x.view(1, 50, -1), since the resulting tensor will be (1, 50, 80) (the desired shape would have been (8, 50, 10)). Instead, you could reshape with x.view(x.size(0), 50, -1).
Same with x.view(1, -1) later down forward. You are looking to flatten the tensor, but you should not flatten it along with the batches, they need to stay separated! It's safer to use torch.flatten, yet I prefer nn.Flatten which flattens from axis=1 to axis=-1 by default.
My personal advice is to start with a simple setup (without train loops etc...) to verify the architecture and intermediate output shapes. Then, add the necessary logic to handle the training.
class ECGNet(data.Dataset):
    """ImageNet Limited dataset."""

    def __init__(self, ecgs, labls, transform=None):
        self.ecg = ecgs
        self.target = labls
        self.transform = transform

    def __getitem__(self, idx):
        ecgVec = self.ecg[idx]
        labelID = self.target[idx]
        return ecgVec, labelID

    def __len__(self):
        return len(self.ecg)

class Simple1DCNN(nn.Module):
    def __init__(self):
        super(Simple1DCNN, self).__init__()
        self.layer1 = nn.Conv1d(in_channels=50,
                                out_channels=20,
                                kernel_size=5,
                                stride=2)
        self.act1 = nn.ReLU()
        self.layer2 = nn.Conv1d(in_channels=20,
                                out_channels=10,
                                kernel_size=1)
        self.fc1 = nn.Linear(10 * 3, 2)
        self.flatten = nn.Flatten()

    def forward(self, x):
        x = x.view(x.size(0), 50, -1)
        x = self.layer1(x)
        x = self.act1(x)
        x = self.layer2(x)
        x = self.flatten(x)
        x = self.fc1(x)
        return x

batch_size = 8
train_data = ECGNet(ecg_train, labels_train)
train_dl = DataLoader(dataset=train_data,
                      batch_size=batch_size,
                      shuffle=True,
                      num_workers=0)
model = Simple1DCNN()
criterion = nn.CrossEntropyLoss()
Then
>>> x, y = next(iter(train_dl))
>>> y_hat = model(x)
>>> y_hat.shape
torch.Size([8, 2])
Also, make sure your loss works:
>>> criterion(y_hat, y)
tensor(..., grad_fn=<NllLossBackward>)

Multi Layer Perceptron Deep Learning in Python using Pytorch

I am getting errors when executing the train function of my MLP code.
This is the error:
mat1 and mat2 shapes cannot be multiplied (128x10 and 48x10)
My code for the train function is this:
class net(nn.Module):
    def __init__(self, input_dim2, hidden_dim2, output_dim2):
        super(net, self).__init__()
        self.input_dim2 = input_dim2
        self.fc1 = nn.Linear(input_dim2, hidden_dim2)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim2, hidden_dim2)
        self.fc3 = nn.Linear(hidden_dim2, output_dim2)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        x = F.softmax(self.fc3(x))
        return x
model = net(input_dim2, hidden_dim2, output_dim2) #create the network
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(model.parameters(), lr = learning_rate2)
def train(num_epochs2):
    for i in range(num_epochs2):
        tmp_loss = []
        for (x, y) in train_loader:
            print(y.shape)
            print(x.shape)
            outputs = model(x)  # forward pass
            print(outputs.shape)
            loss = criterion(outputs, y)  # loss computation
            tmp_loss.append(loss.item())  # recording the loss
            optimizer.zero_grad()  # clear the accumulated gradient
            loss.backward()  # auto-differentiation - accumulation of gradient
            optimizer.step()  # a gradient step
        print("Loss at {}th epoch: {}".format(i, np.mean(tmp_loss)))
I don't know where I'm going wrong. My code seems okay to me.
From the limited error message, I guess the problem is in the following snippet:
x = self.fc3(x)
x = F.softmax(self.fc3(x))
Try to replace with:
x = self.fc3(x)
x = F.softmax(x)
A good question should include the error backtrace and a complete toy example that reproduces the error!
Here, a ReLU activation seems to be missing in the __init__ function, or there is an extra activation in the forward function. Look at the code below and try to figure out what is extra or missing.
def __init__(self, input_dim2, hidden_dim2, output_dim2):
    super(net, self).__init__()
    self.input_dim2 = input_dim2
    self.fc1 = nn.Linear(input_dim2, hidden_dim2)
    self.relu = nn.ReLU()
    self.fc2 = nn.Linear(hidden_dim2, hidden_dim2)
    self.fc3 = nn.Linear(hidden_dim2, output_dim2)

def forward(self, x):
    x = self.fc1(x)
    x = self.relu(x)
    x = self.fc2(x)
    x = self.relu(x)
    x = self.fc3(x)
    x = F.softmax(self.fc3(x))
    return x
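For reference, a possible cleaned-up version (a sketch, not from the thread) calls fc3 only once and returns raw logits, since nn.CrossEntropyLoss applies log-softmax internally:

class net(nn.Module):
    def __init__(self, input_dim2, hidden_dim2, output_dim2):
        super(net, self).__init__()
        self.fc1 = nn.Linear(input_dim2, hidden_dim2)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim2, hidden_dim2)
        self.fc3 = nn.Linear(hidden_dim2, output_dim2)

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)  # raw logits; CrossEntropyLoss applies log-softmax itself

Note that this alone does not resolve the mat1 and mat2 shape mismatch in the question, which comes from the input feature size not matching input_dim2.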
