AND gate with PyTorch - python

I'm new to PyTorch and deep learning in general.
The code I wrote can be seen below.
I'm trying to learn the simple 'AND' problem, which is linearly separable.
The problem is that I'm getting poor results: only around 2 out of 10 runs reach the correct answer.
Sometimes the loss.item() value gets stuck at 0.250.
Just to clear things up:
Why does it only work 2 out of 10 times?
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autog

data_x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
data_y = np.array([[0, 0, 0, 1]]).T
data_x = autog.Variable(torch.FloatTensor(data_x))
data_y = autog.Variable(torch.FloatTensor(data_y), requires_grad=False)

in_dim = 2
out_dim = 1
epochs = 15000
epoch_print = epochs / 5
l_rate = 0.001

class NeuralNet(nn.Module):
    def __init__(self, input_size, output_size):
        super(NeuralNet, self).__init__()
        self.lin1 = nn.Linear(input_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = x
        out = self.lin1(out)
        out = self.relu(out)
        return out

model = NeuralNet(in_dim, out_dim)
criterion = nn.L1Loss()
optimizer = optim.Adam(model.parameters(), lr=l_rate)

for epoch in range(epochs):
    pred = model(data_x)
    loss = criterion(pred, data_y)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % epoch_print == 0:
        print("Epoch %d Loss %.3f" % (epoch + 1, loss.item()))

for x, y in zip(data_x, data_y):
    pred = model(x)
    print("Input", list(map(int, x)), "Pred", int(pred), "Output", int(y))

1. Using zero_grad with optimizer
You are not using optimizer.zero_grad() to clear the gradient. Your learning loop should look like this:
for epoch in range(epochs):
    optimizer.zero_grad()
    pred = model(data_x)
    loss = criterion(pred, data_y)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % epoch_print == 0:
        print("Epoch %d Loss %.3f" % (epoch + 1, loss.item()))
In this particular case it will not have much detrimental effect: the gradient accumulates, but since you loop over the same dataset over and over it makes barely any difference (you should get into this habit though, as you will use it throughout your deep learning journey).
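For instance, a tiny self-contained check (my own, not from the answer) showing how gradients add up when zero_grad() is skipped:

import torch

w = torch.tensor([1.0], requires_grad=True)

loss = (w * 2).sum()
loss.backward()
print(w.grad)        # tensor([2.])

loss = (w * 2).sum()
loss.backward()      # without zero_grad(), the new gradient is added to the old one
print(w.grad)        # tensor([4.])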
2. Cost Function
You are using Mean Absolute Error, which is a regression loss function, not a classification one (what you are doing is binary classification).
Accordingly, you should use BCELoss with a sigmoid activation, or (I prefer it that way) return logits from the network and use BCEWithLogitsLoss; both of them calculate binary cross-entropy (a simplified version of cross-entropy).
See below:
class NeuralNet(nn.Module):
    def __init__(self, input_size, output_size):
        super(NeuralNet, self).__init__()
        self.lin1 = nn.Linear(input_size, output_size)

    def forward(self, x):
        # You may want to use torch.nn.functional.sigmoid activation
        return self.lin1(x)

...

# Change your criterion to nn.BCELoss() if using sigmoid
criterion = nn.BCEWithLogitsLoss()

...
3. Predictions
If you use the logits version, the classifier learns to assign negative values to the 0 label and positive values to indicate 1. Your display function has to be modified to incorporate this knowledge:
for x, y in zip(data_x, data_y):
    pred = model(x)
    # See int(pred > 0), that's the only change
    print("Input", list(map(int, x)), "Pred", int(pred > 0), "Output", int(y))
This step does not apply if your forward applies sigmoid to the output. Oh, and it's better to use torch.round instead of casting to int.
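For the sigmoid variant, a small sketch (my own) of what the rounding looks like:

import torch

logit = torch.tensor([1.3])
prob = torch.sigmoid(logit)   # probability in (0, 1)
pred = torch.round(prob)      # 0.0 or 1.0
print(int(pred))              # 1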

Related

What is the PyTorch equivalent of a TensorFlow linear regression?

I am learning PyTorch and want to do a basic linear regression on data created this way:
import matplotlib.pyplot as plt  # needed for plt.scatter below
from sklearn.datasets import make_regression

x, y = make_regression(n_samples=100, n_features=1, noise=15, random_state=42)
y = y.reshape(-1, 1)
print(x.shape, y.shape)
plt.scatter(x, y)
I know that using TensorFlow this code can solve it:
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units=1, activation='linear', input_shape=(x.shape[1], )))
model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.05), loss='mse')
hist = model.fit(x, y, epochs=15, verbose=0)
but I need to know what the PyTorch equivalent would look like. What I tried to do was this:
import torch
import torch.nn as nn
import torch.nn.functional as F

# Model class
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        x = self.linear(x)
        return x

    def predict(self, x):
        return self.forward(x)

model = Net()
loss_fn = F.mse_loss
opt = torch.optim.SGD(model.parameters(), lr=0.05)

# Training function
def fit(num_epochs, model, loss_fn, opt, train_dl):
    # Repeat for given number of epochs
    for epoch in range(num_epochs):
        # Train with batches of data
        for xb, yb in train_dl:
            # 1. Generate predictions
            pred = model(xb)
            # 2. Calculate loss
            loss = loss_fn(pred, yb)
            # 3. Compute gradients
            loss.backward()
            # 4. Update parameters using gradients
            opt.step()
            # 5. Reset the gradients to zero
            opt.zero_grad()
        # Print the progress
        if (epoch + 1) % 10 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Training
fit(200, model, loss_fn, opt, data_loader)
But the model doesn't learn anything, and I don't know what else to try.
The input/output dimensions are (1/1).
Dataset
First of all, you should define torch.utils.data.Dataset
import torch
from sklearn.datasets import make_regression


class RegressionDataset(torch.utils.data.Dataset):
    def __init__(self):
        data = make_regression(n_samples=100, n_features=1, noise=0.1, random_state=42)
        self.x = torch.from_numpy(data[0]).float()
        self.y = torch.from_numpy(data[1]).float()

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return self.x[index], self.y[index]
It converts numpy data to PyTorch tensors inside __init__ and casts the data to float (numpy uses double by default, while PyTorch's default is float, in order to use less memory).
Apart from that, it simply returns a tuple of features and the respective regression targets.
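As a quick check (my own snippet, not part of the answer), the dtype conversion looks like this:

import numpy as np
import torch

arr = np.array([1.0, 2.0])        # numpy defaults to float64
t = torch.from_numpy(arr)
print(t.dtype)                    # torch.float64
print(t.float().dtype)            # torch.float32, PyTorch's default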
Fit
Almost there, but you have to flatten the output from the model (described below). torch.nn.Linear will return tensors of shape (batch, 1), while your targets are of shape (batch,). flatten() will remove the unnecessary 1 dimension.
# 2. Calculate Loss
loss = criterion(pred.flatten(), yb)
Model
That is all you need actually:
model = torch.nn.Linear(1, 1)
Any layer can be called directly, no need for forward and inheritance for simple models.
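For example (a small snippet of my own), the layer itself already acts as the model:

import torch

model = torch.nn.Linear(1, 1)
out = model(torch.randn(4, 1))   # forward pass without writing any class
print(out.shape)                 # torch.Size([4, 1])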
Calling
The rest is almost okay, you just have to create torch.utils.data.DataLoader and pass an instance of our dataset. What DataLoader does is call __getitem__ of the dataset multiple times and create a batch of the specified size (there is some other funny business, but that's the idea):
dataset = RegressionDataset()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)
model = torch.nn.Linear(1, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=3e-4)
fit(5000, model, criterion, optimizer, dataloader)
Also notice I've used torch.nn.MSELoss(); as we are passing an object, it looks better than a function in this case.
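For reference, a tiny snippet (mine) showing that the module form and the functional form compute the same value:

import torch
import torch.nn.functional as F

pred = torch.tensor([1.0, 2.0])
target = torch.tensor([1.5, 1.5])

print(F.mse_loss(pred, target))           # functional form
print(torch.nn.MSELoss()(pred, target))   # module form, same result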
Whole code
To make it easier:
import torch
from sklearn.datasets import make_regression


class RegressionDataset(torch.utils.data.Dataset):
    def __init__(self):
        data = make_regression(n_samples=100, n_features=1, noise=0.1, random_state=42)
        self.x = torch.from_numpy(data[0]).float()
        self.y = torch.from_numpy(data[1]).float()

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return self.x[index], self.y[index]


# Training function
def fit(num_epochs, model, criterion, optimizer, train_dl):
    # Repeat for given number of epochs
    for epoch in range(num_epochs):
        # Train with batches of data
        for xb, yb in train_dl:
            # 1. Generate predictions
            pred = model(xb)
            # 2. Calculate loss
            loss = criterion(pred.flatten(), yb)
            # 3. Compute gradients
            loss.backward()
            # 4. Update parameters using gradients
            optimizer.step()
            # 5. Reset the gradients to zero
            optimizer.zero_grad()
        # Print the progress
        if (epoch + 1) % 10 == 0:
            print(
                "Epoch [{}/{}], Loss: {:.4f}".format(epoch + 1, num_epochs, loss.item())
            )


dataset = RegressionDataset()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)

model = torch.nn.Linear(1, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=3e-4)

fit(5000, model, criterion, optimizer, dataloader)
You should get around 0.053 loss or so, vary noise or other params for harder/easier regression task.

How to implement contractive autoencoder in Pytorch?

I'm trying to create a contractive autoencoder in Pytorch. I found this thread and tried according to that. This is the snippet I wrote based on the mentioned thread:
import datetime
import numpy as np
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.utils import save_image, make_grid
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
%matplotlib inline

dataset_train = datasets.MNIST(root='MNIST',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)
dataset_test = datasets.MNIST(root='MNIST',
                              train=False,
                              transform=transforms.ToTensor(),
                              download=True)

batch_size = 128
num_workers = 2
dataloader_train = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               pin_memory=True)
dataloader_test = torch.utils.data.DataLoader(dataset_test,
                                              batch_size=batch_size,
                                              num_workers=num_workers,
                                              pin_memory=True)

def view_images(imgs, labels, rows=4, cols=11):
    imgs = imgs.detach().cpu().numpy().transpose(0, 2, 3, 1)
    fig = plt.figure(figsize=(8, 4))
    for i in range(imgs.shape[0]):
        ax = fig.add_subplot(rows, cols, i + 1, xticks=[], yticks=[])
        ax.imshow(imgs[i].squeeze(), cmap='Greys_r')
        ax.set_title(labels[i].item())

# now let's view some
imgs, labels = next(iter(dataloader_train))
view_images(imgs, labels, 13, 10)

class Contractive_AutoEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(784, 512)
        self.decoder = nn.Linear(512, 784)

    def forward(self, input):
        # flatten the input
        shape = input.shape
        input = input.view(input.size(0), -1)
        output_e = F.relu(self.encoder(input))
        output = F.sigmoid(self.decoder(output_e))
        output = output.view(*shape)
        return output_e, output

def loss_function(output_e, outputs, imgs, device):
    output_e.backward(torch.ones(output_e.size()).to(device), retain_graph=True)
    criterion = nn.MSELoss()
    assert outputs.shape == imgs.shape, f'outputs.shape : {outputs.shape} != imgs.shape : {imgs.shape}'

    imgs.grad.requires_grad = True
    loss1 = criterion(outputs, imgs)
    print(imgs.grad)
    loss2 = torch.mean(pow(imgs.grad, 2))
    loss = loss1 + loss2
    return loss

epochs = 50
interval = 2000
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = Contractive_AutoEncoder().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)

for e in range(epochs):
    for i, (imgs, labels) in enumerate(dataloader_train):
        imgs = imgs.to(device)
        labels = labels.to(device)

        outputs_e, outputs = model(imgs)
        loss = loss_function(outputs_e, outputs, imgs, device)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % interval:
            print('')
            print(f'epoch/epochs: {e}/{epochs} loss : {loss.item():.4f}')
For the sake of brevity I just used one layer each for the encoder and the decoder. It should obviously work regardless of the number of layers in either of them!
But the catch here is, aside from the fact that I don't know if this is the correct way of doing this (calculating gradients with respect to the input), I get an error which makes that solution wrong/not applicable.
That is:
imgs.grad.requires_grad = True
produces the error :
AttributeError : 'NoneType' object has no attribute 'requires_grad'
I also tried the second method suggested in that thread which is as follows:
class Contractive_Encoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(784, 512)

    def forward(self, input):
        # flatten the input
        input = input.view(input.size(0), -1)
        output_e = F.relu(self.encoder(input))
        return output_e

class Contractive_Decoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.decoder = nn.Linear(512, 784)

    def forward(self, input):
        output = F.sigmoid(self.decoder(input))
        output = output.view(-1, 1, 28, 28)
        return output

epochs = 50
interval = 2000
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model_enc = Contractive_Encoder().to(device)
model_dec = Contractive_Decoder().to(device)

optimizer = optim.Adam([{"params": model_enc.parameters()},
                        {"params": model_dec.parameters()}], lr=0.001)
optimizer_cond = optim.Adam(model_enc.parameters(), lr=0.001)

criterion = nn.MSELoss()

for e in range(epochs):
    for i, (imgs, labels) in enumerate(dataloader_train):
        imgs = imgs.to(device)
        labels = labels.to(device)

        outputs_e = model_enc(imgs)
        outputs = model_dec(outputs_e)
        loss_rec = criterion(outputs, imgs)
        optimizer.zero_grad()
        loss_rec.backward()
        optimizer.step()

        imgs.requires_grad_(True)
        y = model_enc(imgs)
        optimizer_cond.zero_grad()
        y.backward(torch.ones(imgs.view(-1, 28*28).size()))
        imgs.grad.requires_grad = True
        loss = torch.mean([pow(imgs.grad, 2)])
        optimizer_cond.zero_grad()
        loss.backward()
        optimizer_cond.step()

        if i % interval:
            print('')
            print(f'epoch/epochs: {e}/{epochs} loss : {loss.item():.4f}')
but I face the error :
RuntimeError: invalid gradient at index 0 - got [128, 784] but expected shape compatible with [128, 512]
How should I go about this in Pytorch?
Summary
The final implementation for contractive loss that I wrote is as follows:
def loss_function(output_e, outputs, imgs, lamda=1e-4, device=torch.device('cuda')):
    criterion = nn.MSELoss()
    assert outputs.shape == imgs.shape, f'outputs.shape : {outputs.shape} != imgs.shape : {imgs.shape}'

    loss1 = criterion(outputs, imgs)
    output_e.backward(torch.ones(output_e.size()).to(device), retain_graph=True)
    # Frobenius norm: the square root of the sum of the squares of all
    # entries of the Jacobian matrix
    loss2 = torch.sqrt(torch.sum(torch.pow(imgs.grad, 2)))
    imgs.grad.data.zero_()
    loss = loss1 + (lamda * loss2)
    return loss
and inside the training loop you need to do:
for e in range(epochs):
    for i, (imgs, labels) in enumerate(dataloader_train):
        imgs = imgs.to(device)
        labels = labels.to(device)

        imgs.requires_grad_(True)
        imgs.retain_grad()

        outputs_e, outputs = model(imgs)
        loss = loss_function(outputs_e, outputs, imgs, device=device)
        imgs.requires_grad_(False)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f'epoch/epochs: {e}/{epochs} loss: {loss.item():.4f}')
Full explanation
As it turns out, and as @akshayk07 rightfully pointed out in the comments, the implementation found on the PyTorch forum was wrong in multiple places. The notable thing being that it wasn't implementing the actual contractive loss introduced in the "Contractive Auto-Encoders: Explicit Invariance During Feature Extraction" paper! And aside from that, the implementation wouldn't work at all, for obvious reasons that will be explained in a moment.
The changes are obvious, so I'll try to explain what's going on here. First of all, note that imgs is not a leaf node, so the gradients would not be retained in the image's .grad attribute.
In order to retain gradients for non-leaf nodes, you should use retain_grad(); grad is only populated for leaf tensors. Also, imgs.retain_grad() should be called before doing forward(), as it instructs autograd to store grads into non-leaf nodes.
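A tiny self-contained illustration (my own, not from the thread) of retain_grad() on a non-leaf tensor:

import torch

x = torch.rand(2, 3, requires_grad=True)   # leaf tensor
y = x * 2                                  # non-leaf result of an op
y.retain_grad()                            # ask autograd to also keep y.grad
z = y.sum()
z.backward()
print(x.grad)   # populated, because x is a leaf
print(y.grad)   # populated only because retain_grad() was called before backward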
Update
Thanks to @Michael for pointing out that the correct calculation of the Frobenius norm is actually (from ScienceDirect):
the square root of the sum of the squares of all the matrix entries
and not
the square root of the sum of the absolute values of all the matrix entries, as explained here.
In PyTorch 1.5.0, a high-level torch.autograd.functional.jacobian API was added. This should make the contractive objective easier to implement for an arbitrary encoder. For torch>=1.5.0, the contractive loss would look like this:
contractive_loss = torch.norm(torch.autograd.functional.jacobian(self.encoder, imgs, create_graph=True))
The create_graph argument makes the jacobian differentiable.
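As a rough sketch (my own, with assumed layer sizes), this is how the Jacobian-based regularizer could slot into a training step; note that for batched input it builds the full cross-batch Jacobian, which is simple but memory-hungry:

import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Linear(784, 512), nn.ReLU())
decoder = nn.Sequential(nn.Linear(512, 784), nn.Sigmoid())
criterion = nn.MSELoss()
lamda = 1e-4

imgs = torch.rand(4, 784)   # a tiny batch of flattened images
code = encoder(imgs)
recon = decoder(code)

# Frobenius norm of the Jacobian of the code w.r.t. the input
jac = torch.autograd.functional.jacobian(encoder, imgs, create_graph=True)
loss = criterion(recon, imgs) + lamda * torch.norm(jac)
loss.backward()             # gradients flow through the norm term as well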
The main challenge in implementing the contractive autoencoder is calculating the Frobenius norm of the Jacobian, which is the gradient of the code or bottleneck layer (vector) with respect to the input layer (vector). This is the regularization term in the loss function. Fortunately, you have done the hard work in solving this for me. Thank you! You are using MSE loss for the first term. Cross-entropy loss is sometimes used instead; it's worth considering. I think you are almost there with the Frobenius norm, except that you need to take the square root of the sum of the squares of the Jacobian, whereas you are calculating the square root of the sum of the absolute values. Here's how I'd define the loss function (sorry, I changed notation a little to keep myself straight):
def cae_loss_fcn(code, img_out, img_in, lamda=1e-4, device=torch.device('cuda')):
    # First term in the loss function, for ensuring representational fidelity
    criterion = nn.MSELoss()
    assert img_out.shape == img_in.shape, f'img_out.shape : {img_out.shape} != img_in.shape : {img_in.shape}'
    loss1 = criterion(img_out, img_in)

    # Second term in the loss function, for enforcing contraction of representation
    code.backward(torch.ones(code.size()).to(device), retain_graph=True)
    # Frobenius norm of Jacobian of code with respect to input image
    loss2 = torch.sqrt(torch.sum(torch.pow(img_in.grad, 2)))  # THE CORRECTION
    img_in.grad.data.zero_()

    # Total loss, the sum of the two loss terms, with weight applied to second term
    loss = loss1 + (lamda * loss2)
    return loss

Why gradient descent doesn't work as expected with PyTorch

So I'm starting with PyTorch and wanted to begin with an easy linear regression example. I made a simple implementation of linear regression with PyTorch to learn the equation 2*x + 1, but the loss stays stuck around 120 and gradient descent doesn't converge to a small loss value. I don't know why this is happening, and it's driving me crazy because I don't see what's wrong; this example should be very easy to solve. This is the code I'm using:
import torch
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np

X = np.array([i for i in np.arange(1, 20)]).reshape(-1, 1)
X = torch.tensor(X, dtype=torch.float32, requires_grad=True)
y = np.array([2*i + 1 for i in np.arange(1, 20)]).reshape(-1, 1)
y = torch.tensor(y, dtype=torch.float32, requires_grad=True)
print(X.shape, y.shape)

class LR(torch.nn.Module):
    def __init__(self, n_features, n_hidden1, n_out):
        super(LR, self).__init__()
        self.linear = torch.nn.Linear(n_features, n_hidden1)
        self.predict = torch.nn.Linear(n_hidden1, n_out)

    def forward(self, x):
        x = F.relu(self.linear(x))
        x = self.predict(x)
        return x

model = LR(1, 10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = torch.nn.MSELoss()

def train(epochs=100):
    for e in range(epochs):
        pred = model(X)
        loss = loss_fn(pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(f"epoch: {e} and loss= {loss}")
The desired output is a small loss value and a model trained well enough to give good predictions later.
Your learning rate is too large. The model takes a few steps in the right direction, but it can't settle on an actually good minimizer and instead zigzags around it. If you try lr=0.001, the performance will be much better. This is why it's often useful to decay the learning rate over time when using first-order optimizers.
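For instance, a rough sketch (my own, reusing model, X, y and loss_fn from the question) of lowering the learning rate and decaying it with a scheduler:

optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)

for e in range(200):
    pred = model(X)
    loss = loss_fn(pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()   # halve the learning rate every 50 epochs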

My custom loss function in Pytorch does not train

My custom loss function in PyTorch does not decrease during training. The loss stays exactly the same. I am trying to write this custom loss function based on the false positive and false negative rates. I am giving you a simplified version of the code. Any idea what could be happening? Does the backpropagation turn to 0? Is this not the correct way of defining a custom loss function?
I have already checked that during backpropagation the gradient always stays TRUE (assert requires_grad). I have also tried to make a class (torch.nn.Module) out of the function false_pos_neg_rate, but that did not work. The assert on requires_grad turned out to be negative, and I left it out afterwards.
There is no error, and the training does continue.
def false_pos_neg_rate(outputs, truths):
    y = truths
    y_predicted = outputs
    cut_off = torch.tensor(0.5, requires_grad=True)
    y_predicted = torch.where(y_predicted <= cut_off, zeros, ones)
    tp, fp, tn, fn = confusion_matrix(y_predicted, y)
    fp_rate = fp / (fp + tn).float()
    fn_rate = fn / (fn + tp).float()
    loss = fn_rate + fp_rate
    return loss

for i, (samples, truths) in enumerate(train_loader):
    samples = Variable(samples)
    truths = Variable(truths)
    outputs = model(samples)
    loss = false_pos_neg_rate(outputs, truths)
    loss.backward()
    optimizer.step()
I expect the loss function to update the model and become smaller with every training step. Instead the loss stays exactly the same and nothing happens.
Please help me, what is happening? Why does the model not train during training steps?
As pointed out by Umang Gupta, your loss function is not differentiable. If you write down, mathematically, what you are trying to do, you'll see that your loss has zero gradient almost everywhere and behaves like a "step function".
In order to train models using gradient-descent methods you must have meaningful gradients for the loss function.
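A small illustration (mine, not part of the original answer) of why the hard threshold kills the gradient while the raw probabilities keep it:

import torch

p = torch.tensor([0.3, 0.7], requires_grad=True)

# Using the raw probabilities keeps a useful gradient
soft = p.sum()
soft.backward()
print(p.grad)               # tensor([1., 1.])

# Hard thresholding is piecewise constant: the 0/1 outputs carry no
# dependence on p, so no gradient can flow back through them
hard = torch.where(p > 0.5, torch.ones_like(p), torch.zeros_like(p))
print(hard.requires_grad)   # False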
Based on your tips, I updated my loss function. I made a dummy so you can check the first two functions as well. I added the rest so you can see how it is implemented. However, somewhere the gradient still turns out to be zero. At which step does the gradient turn zero, and how can I check this? Please, I would like to know how I can fix this :).
I tried to provide you with more information so you can play around as well, but if you miss anything please let me know!
y = Variable(torch.tensor((0, 0, 0, 1, 1, 1), dtype=torch.float), requires_grad=True)
y_pred = Variable(torch.tensor((0.333, 0.2, 0.01, 0.99, 0.49, 0.51), dtype=torch.float), requires_grad=True)

def binary_y_pred(y_pred):
    y_pred.register_hook(lambda grad: print(grad))
    y_pred = y_pred + torch.tensor(0.5, requires_grad=True, dtype=torch.float)
    y_pred = y_pred.pow(5)  # this is my way of working around using torch.where()
    y_pred = y_pred.pow(10)
    y_pred = y_pred.pow(15)
    m = nn.Sigmoid()
    y_pred = m(y_pred)
    y_pred = y_pred - torch.tensor(0.5, requires_grad=True, dtype=torch.float)
    y_pred = y_pred * 2
    y_pred.register_hook(lambda grad: print(grad))
    return y_pred

def confusion_matrix(y_pred, y):
    TP = torch.sum(y * y_pred)
    TN = torch.sum((1 - y) * (1 - y_pred))
    FP = torch.sum((1 - y) * y_pred)
    FN = torch.sum(y * (1 - y_pred))

    k_eps = torch.tensor(1e-12, requires_grad=True, dtype=torch.float)
    FN_rate = FN / (TP + FN + k_eps)
    FP_rate = FP / (TN + FP + k_eps)
    cost = FN_rate + FP_rate
    return cost

class FeedforwardNeuralNetModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(FeedforwardNeuralNetModel, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        return out

model = FeedforwardNeuralNetModel(input_dim, hidden_dim, output_dim)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, betas=[0.9, 0.99], amsgrad=True)
criterion = torch.nn.BCELoss(weight=None, size_average=None, reduce=None, reduction='mean')

samples = Variable(samples)
truths = Variable(truths)
outputs = model(samples)
loss = confusion_matrix(outputs, truths)
loss.backward()
optimizer.step()

PyTorch CrossEntropy error in simple example of NN

Hi, I am trying to make a NN model that satisfies a simple formula.
y = X1^2 + X2^2
But when I use CrossEntropyLoss as the loss function, I get two different error messages.
First, when I set the code like this:
x = torch.randn(batch_size, 2)
y_hat = model(x)
y = answer(x).long()
optimizer.zero_grad()
loss = loss_func(y_hat, y)
loss.backward()
optimizer.step()
I get this message:
RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at c:\programdata\miniconda3\conda-bld\pytorch_1533090623466\work\aten\src\thnn\generic/ClassNLLCriterion.c:93
Second, when I change the code like this:
x = torch.randn(batch_size, 2)
y_hat = model(x)
y = answer(x).long().view(batch_size,1,1)
optimizer.zero_grad()
loss = loss_func(y_hat, y)
loss.backward()
optimizer.step()
then I get a message like:
RuntimeError: multi-target not supported at c:\programdata\miniconda3\conda-bld\pytorch_1533090623466\work\aten\src\thnn\generic/ClassNLLCriterion.c:21
How can I solve this problem? Thanks. (Sorry for my English.)
This is my code:
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

def answer(x):
    y = x[:, 0].pow(2) + x[:, 1].pow(2)
    return y

class Model(nn.Module):
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(input_size, 10)
        self.linear2 = nn.Linear(10, 1)

    def forward(self, x):
        y = F.relu(self.linear1(x))
        y = F.relu(self.linear2(y))
        return y

model = Model(2, 1)
print(model, '\n')

loss_func = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)

batch_size = 3
epoch_n = 100
iter_n = 100

for epoch in range(epoch_n):
    loss_avg = 0
    for i in range(iter_n):
        x = torch.randn(batch_size, 2)
        y_hat = model(x)
        y = answer(x).long().view(batch_size, 1, 1)
        optimizer.zero_grad()
        loss = loss_func(y_hat, y)
        loss.backward()
        optimizer.step()
        loss_avg += loss
    loss_avg = loss_avg / iter_n
    if epoch % 10 == 0:
        print(loss_avg)
    if loss_avg < 0.001:
        break
Can I make this dataset using a DataLoader in PyTorch? Thanks for your help.
You are using the wrong loss function. CrossEntropyLoss is used for classification problems, whereas your problem is regression. So you should use losses meant for regression-like tasks, such as Mean Squared Error Loss, L1 Loss, etc. Take a look at this, this, this and this.
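As a rough sketch (my own, reusing model, answer and optimizer from the question), the regression setup the answer suggests would look like this:

loss_func = nn.MSELoss()

for epoch in range(epoch_n):
    for i in range(iter_n):
        x = torch.randn(batch_size, 2)
        y_hat = model(x)
        y = answer(x).view(batch_size, 1)   # float targets, same shape as y_hat
        optimizer.zero_grad()
        loss = loss_func(y_hat, y)
        loss.backward()
        optimizer.step()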
