This is the model I defined; it is a simple LSTM with two fully connected layers.
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class mylstm(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim, linear_dim):
        super(mylstm, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTMCell(input_dim, self.hidden_dim)
        self.linear1 = nn.Linear(hidden_dim, linear_dim)
        self.linear2 = nn.Linear(linear_dim, output_dim)

    def forward(self, input):
        out, _ = self.lstm(input)
        out = nn.Dropout(p=0.3)(out)
        out = self.linear1(out)
        out = nn.Dropout(p=0.3)(out)
        out = self.linear2(out)
        return out
x_train and x_val are float DataFrames with shape (4478, 30), while y_train and y_val are float DataFrames with shape (4478, 10).
x_train.head()
Out[271]:
0 1 2 3 ... 26 27 28 29
0 1.6110 1.6100 1.6293 1.6370 ... 1.6870 1.6925 1.6950 1.6905
1 1.6100 1.6293 1.6370 1.6530 ... 1.6925 1.6950 1.6905 1.6960
2 1.6293 1.6370 1.6530 1.6537 ... 1.6950 1.6905 1.6960 1.6930
3 1.6370 1.6530 1.6537 1.6620 ... 1.6905 1.6960 1.6930 1.6955
4 1.6530 1.6537 1.6620 1.6568 ... 1.6960 1.6930 1.6955 1.7040
[5 rows x 30 columns]
x_train.shape
Out[272]: (4478, 30)
After defining the variables and doing one backpropagation pass, I find the validation loss is 1.4941:
import numpy as np

model = mylstm(30, 10, 200, 100).double()
optimizer = optim.RMSprop(model.parameters(), lr=0.001, alpha=0.9)
criterion = nn.L1Loss()
input_ = torch.autograd.Variable(torch.from_numpy(np.array(x_train)))
target = torch.autograd.Variable(torch.from_numpy(np.array(y_train)))
input2_ = torch.autograd.Variable(torch.from_numpy(np.array(x_val)))
target2 = torch.autograd.Variable(torch.from_numpy(np.array(y_val)))
optimizer.zero_grad()
output=model(input_)
loss=criterion(output,target)
loss.backward()
optimizer.step()
moniter=criterion(model(input2_),target2)
moniter
Out[274]: tensor(1.4941, dtype=torch.float64, grad_fn=<L1LossBackward>)
But when I call the forward function again, I get a different number due to the randomness of dropout:
moniter=criterion(model(input2_),target2)
moniter
Out[275]: tensor(1.4943, dtype=torch.float64, grad_fn=<L1LossBackward>)
What should I do to eliminate dropout in the prediction phase?
I tried eval():
moniter=criterion(model.eval()(input2_),target2)
moniter
Out[282]: tensor(1.4942, dtype=torch.float64, grad_fn=<L1LossBackward>)
moniter=criterion(model.eval()(input2_),target2)
moniter
Out[283]: tensor(1.4945, dtype=torch.float64, grad_fn=<L1LossBackward>)
And passing an additional parameter p to control dropout:
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class mylstm(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim, linear_dim, p):
        super(mylstm, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTMCell(input_dim, self.hidden_dim)
        self.linear1 = nn.Linear(hidden_dim, linear_dim)
        self.linear2 = nn.Linear(linear_dim, output_dim)

    def forward(self, input, p):
        out, _ = self.lstm(input)
        out = nn.Dropout(p=p)(out)
        out = self.linear1(out)
        out = nn.Dropout(p=p)(out)
        out = self.linear2(out)
        return out
model = mylstm(30, 10, 200, 100, 0.3).double()
output = model(input_)
loss = criterion(output, target)
loss.backward()
optimizer.step()
moniter = criterion(model(input2_, 0), target2)
Traceback (most recent call last):
File "<ipython-input-286-e49b6fac918b>", line 1, in <module>
output=model(input_)
File "D:\Users\shan xu\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
TypeError: forward() missing 1 required positional argument: 'p'
But neither of them worked.
You have to define your nn.Dropout layer in your __init__ and assign it to your model so that it is responsive to calling eval().
So changing your model like this should work for you:
class mylstm(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim, linear_dim, p):
        super(mylstm, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTMCell(input_dim, self.hidden_dim)
        self.linear1 = nn.Linear(hidden_dim, linear_dim)
        self.linear2 = nn.Linear(linear_dim, output_dim)
        # define the dropout layer in __init__
        self.drop_layer = nn.Dropout(p=p)

    def forward(self, input):
        out, _ = self.lstm(input)
        # apply model dropout, responsive to eval()
        out = self.drop_layer(out)
        out = self.linear1(out)
        # apply model dropout, responsive to eval()
        out = self.drop_layer(out)
        out = self.linear2(out)
        return out
If you change it like this, dropout will be inactive as soon as you call eval().
NOTE: If you want to continue training afterwards, you need to call train() on your model to leave evaluation mode.
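For illustration, a minimal sketch of the evaluation workflow, reusing the variable names from the question (the torch.no_grad() context is an extra assumption, not required for disabling dropout):

model = mylstm(30, 10, 200, 100, 0.3).double()
model.eval()               # dropout layers become identity functions
with torch.no_grad():      # optionally also disable gradient tracking for inference
    pred1 = model(input2_)
    pred2 = model(input2_)
# pred1 and pred2 are now identical
model.train()              # re-enable dropout before further training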
You can also find a small working example for dropout with eval() for evaluation mode here:
nn.Dropout vs. F.dropout pyTorch
I'm adding this answer just because I'm now facing the same issue while trying to reproduce deep Bayesian active learning through dropout disagreement.
If you need to keep dropout active (for example, to bootstrap a set of different predictions for the same test instances), you just need to leave the model in training mode; there is no need to define your own dropout layer.
Since in PyTorch you need to define your own prediction function, you can just add a parameter to it like this:
def predict_class(model, test_instance, active_dropout=False):
    if active_dropout:
        model.train()   # keep dropout active for stochastic predictions
    else:
        model.eval()    # deterministic inference
    return model(test_instance)  # forward pass; any post-processing is omitted here
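As a usage sketch (the choice of 20 stochastic passes is arbitrary):

# hypothetical Monte Carlo dropout: collect stochastic predictions for one input
preds = [predict_class(model, test_instance, active_dropout=True)
         for _ in range(20)]
# the spread of preds estimates the model's dropout disagreement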
As the other answers said, the dropout layer should be defined in your model's __init__ method so that the model can keep track of all the information of each pre-defined layer. When the model's state is changed, it notifies all layers and does the relevant work. For instance, calling model.eval() makes your model deactivate the dropout layers and pass all activations through directly. In general, if you want to be able to deactivate your dropout layers, you should define them in the __init__ method using the nn.Dropout module.
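If you prefer the functional API, a sketch of an equivalent forward (this variant is not from the question; F.dropout takes the training flag explicitly, so it stays responsive to eval() through self.training):

def forward(self, input):
    out, _ = self.lstm(input)
    # F.dropout is only active while self.training is True,
    # so model.eval() disables it just like nn.Dropout
    out = F.dropout(out, p=0.3, training=self.training)
    out = self.linear1(out)
    out = F.dropout(out, p=0.3, training=self.training)
    return self.linear2(out)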
I am using Keras' sklearn wrapper for a regressor, namely tf.keras.wrappers.scikit_learn.KerasRegressor.
I want this regressor to work within sklearn's cross validation scheme, namely sklearn.model_selection.cross_validate.
The regressor generally works without CV.
However, the latter fails because a parameter required by the regressor's __init__ method, which defines the batch input shape, appears to be missing.
This seems to be the case because MyRegressor or KerasRegressor isn't correctly cloneable using clone(estimator). The specific error message is:
KeyError: 'batch_input_shape'
Is there a way to make MyRegressor work with cross_validate? Am I somehow violating sklearn's requirements?
Please see this condensed working example:
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_validate
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor

class MyRegressor(KerasRegressor):
    def __init__(self, batch_input_shape, build_fn=None, **kwargs):
        self.batch_input_shape = batch_input_shape
        super().__init__(**kwargs)

    def __call__(self, *kwargs):
        model = Sequential([
            LSTM(16, stateful=True, batch_input_shape=self.batch_input_shape),
            Dense(1),
        ])
        model.compile(optimizer='adam', loss='mean_squared_error', metrics=['RootMeanSquaredError'])
        return model

    def reset_states(self):
        self.model.reset_states()

X, y = make_regression(6400, 5)
X = X.reshape(X.shape[0], 1, X.shape[1])
batch_size = 64
batch_input_shape = (batch_size, 1, X.shape[-1])

# Works fine
reg = MyRegressor(batch_input_shape)
for i in range(10):
    reg.fit(X, y, batch_size=batch_size)
    reg.reset_states()

# Doesn't work
reg = MyRegressor(batch_input_shape)
results = cross_validate(reg, X, y, scoring=['neg_mean_squared_error'])
Cloneability requires a proper get_params method. Most often this is obtained by inheriting from sklearn's BaseEstimator, but KerasRegressor instead implements its own directly (source). The way it does it is incompatible with your additional batch_input_shape; you can tweak it to make things work:
def get_params(self, deep=False):
    res = self.sk_params.copy()  # sk_params was set by KerasRegressor.__init__
    res.update({
        'build_fn': self.build_fn,
        'batch_input_shape': self.batch_input_shape,
    })
    return res
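A quick way to check cloneability before running the full cross-validation (a sketch; clone lives in sklearn.base):

from sklearn.base import clone

reg = MyRegressor(batch_input_shape)
reg_copy = clone(reg)  # succeeds once get_params exposes batch_input_shape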
(I get an error about input shapes in your example after this update, but I'm not familiar enough with batch sizes and Keras to answer that follow-up.)
I have a Keras model defined as follows:
class ConvLayer(Layer):
    def __init__(self, nf, ks=3, s=2, **kwargs):
        self.nf = nf
        self.grelu = GeneralReLU(leak=0.01)
        self.conv = Conv2D(filters=nf,
                           kernel_size=ks,
                           strides=s,
                           padding="same",
                           use_bias=False,
                           activation="linear")
        super(ConvLayer, self).__init__(**kwargs)

    def rsub(self): return -self.grelu.sub
    def set_sub(self, v): self.grelu.sub = -v
    def conv_weights(self): return self.conv.weight[0]

    def build(self, input_shape):
        # No weight to train.
        super(ConvLayer, self).build(input_shape)  # Be sure to call this at the end

    def compute_output_shape(self, input_shape):
        output_shape = (input_shape[0],
                        input_shape[1] / 2,
                        input_shape[2] / 2,
                        self.nf)
        return output_shape

    def call(self, x):
        return self.grelu(self.conv(x))

    def __repr__(self):
        return f'ConvLayer(nf={self.nf}, activation={self.grelu})'

class ConvModel(tf.keras.Model):
    def __init__(self, nfs, input_shape, output_shape, use_bn=False, use_dp=False):
        super(ConvModel, self).__init__(name='mlp')
        self.use_bn = use_bn
        self.use_dp = use_dp
        self.num_classes = num_classes
        # backbone layers
        self.convs = [ConvLayer(nfs[0], s=1, input_shape=input_shape)]
        self.convs += [ConvLayer(nf) for nf in nfs[1:]]
        # classification layers
        self.convs.append(AveragePooling2D())
        self.convs.append(Dense(output_shape, activation='softmax'))

    def call(self, inputs):
        for layer in self.convs:
            inputs = layer(inputs)
        return inputs
I'm able to compile this model without any issues
>>> model.compile(optimizer=tf.keras.optimizers.Adam(lr=lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
But when I query the summary for this model, I see this error
>>> model = ConvModel(nfs, input_shape=(32, 32, 3), output_shape=num_classes)
>>> model.summary()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-220-5f15418b3570> in <module>()
----> 1 model.summary()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py in summary(self, line_length, positions, print_fn)
1575 """
1576 if not self.built:
-> 1577 raise ValueError('This model has not yet been built. '
1578 'Build the model first by calling `build()` or calling '
1579 '`fit()` with some data, or specify '
ValueError: This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.
I'm providing input_shape for the first layer of my model, so why is it throwing this error?
The error says what to do:
This model has not yet been built. Build the model first by calling build()
model.build(input_shape) # `input_shape` is the shape of the input data
# e.g. input_shape = (None, 32, 32, 3)
model.summary()
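Applied to the model from the question, a sketch (None leaves the batch dimension unspecified):

model = ConvModel(nfs, input_shape=(32, 32, 3), output_shape=num_classes)
model.build((None, 32, 32, 3))
model.summary()  # now prints the layer shapes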
There is a very big difference between a Keras subclassed model and the other Keras models (Sequential and Functional).
Sequential and Functional models are data structures that represent a DAG of layers. In simple words, a Functional or Sequential model is a static graph of layers built by stacking them on top of each other like LEGO. So when you provide input_shape to the first layer, these (Functional and Sequential) models can infer the shapes of all the other layers and build the model. Then you can print the input/output shapes using model.summary().
On the other hand, a subclassed model is defined via the body of a Python call method, so there is no graph of layers. We cannot know how the layers are connected to each other (because that is defined in the body of call, not as an explicit data structure), so we cannot infer the input/output shapes. For a subclassed model, the input/output shapes are unknown until it is first run on proper data. compile() therefore defers compilation and waits for real data; to infer the shapes of the intermediate layers, we need to run the model on real data and then use model.summary(). Without running the model on data, it throws the error you noticed. Please check the GitHub gist for the complete code.
The following is an example from the TensorFlow website.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

class ThreeLayerMLP(keras.Model):
    def __init__(self, name=None):
        super(ThreeLayerMLP, self).__init__(name=name)
        self.dense_1 = layers.Dense(64, activation='relu', name='dense_1')
        self.dense_2 = layers.Dense(64, activation='relu', name='dense_2')
        self.pred_layer = layers.Dense(10, name='predictions')

    def call(self, inputs):
        x = self.dense_1(inputs)
        x = self.dense_2(x)
        return self.pred_layer(x)

def get_model():
    return ThreeLayerMLP(name='3_layer_mlp')

model = get_model()

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255

model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer=keras.optimizers.RMSprop())

model.summary()  # This will throw an error as follows
# ValueError: This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.

# Need to run with real data to infer the shapes of the different layers
history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=1)
model.summary()
Thanks!
Another method is to pass the input_shape argument to the first layer and then call build(), like this:
model = Sequential()
model.add(Bidirectional(LSTM(n_hidden, return_sequences=False, dropout=0.25,
                             recurrent_dropout=0.1), input_shape=(n_steps, dim_input)))

# X is a train dataset with features, excluding the target variable
input_shape = X.shape
model.build(input_shape)
model.summary()
Make sure you create your model properly. A small typo like the following may also cause a problem:
model = Model(some-input, some-output, "model-name")
while the correct code should be:
model = Model(some-input, some-output, name="model-name")
If your TensorFlow/Keras version is 2.5.0, then just import Keras through the tensorflow package.
Not this:
from tensorflow import keras
from keras.models import Sequential
import tensorflow as tf
Like this:
from tensorflow import keras
from tensorflow.keras.models import Sequential
import tensorflow as tf
Version mismatches between TensorFlow and Keras can be the reason for this.
I encountered the same problem while training an LSTM model for regression.
Error:
ValueError: This model has not yet been built. Build the model first
by calling build() or by calling the model on a batch of data.
Earlier:
from tensorflow.keras.models import Sequential
from tensorflow.python.keras.models import Sequential
Corrected:
from keras.models import Sequential
I was also facing the same error, so I removed model.summary() and the issue was resolved, since the error arises if model.summary() is called before the model is built.
Here is the LINK to the description, which states:
Raises:
ValueError: if summary() is called before the model is built.
Here is an autoencoder I'm working on, from this tutorial: https://debuggercafe.com/implementing-deep-autoencoder-in-pytorch/
I'm just learning about autoencoders, and I've modified the source to encode a small custom dataset consisting of:
[0,1,0,1,0,1,0,1,0],[0,1,1,0,0,1,0,1,0],[0,1,1,0,0,1,0,1,0],[0,1,1,0,0,1,0,1,0]
It seems to work OK, but I'm unsure how to access the lower-dimensional embedding values of dimension 2 (set by the out_features parameter).
I've added a method to the Autoencoder class to return the embedding; is this the recommended way of accessing the embeddings?
Code:
import os
import warnings

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.datasets import MNIST
from torchvision.utils import save_image

# utility functions
def get_device():
    if torch.cuda.is_available():
        device = 'cuda:0'
    else:
        device = 'cpu'
    return device

device = get_device()

features = torch.tensor(np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0],
                                  [0, 1, 1, 0, 0, 1, 0, 1, 0],
                                  [0, 1, 1, 0, 0, 1, 0, 1, 0],
                                  [0, 1, 1, 0, 0, 1, 0, 1, 0]])).float()
tic_tac_toe_data_loader = torch.utils.data.DataLoader(features, batch_size=1, shuffle=True)
class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.fc1 = nn.Linear(in_features=9, out_features=2)

    def forward(self, x):
        return F.sigmoid(self.fc1(x))

class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.fc1 = nn.Linear(in_features=2, out_features=9)

    def forward(self, x):
        return F.sigmoid(self.fc1(x))

class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.fc1 = Encoder()
        self.fc2 = Decoder()

    def forward(self, x):
        return self.fc2(self.fc1(x))

net = Autoencoder()
net.to(device)

NUM_EPOCHS = 50
LEARNING_RATE = 1e-3
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)

# image transformations
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

outputs = None
def train(net, trainloader, NUM_EPOCHS):
    train_loss = []
    for epoch in range(NUM_EPOCHS):
        running_loss = 0.0
        for data in trainloader:
            img = data
            img = img.to(device)
            img = img.view(img.size(0), -1)
            # print('img.shape', img.shape)
            optimizer.zero_grad()
            outputs = net(img)
            loss = criterion(outputs, img)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        loss = running_loss / len(trainloader)
        train_loss.append(loss)
    return train_loss

# train the network
train_loss = train(net, tic_tac_toe_data_loader, NUM_EPOCHS)
I can access the lower dimensional embedding using
print(Encoder().forward( torch.tensor(np.array([0,1,0,1,0,1,0,1,0])).float()))
But is this using the trained weight values for the embedding? If I call Encoder multiple times with the same values:
print(Encoder().forward( torch.tensor(np.array([0,1,0,1,0,1,0,1,0])).float()))
print(Encoder().forward( torch.tensor(np.array([0,1,0,1,0,1,0,1,0])).float()))
different results are returned:
tensor([0.5083, 0.5020], grad_fn=<SigmoidBackward>)
tensor([0.4929, 0.6940], grad_fn=<SigmoidBackward>)
Why is this the case? Is an extra training step being invoked as a result of calling Encoder?
By calling Encoder() you are basically creating a new instance of the encoder every time, and the weights are randomly initialized each time.
Generally, you make one instance of it and train it, save the weights, and infer on it.
Also, in PyTorch you need not call .forward(); call the instance directly. forward is called implicitly, along with any other hook methods.
enc = Encoder()
input = torch.from_numpy(np.asarray([0, 1, 0, 1, 0, 1, 0, 1, 0])).float()
print(enc(input))
print(enc(input))
A training pass happens only when you pass the Encoder() instance to a train function; calling Encoder() just creates a new object.
Since each object has its own weights, and the weights are initialized randomly (see Xavier and Kaiming initialization), you get different outputs. Even after moving to a single object, you still have to explicitly train it with the train function.
As the other responders pointed out, when you call Encoder() you generate a new instance with randomly initialized weights. Because you are interested in the lower-dimensional embedding produced by your encoder, you need to access the trained encoder inside your trained net:
trained_encoder = net.fc1
Now that you have your encoder with trained weights, the following lines should produce the same result:
print(trained_encoder.forward( torch.tensor(np.array([0,1,0,1,0,1,0,1,0])).float()))
print(trained_encoder.forward( torch.tensor(np.array([0,1,0,1,0,1,0,1,0])).float()))
As pointed out by others you can further simplify by passing input directly:
test_input = torch.tensor(np.array([0,1,0,1,0,1,0,1,0])).float()
print(trained_encoder(test_input))
I'm trying to add Bayesian weight uncertainty to my Keras layers. After some coding I realized that a Bayesifier could be implemented as a wrapper for any kind of layer, but I'm facing one issue.
A primer on bayesian weight uncertainty:
Consider an ANN layer with m learnable parameters. A Bayesian version of this layer with a Gaussian distribution on the weights would store another set of m parameters. The first m would be considered means, the new set of m would be considered variances for the weights, so every weight is characterized by a normal distribution.
In the learning phase we sample weights from this distribution. Using the reparametrization trick, we can still do backpropagation.
As you can see, converting a layer to a Bayesian layer does not depend on the layer's architecture; we just duplicate the weights.
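Concretely, with the reparametrization trick each weight is sampled as a deterministic function of the learnable parameters plus independent noise, so gradients flow to both the mean and the variance parameters. A minimal sketch in plain NumPy (the softplus parametrization mirrors the one used in the wrapper below; the numbers are made up):

import numpy as np

mu, rho = 0.5, -3.0             # hypothetical mean and pre-stddev parameters
eps = np.random.randn()         # noise sampled independently of the parameters
stddev = np.log1p(np.exp(rho))  # softplus keeps the stddev positive
w = mu + stddev * eps           # w ~ N(mu, stddev^2), differentiable in mu and rho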
So I built a Keras wrapper class like so:
from keras.layers.wrappers import Wrapper
from keras import backend as K

K.set_learning_phase(1)

class Bayesify(Wrapper):
    def __init__(self, base_layer,
                 variational_initializer="ones",
                 variational_regularizer=None,
                 variational_constraint=None,
                 **kwargs):
        super().__init__(base_layer, **kwargs)
        self.variational_initializer = variational_initializer
        self.variational_regularizer = variational_regularizer
        self.variational_constraint = variational_constraint
        self.mean = []
        self.variation = []

    def build(self, input_shape=None):
        super().build(input_shape)
        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True
        self.mean = self.layer.trainable_weights[:]
        for tensor in self.mean:
            self.variation.append(self.add_weight(
                name="variation",
                shape=tensor.shape,
                initializer=self.variational_initializer,
                regularizer=self.variational_regularizer,
                constraint=self.variational_constraint
            ))
        self._trainable_weights = self.mean + self.variation

    def _sample_weights(self):
        return [mean + K.log(1. + K.exp(log_stddev)) * K.random_normal(shape=mean.shape)
                for mean, log_stddev in zip(self.mean, self.variation)]

    def call(self, inputs, **kwargs):
        self.layer._trainable_weights = K.in_train_phase(self._sample_weights(), self.mean)
        return self.layer.call(inputs, **kwargs)
Short script to run a bayesian Dense layer on MNIST:
import numpy as np
from keras.models import Model
from keras.layers import Dense, Input
from keras.datasets import mnist
from bayesify import Bayesify
(lX, lY), (tX, tY) = mnist.load_data()
lX, tX = lX / 255., tX / 255.
onehot = np.eye(10)
lY, tY = onehot[lY], onehot[tY]
inputs = Input(lX.shape[1:])
x = Bayesify(Dense(30, activation="tanh"))(inputs)
x = Dense(lY.shape[1], activation="softmax")(x)
ann = Model(inputs=inputs, outputs=x)
ann.compile(optimizer="adam", loss="categorical_crossentropy")
ann.fit(lX, lY, batch_size=64, epochs=10, validation_data=(tX, tY))
The learning script raises weird internal Keras exceptions which I'm trying to figure out now.
Also I'm not entirely sure that the gradients of the wrapper's weights will be handled correctly.
I am currently pushing the sampled weights into the wrapped layer using the setter of the layer.trainable_weights property and simply forwarding call() to layer.call().
One such error:
File "/data/Prog/PyCharm/bayesforge/xp_mnist.py", line 20, in <module>
ann.fit(lX, lY, batch_size=64, epochs=10, validation_data=(tX, tY))
File "/usr/lib/python3.6/site-packages/keras/engine/training.py", line
1630, in fit
batch_size=batch_size)
File "/usr/lib/python3.6/site-packages/keras/engine/training.py", line
1480, in _standardize_user_data
exception_prefix='target')
File "/usr/lib/python3.6/site-packages/keras/engine/training.py", line
113, in _standardize_input_data
'with shape ' + str(data_shape))
ValueError: Error when checking target: expected dense_2 to have 3
dimensions, but got array with shape (60000, 10)
One small note: I only tested my scripts with the TensorFlow backend.
Does anyone have any suggestions on how this wrapping could be accomplished?
I am new to PyTorch and I am trying out the Embedding Layer.
I wrote a naive classification task in which all the inputs are equal and all the labels are set to 1.0, so I expect the model to learn quickly to predict 1.0.
The input is always 0, which is fed into an nn.Embedding(1, 32) layer, followed by nn.Linear(32, 1) and nn.ReLU().
However, unexpected and undesired behavior occurs: the training outcome differs across runs of the code.
For example,
setting the random seed to 10, the model converges: the loss decreases and the model always predicts 1.0
setting the random seed to 1111, the model doesn't converge: the loss doesn't decrease and the model always predicts 0.5. In those cases the parameters are not updated
Here is minimal, reproducible code:
from torch.nn import BCEWithLogitsLoss
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.utils.data import Dataset
import torch

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.vgg_fc = nn.Linear(32, 1)
        self.relu = nn.ReLU()
        self.embeddings = nn.Embedding(1, 32)

    def forward(self, data):
        emb = self.embeddings(data['index'])
        return self.relu(self.vgg_fc(emb))

class MyDataset(Dataset):
    def __init__(self):
        pass

    def __len__(self):
        return 1000

    def __getitem__(self, idx):
        return {'label': 1.0, 'index': 0}

def train():
    model = MyModel()
    db = MyDataset()
    dataloader = DataLoader(db, batch_size=256, shuffle=True, num_workers=16)
    loss_function = BCEWithLogitsLoss()
    optimizer_rel = optim.SGD(model.parameters(), lr=0.1)
    for epoch in range(50):
        for i_batch, sample_batched in enumerate(dataloader):
            model.zero_grad()
            out = model({'index': Variable(sample_batched['index'])})
            labels = Variable(sample_batched['label'].type(torch.FloatTensor).view(sample_batched['label'].shape[0], 1))
            loss = loss_function(out, labels)
            loss.backward()
            optimizer_rel.step()
            print('Epoch:', epoch, 'batch', i_batch, 'Tr_Loss:', loss.item())
    return model

if __name__ == '__main__':
    # please try seed 10 (converges) and seed 1111 (fails)
    torch.manual_seed(10)
    train()
Without specifying the random seed, different runs have different outcomes.
Why is the model, in those cases, unable to learn such an easy task?
Is there any mistake in the way I use the nn.Embedding layer?
Thank you
I found the problem was the final ReLU layer before the sigmoid.
As stated here, that layer will:
throw away information without adding any additional benefit
After removing the layer, the network learned as expected with any seed.
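For reference, a minimal sketch of the corrected forward: the linear layer's raw logit goes straight into BCEWithLogitsLoss, which applies the sigmoid internally.

def forward(self, data):
    emb = self.embeddings(data['index'])
    # return the raw logit; BCEWithLogitsLoss applies the sigmoid itself
    return self.vgg_fc(emb)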