I'm new to PyTorch.
I have two Keras models, one with stacked LSTMs and one with bidirectional LSTMs. Can anybody tell me what the PyTorch equivalent of these two models is?
I tried some PyTorch code but it did not work. Since this code reaches a reasonable accuracy in Keras, I want the exact same model in PyTorch, and unfortunately I can't find it :(
First one:
def lstm_model(embedding_size, vocab_size):
    title = layers.Input(shape=(None,), dtype='int32', name='title')
    body = layers.Input(shape=(None,), dtype='int32', name='body')
    embedding = layers.Embedding(
        mask_zero=True,
        input_dim=vocab_size,
        output_dim=embedding_size,
        weights=[w2v_weights],
        trainable=True
    )
    lstm_1 = layers.LSTM(units=80, return_sequences=True)
    lstm_2 = layers.LSTM(units=80, return_sequences=False)

    emb_title = embedding(title)
    print("question embedding shape", emb_title.shape)
    sum_a = lstm_2(lstm_1(emb_title))
    print("q_output shape", sum_a.shape)

    emb_body = embedding(body)
    print("answer embedding shape", emb_body.shape)
    sum_b = lstm_2(lstm_1(emb_body))
    print("a_output shape", sum_b.shape)

    sim = layers.dot([sum_a, sum_b], axes=1, normalize=True)
    print("qa_similarity shape", sim.shape)
    # sim = layers.Activation(activation='sigmoid')(sim)

    sim_model = models.Model(
        inputs=[title, body],
        outputs=[sim],
    )
    sim_model.compile(loss='mean_squared_error', optimizer='nadam', metrics=['accuracy'])

    embedding_model = models.Model(
        inputs=[title],
        outputs=[sum_a]
    )
    return sim_model, embedding_model
Second one:
def bilstm_model(embedding_size, vocab_size):
    title = layers.Input(shape=(None,), dtype='int32', name='title')
    body = layers.Input(shape=(None,), dtype='int32', name='body')
    embedding = layers.Embedding(
        mask_zero=True,
        input_dim=vocab_size,
        output_dim=embedding_size,
        weights=[w2v_weights],
        trainable=True
    )
    lstm_1 = layers.Bidirectional(layers.LSTM(activation='tanh', dropout=0.2, units=100, return_sequences=True))
    lstm_2 = layers.Bidirectional(layers.LSTM(activation='tanh', dropout=0.2, units=100, return_sequences=False))

    sum_a = lstm_2(lstm_1(embedding(title)))
    sum_b = lstm_2(lstm_1(embedding(body)))

    sim = layers.dot([sum_a, sum_b], axes=1, normalize=True)
    # sim = layers.Activation(activation='sigmoid')(sim)

    sim_model = models.Model(
        inputs=[title, body],
        outputs=[sim],
    )
    sim_model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])

    embedding_model = models.Model(
        inputs=[title],
        outputs=[sum_a]
    )
    return sim_model, embedding_model
I've been looking for a correct answer for weeks :(
Here is a possible conversion of your first lstm_model to PyTorch.
In PyTorch you usually define your networks as classes, so I'll implement the LSTM as a class.
import torch
from torch import nn
import torch.nn.functional as F


class LSTMModel(nn.Module):
    def __init__(self, vocab_size, hidden_size, num_layers,
                 dropout, embedding_size):
        super(LSTMModel, self).__init__()
        # vocab_size entries, each mapped to an embedding_size vector
        self.encoder = nn.Embedding(num_embeddings=vocab_size,
                                    embedding_dim=embedding_size)
        # the LSTM consumes the embedded vectors, so its input size is embedding_size
        self.rnn = nn.LSTM(embedding_size,
                           hidden_size,
                           num_layers,
                           dropout=dropout)
        self.decoder = nn.Linear(in_features=hidden_size,
                                 out_features=embedding_size)
        self.init_weights()
        self.hidden_size = hidden_size
        # note: the middle dimension here really stands for the batch size
        self.weight_size = (num_layers, vocab_size, hidden_size)

    def init_weights(self):
        init_range = 0.1
        nn.init.uniform_(self.encoder.weight, -init_range, init_range)
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -init_range, init_range)

    def forward(self, input_, hidden_):
        embedded = self.encoder(input_)
        output, hidden_ = self.rnn(embedded, hidden_)
        # decode the LSTM outputs, not the (h, c) tuple
        decoded = self.decoder(output)
        # softmax over the feature dimension
        return F.log_softmax(decoded, dim=-1), hidden_

    def init_hidden(self):
        weight = next(self.parameters())
        return (weight.new_zeros(self.weight_size),
                weight.new_zeros(self.weight_size))
Now, if you use the network above directly, you may run into problems (for example with the tensor shapes or hyperparameters), in which case you will need to adjust the values for your data.
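For reference, a minimal sketch of how the class above could be instantiated and run on a dummy batch; all the sizes here are made up purely for illustration, and the initial hidden state is built by hand with shape (num_layers, batch_size, hidden_size):

import torch

# hypothetical sizes, purely for illustration
model = LSTMModel(vocab_size=1000, hidden_size=80, num_layers=2,
                  dropout=0.2, embedding_size=100)

seq_len, batch_size = 30, 4
dummy_input = torch.randint(0, 1000, (seq_len, batch_size))  # token indices, (seq_len, batch)

# initial (h, c) states must be (num_layers, batch_size, hidden_size)
hidden = (torch.zeros(2, batch_size, 80), torch.zeros(2, batch_size, 80))

log_probs, hidden = model(dummy_input, hidden)
print(log_probs.shape)  # torch.Size([30, 4, 100])

The classes below are a second conversion that stays closer to your Keras setup, with both the plain lstm and the BiLSTM variants: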
class Model(nn.Module):
    def __init__(self, **kwargs):
        super().__init__()
        self.embeddings = nn.Embedding(num_embeddings=kwargs["vocab_size"],
                                       embedding_dim=kwargs["embedding_dim"],
                                       padding_idx=kwargs["pad_idx"])
        self.embeddings.weight.requires_grad = True  # fine-tune the embeddings (trainable=True in Keras)
        if kwargs["model"] == "lstm":
            self.lstm = nn.LSTM(input_size=kwargs["embedding_dim"],  # input
                                hidden_size=kwargs["lstm_units"],    # output
                                num_layers=kwargs["lstm_layers"],
                                bidirectional=False,
                                batch_first=True)
        if kwargs["model"] == "BiLSTM":
            self.lstm = nn.LSTM(input_size=kwargs["embedding_dim"],   # input
                                hidden_size=kwargs["bilstm_units"],   # output
                                num_layers=kwargs["bilstm_layers"],
                                bidirectional=True,
                                batch_first=True)
        self.dropout = nn.Dropout(kwargs["dropout"])
        self.tanh = F.tanh

    def forward(self):
        pass


class LSTM_Model(Model):
    """
    a class to define multiple models
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, question, answer):
        question_embedding = self.embeddings(question)
        # print("question embedding shape:", question_embedding.shape)
        answer_embedding = self.embeddings(answer)
        # print("answer embedding shape:", answer_embedding.shape)
        q_output, (qhidden, qcell) = self.lstm(question_embedding)
        print("q_output shape:", q_output.shape)
        # print("qhidden shape:", qhidden.shape)
        # print("qcell shape:", qcell.shape)
        a_output, (ahidden, acell) = self.lstm(answer_embedding)
        print("a_output shape:", a_output.shape)
        # print("ahidden shape:", ahidden.shape)
        # print("acell shape:", acell.shape)
        # qa_similary = torch.mm(qhidden[-1], ahidden[-1])
        # qa_similary = torch.matmul((qhidden[-1]), torc.th(ahidden[-1]))
        # take the output at the last time step (batch_first=True -> [batch, seq, features])
        q_output = q_output[:, -1, :].squeeze()
        a_output = a_output[:, -1, :].squeeze()
        # element-wise product, min-max normalised per row, then averaged
        mm = torch.mul(q_output, a_output)
        mm -= mm.min(1, keepdim=True)[0]
        mm /= mm.max(1, keepdim=True)[0]
        qa_similary = torch.mean(mm, dim=1)
        # print("qa_similary shape:", qa_similary.shape)
        return qa_similary, qhidden


print("**************************MODEL DEFINE & CREATED!****************************")
Is this a true and completely exact implementation of that Keras code for the two-layer LSTM?
I am trying to implement a custom model on top of the neuralspace-reverie/indic-transformers-bn-bert pretrained model for binary sentence classification. I am confused about whether the ordering of dropout and the activation function matters in the class, and whether the implementation of the custom hidden layers is correct overall. I added 2 dense layers (l1 and l2) with a tanh activation function and dropout of 0.1. These layers have 512 and 256 nodes respectively, and I used softmax as the output layer.
class MyTaskSpecificCustomModel(nn.Module):
    def __init__(self, checkpoint, num_labels):
        super(MyTaskSpecificCustomModel, self).__init__()
        self.num_labels = num_labels
        self.model = AutoModel.from_pretrained(
            checkpoint,
            config=AutoConfig.from_pretrained(checkpoint, output_attention=True, output_hidden_state=True)
        )
        # This is to freeze the weights of the pretrained model.
        for _, param in self.model.named_parameters():
            param.requires_grad = False
        # New Layer
        self.dropout1 = nn.Dropout(0.1)
        # self.classifier = nn.Linear(768, num_labels)
        # layer 1
        self.dropout2 = nn.Dropout(0.1)
        self.activation1 = nn.Tanh()
        self.l1 = nn.Linear(768, 512)
        # layer 2
        self.dropout3 = nn.Dropout(0.1)
        self.l2 = nn.Linear(512, 256)
        self.activation2 = nn.Tanh()
        # layer 3
        self.l3 = nn.Linear(256, num_labels)
        # self.activation3 = nn.Tanh()
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input_ids=None, attention_mask=None, Type=None):
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)
        last_hidden_state = outputs[0]
        sequence_outputs = self.dropout1(last_hidden_state)
        # logits = self.classifier(sequence_outputs[:, 0, :].view(-1, 768))
        # layer 1
        logits = self.l1(sequence_outputs[:, 0, :].view(-1, 768))
        logits = self.dropout2(logits)
        logits = self.activation1(logits)
        # layer 2
        logits = self.l2(logits)
        logits = self.dropout3(logits)
        logits = self.activation2(logits)
        # output layer
        logits = self.l3(logits)
        logits = self.softmax(logits)
        return logits
This is my attention layer code (an implementation of an attention layer):
class Attention(nn.Module):
    def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.supports_masking = True
        self.bias = bias
        self.feature_dim = feature_dim
        self.step_dim = step_dim
        self.features_dim = 0
        weight = torch.zeros(feature_dim, 1)
        nn.init.kaiming_uniform_(weight)
        self.weight = nn.Parameter(weight)
        if bias:
            self.b = nn.Parameter(torch.zeros(step_dim))

    def forward(self, x, mask=None):
        feature_dim = self.feature_dim
        step_dim = self.step_dim
        eij = torch.mm(
            x.contiguous().view(-1, feature_dim),
            self.weight
        ).view(-1, step_dim)
        if self.bias:
            eij = eij + self.b
        eij = torch.tanh(eij)
        a = torch.exp(eij)
        if mask is not None:
            a = a * mask
        a = a / (torch.sum(a, 1, keepdim=True) + 1e-10)
        weighted_input = x * torch.unsqueeze(a, -1)
        return torch.sum(weighted_input, 1)
This is the RNN code:
# Instantiate the model w/ hyperparams
weights_matrix = weights_matrix
output_size = 13  # number of classes to predict
hidden_dim = 64
drop_prob = 0.5

# The RNN model that will be used to perform classification
class AttentionLSTM(nn.Module):
    def __init__(self, weights_matrix, output_size, hidden_dim, drop_prob):
        super(AttentionLSTM, self).__init__()
        # embedding layers
        self.embedding, self.num_embeddings, self.embeddings_size = create_emb_layer(weights_matrix, True)
        # embedding dropout
        self.dropout = nn.Dropout2d(drop_prob)
        # First lstm and GRU layers
        self.lstm1 = nn.LSTM(self.embeddings_size, hidden_dim, batch_first=True, bidirectional=True)
        self.gru1 = nn.GRU(hidden_dim * 2, hidden_dim, bidirectional=True, batch_first=True)
        # attention layer
        self.attention = Attention(hidden_dim * 2, seq_length)
        # Second lstm and GRU layers
        self.lstm2 = nn.LSTM(hidden_dim * 2, hidden_dim, batch_first=True, bidirectional=True)
        self.gru2 = nn.GRU(hidden_dim * 2, hidden_dim, bidirectional=True, batch_first=True)
        # linear
        self.fc = nn.Linear(hidden_dim * 2, hidden_dim * 2)
        self.out = nn.Linear(hidden_dim * 2, output_size)
        # activation functions
        self.sigmoid = nn.Sigmoid()  # for hidden layers
        self.softmax = nn.Softmax(dim=1)  # for output layer

    def forward(self, x):
        batch_size = x.size(0)
        # embedding output
        x = x.long()
        embeds = self.embedding(x)
        embeds = torch.squeeze(torch.unsqueeze(embeds, 0))
        # lstm, and gru outputs
        lstm_out1, _ = self.lstm1(embeds)
        gru_out1, _ = self.gru1(lstm_out1)
        gru_out1 = gru_out1.view(batch_size, -1, hidden_dim * 2)
        attention_out = self.attention(gru_out1, seq_length)
        attention_out = attention_out.view(batch_size, -1, hidden_dim * 2)
        attention_out = self.sigmoid(attention_out)
        lstm_out2, _ = self.lstm2(attention_out)
        # slice lstm_out to just get output of last element of the input sequence
        lstm_out2 = lstm_out2[:, -1]
        gru_out2, _ = self.gru2(lstm_out2)
        # linear outputs
        fc_out = self.softmax(self.fc(gru_out2))
        final_out = self.out(fc_out)
        return final_out
I am sure that my dataset is balanced after the pre-processing step, but my model always predicts the same output. Precision and F-score change for each input; however, this problem makes my recall score 1.0, since the output is always the same whatever the input is.
If anybody can help me, I would appreciate it.
It took some time to build networks from your requirements, but I have provided a few samples for creating a custom layer or model. You start from an embedding layer, and the randomness of the data creates a different input every time. GRU and LSTM learning layers may provide good results when they have:

- Matching input and target layers and parameters.
- Enough learning scope to differentiate the input; gated recurrence is repeated, and LSTM is specifically useful when the patterns in the data are significant, such as pictures or continuous data.
- Linear and Sigmoid layers, which provide contrast for differentiation; softmax is sometimes required when values are compared on a distribution basis. This is supposed to create contrasting output, except that softmax is applied on the weights of the values.
- A loss function chosen to match the output dimension/expectation (see the sketch after this list).
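On that last point, here is a minimal, generic sketch of the usual pairing in PyTorch: the last Linear layer emits raw logits and the loss function applies the softmax internally. The sizes below are made up purely for illustration and are not taken from your model.

import torch
from torch import nn

# hypothetical sizes, purely for illustration
hidden_dim, output_size, batch_size = 128, 13, 4

head = nn.Linear(hidden_dim, output_size)   # final layer outputs raw logits
criterion = nn.CrossEntropyLoss()           # applies log-softmax internally

features = torch.randn(batch_size, hidden_dim)
labels = torch.randint(0, output_size, (batch_size,))

logits = head(features)                     # note: no softmax before the loss
loss = criterion(logits, labels)
print(loss.item())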
[ Sample ]:
class create_emb_layer(tf.keras.layers.Embedding):
    def __init__(self, weights_matrix, bidirectional=True):
        self.num_embeddings = weights_matrix[0]
        self.embeddings_size = weights_matrix[1]
        self.bidirectional = bidirectional
        # Embedding expects (input_dim = vocabulary size, output_dim = embedding size)
        super(create_emb_layer, self).__init__(self.num_embeddings, self.embeddings_size)

    def build(self, input_shape):
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]), self.input_dim])

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)
[ My model ]:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
I want to implement the hierarchical attention mechanism for document classification presented by Yang, but I want to replace the LSTM with a Transformer.
I used Apoorv Nandan's text classification with Transformer:
https://keras.io/examples/nlp/text_classification_with_transformer/
I have implemented the Transformer hierarchically for classification: one for the sentence representation and another one for the document representation. The code is as follows:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.utils.np_utils import to_categorical


class MultiHeadSelfAttention(layers.Layer):
    def __init__(self, embed_dim, num_heads=8):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        if embed_dim % num_heads != 0:
            raise ValueError(
                f"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}"
            )
        self.projection_dim = embed_dim // num_heads
        self.query_dense = layers.Dense(embed_dim)
        self.key_dense = layers.Dense(embed_dim)
        self.value_dense = layers.Dense(embed_dim)
        self.combine_heads = layers.Dense(embed_dim)

    def attention(self, query, key, value):
        score = tf.matmul(query, key, transpose_b=True)
        dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
        scaled_score = score / tf.math.sqrt(dim_key)
        weights = tf.nn.softmax(scaled_score, axis=-1)
        output = tf.matmul(weights, value)
        return output, weights

    def separate_heads(self, x, batch_size):
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, inputs):
        # x.shape = [batch_size, seq_len, embedding_dim]
        batch_size = tf.shape(inputs)[0]
        query = self.query_dense(inputs)  # (batch_size, seq_len, embed_dim)
        key = self.key_dense(inputs)      # (batch_size, seq_len, embed_dim)
        value = self.value_dense(inputs)  # (batch_size, seq_len, embed_dim)
        query = self.separate_heads(
            query, batch_size
        )  # (batch_size, num_heads, seq_len, projection_dim)
        key = self.separate_heads(
            key, batch_size
        )  # (batch_size, num_heads, seq_len, projection_dim)
        value = self.separate_heads(
            value, batch_size
        )  # (batch_size, num_heads, seq_len, projection_dim)
        attention, weights = self.attention(query, key, value)
        attention = tf.transpose(
            attention, perm=[0, 2, 1, 3]
        )  # (batch_size, seq_len, num_heads, projection_dim)
        concat_attention = tf.reshape(
            attention, (batch_size, -1, self.embed_dim)
        )  # (batch_size, seq_len, embed_dim)
        output = self.combine_heads(
            concat_attention
        )  # (batch_size, seq_len, embed_dim)
        return output

    def compute_output_shape(self, input_shape):
        # it does not change the shape of its input
        return input_shape


class TransformerBlock(layers.Layer):
    def __init__(self, embed_dim, num_heads, ff_dim, dropout_rate, name=None):
        super(TransformerBlock, self).__init__(name=name)
        self.att = MultiHeadSelfAttention(embed_dim, num_heads)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim), ]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(dropout_rate)
        self.dropout2 = layers.Dropout(dropout_rate)

    def call(self, inputs, training):
        attn_output = self.att(inputs)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(inputs + attn_output)
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)

    def compute_output_shape(self, input_shape):
        # it does not change the shape of its input
        return input_shape


class TokenAndPositionEmbedding(layers.Layer):
    def __init__(self, maxlen, vocab_size, embed_dim, name=None):
        super(TokenAndPositionEmbedding, self).__init__(name=name)
        self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)

    def call(self, x):
        maxlen = tf.shape(x)[-1]
        positions = tf.range(start=0, limit=maxlen, delta=1)
        positions = self.pos_emb(positions)
        x = self.token_emb(x)
        return x + positions

    def compute_output_shape(self, input_shape):
        # it changes the shape from (batch_size, maxlen) to (batch_size, maxlen, embed_dim)
        return input_shape + (self.pos_emb.output_dim,)


# Lower level (produce a representation of each sentence):
embed_dim = 100       # Embedding size for each token
num_heads = 2         # Number of attention heads
ff_dim = 64           # Hidden layer size in feed forward network inside transformer
L1_dense_units = 100  # Size of the sentence-level representations output by the word-level model
dropout_rate = 0.1
vocab_size = 1000
class_number = 5
max_docs = 10000
max_sentences = 15
max_words = 60

word_input = layers.Input(shape=(max_words,), name='word_input')
word_embedding = TokenAndPositionEmbedding(maxlen=max_words, vocab_size=vocab_size,
                                           embed_dim=embed_dim, name='word_embedding')(word_input)
word_transformer = TransformerBlock(embed_dim=embed_dim, num_heads=num_heads, ff_dim=ff_dim,
                                    dropout_rate=dropout_rate, name='word_transformer')(word_embedding)
word_pool = layers.GlobalAveragePooling1D(name='word_pooling')(word_transformer)
word_drop = layers.Dropout(dropout_rate, name='word_drop')(word_pool)
word_dense = layers.Dense(L1_dense_units, activation="relu", name='word_dense')(word_drop)
word_encoder = keras.Model(word_input, word_dense)
word_encoder.summary()

# =========================================================================
# Upper level (produce a representation of each document):
L2_dense_units = 100

sentence_input = layers.Input(shape=(max_sentences, max_words), name='sentence_input')
# This is the line producing "NotImplementedError":
sentence_encoder = tf.keras.layers.TimeDistributed(word_encoder, name='sentence_encoder')(sentence_input)
sentence_transformer = TransformerBlock(embed_dim=L1_dense_units, num_heads=num_heads, ff_dim=ff_dim,
                                        dropout_rate=dropout_rate, name='sentence_transformer')(sentence_encoder)
sentence_dense = layers.TimeDistributed(layers.Dense(int(L2_dense_units)), name='sentence_dense')(sentence_transformer)
sentence_out = layers.Dropout(dropout_rate)(sentence_dense)
preds = layers.Dense(class_number, activation='softmax', name='sentence_output')(sentence_out)
model = keras.Model(sentence_input, preds)
model.summary()
# ==========================================================================
Everything is OK (for testing you can copy and paste it into Google Colab). But when I compile and fit the model with the following code, it throws an error:
X = tf.random.uniform(shape=(max_docs, max_sentences, max_words), minval=1, maxval=1000,
                      dtype=tf.dtypes.int32, seed=1)
y = tf.random.uniform(shape=(max_docs,), minval=0, maxval=class_number, dtype=tf.dtypes.int32, seed=1)
y = to_categorical(y)

model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(
    X, y, batch_size=32, epochs=25,
)
The error is:
ValueError: Shapes (None, 5) and (None, 15, 5) are incompatible
When I had a similar error, I found that a Flatten() layer helped; I had incompatible shapes of (None, x, y) and (None, y).
If you add a flatten layer after the part that gives you the (None, 15, 5), it should output something like (None, 75).
The flatten layer merely collapses dimensions: when I did this I got an output of (None, x*y), and TensorFlow was then able to match the two shapes, since y divides x*y.
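As a rough sketch of how that could look in the model from the question (the sentence_flat name is mine; the Flatten collapses the per-sentence outputs before the final Dense so the classifier produces one (None, class_number) prediction per document, and the TimeDistributed wrapper is dropped because its input is now 2-D; whether flattening or pooling over sentences is the better choice is a modelling decision):

# ... everything unchanged up to sentence_transformer ...
sentence_flat = layers.Flatten(name='sentence_flatten')(sentence_transformer)
sentence_dense = layers.Dense(int(L2_dense_units), name='sentence_dense')(sentence_flat)
sentence_out = layers.Dropout(dropout_rate)(sentence_dense)
preds = layers.Dense(class_number, activation='softmax', name='sentence_output')(sentence_out)
model = keras.Model(sentence_input, preds)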
I am building an autoencoder model and I have found two snippets of code with the same architecture, one in Keras and one in PyTorch. But when I run them, there is a large difference in training time even though they are using the same architecture. Could you please explain why I am encountering such a huge difference in time as well as performance?
PyTorch Code
import copy
import numpy as np
import torch
from torch import nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Encoder(nn.Module):
    def __init__(self, seq_len, n_features, embedding_dim=64):
        super(Encoder, self).__init__()
        self.seq_len, self.n_features = seq_len, n_features
        self.embedding_dim, self.hidden_dim = embedding_dim, 2 * embedding_dim
        self.rnn1 = nn.LSTM(
            input_size=n_features,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )
        self.rnn2 = nn.LSTM(
            input_size=self.hidden_dim,
            hidden_size=embedding_dim,
            num_layers=1,
            batch_first=True
        )

    def forward(self, x):
        x = x.reshape((1, self.seq_len, self.n_features))
        x, (_, _) = self.rnn1(x)
        x, (hidden_n, _) = self.rnn2(x)
        return hidden_n.reshape((self.n_features, self.embedding_dim))


class Decoder(nn.Module):
    def __init__(self, seq_len, input_dim=64, n_features=1):
        super(Decoder, self).__init__()
        self.seq_len, self.input_dim = seq_len, input_dim
        self.hidden_dim, self.n_features = 2 * input_dim, n_features
        self.rnn1 = nn.LSTM(
            input_size=input_dim,
            hidden_size=input_dim,
            num_layers=1,
            batch_first=True
        )
        self.rnn2 = nn.LSTM(
            input_size=input_dim,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )
        self.output_layer = nn.Linear(self.hidden_dim, n_features)

    def forward(self, x):
        x = x.repeat(self.seq_len, self.n_features)
        x = x.reshape((self.n_features, self.seq_len, self.input_dim))
        x, (hidden_n, cell_n) = self.rnn1(x)
        x, (hidden_n, cell_n) = self.rnn2(x)
        x = x.reshape((self.seq_len, self.hidden_dim))
        return self.output_layer(x)


class RecurrentAutoencoder(nn.Module):
    def __init__(self, seq_len, n_features, embedding_dim=64):
        super(RecurrentAutoencoder, self).__init__()
        self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
        self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x


model = RecurrentAutoencoder(seq_len, n_features, 128)
import time

time_dict = {}


def train_model(model, train_dataset, val_dataset, n_epochs):
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.L1Loss(reduction='sum').to(device)
    history = dict(train=[], val=[])
    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss = 10000.0
    time_dict[0] = time.time()
    for epoch in range(1, n_epochs + 1):
        model = model.train()
        train_losses = []
        for seq_true in train_dataset:
            optimizer.zero_grad()
            seq_true = seq_true.to(device)
            seq_pred = model(seq_true)
            loss = criterion(seq_pred, seq_true)
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        val_losses = []
        model = model.eval()
        with torch.no_grad():
            for seq_true in val_dataset:
                seq_true = seq_true.to(device)
                seq_pred = model(seq_true)
                loss = criterion(seq_pred, seq_true)
                val_losses.append(loss.item())
        train_loss = np.mean(train_losses)
        val_loss = np.mean(val_losses)
        history['train'].append(train_loss)
        history['val'].append(val_loss)
        if val_loss < best_loss:
            best_loss = val_loss
            best_model_wts = copy.deepcopy(model.state_dict())
        print(f'Epoch {epoch}: train loss {train_loss} val loss {val_loss}')
        stop = time.time()
        time_dict[epoch] = stop
    model.load_state_dict(best_model_wts)
    return model.eval(), history


model, history = train_model(
    model,
    train_dataset,
    val_dataset,
    n_epochs=10)
Output of print(model)

RecurrentAutoencoder(
  (encoder): Encoder(
    (rnn1): LSTM(1, 256, batch_first=True)
    (rnn2): LSTM(256, 128, batch_first=True)
  )
  (decoder): Decoder(
    (rnn1): LSTM(128, 128, batch_first=True)
    (rnn2): LSTM(128, 256, batch_first=True)
    (output_layer): Linear(in_features=256, out_features=1, bias=True)
  )
)
Keras Code
model = keras.Sequential()
model.add(keras.layers.LSTM(
    units=256,
    input_shape=(X_train.shape[1], X_train.shape[2]), return_sequences=True))
model.add(keras.layers.LSTM(units=128, return_sequences=True))
model.add(keras.layers.LSTM(units=256, return_sequences=True))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(units=X_train.shape[2])))
model.compile(loss='mae', optimizer='adam')

nb_epoch = 10
autoencoder.compile(optimizer='adam',
                    loss='mean_squared_error')
tensorboard = TensorBoard(log_dir='/tmp/logs',
                          histogram_freq=0,
                          write_graph=True,  # to visualize
                          write_images=True)
history = autoencoder.fit(X_train, X_train,
                          epochs=nb_epoch,
                          shuffle=True,
                          validation_data=(X_validate, X_validate),
                          verbose=1,
                          callbacks=[tensorboard]).history
Output of print(model.summary())

Layer (type)                  Output Shape          Param #
=============================================================
lstm_6 (LSTM)                 (None, 1, 256)        266240
lstm_7 (LSTM)                 (None, 1, 128)        197120
lstm_8 (LSTM)                 (None, 1, 256)        394240
time_distributed (TimeDistri  (None, 1, 3)          771
The architectures of these two models look the same, but there is a difference in performance and training time.
/pytorch/aten/src/ATen/native/cudnn/RNN.cpp:1266: UserWarning: RNN module weights are not part of single contiguous chunk of memory.
This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters().
Hello. I am using PyTorch.
I am trying to use the DataParallel function in PyTorch, but the model contains an LSTM. I'm warned to flatten the weights again, but I don't know when and where to call flatten_parameters().
Can you let me know?
This is my model
import torch.nn as nn
from torchvision import models


class ConvLstm(nn.Module):
    def __init__(self, latent_dim, model, hidden_size, lstm_layers, bidirectional, n_class):
        super(ConvLstm, self).__init__()
        self.conv_model = Pretrained_conv(latent_dim, model)
        self.Lstm = Lstm(latent_dim, hidden_size, lstm_layers, bidirectional)
        self.output_layer = nn.Sequential(
            nn.Linear(2 * hidden_size if bidirectional ==
                      True else hidden_size, n_class),
            nn.Softmax(dim=-1)
        )

    def forward(self, x):
        batch_size, timesteps, channel_x, h_x, w_x = x.shape
        conv_input = x.view(batch_size * timesteps, channel_x, h_x, w_x)
        conv_output = self.conv_model(conv_input)
        lstm_input = conv_output.view(batch_size, timesteps, -1)
        lstm_output = self.Lstm(lstm_input)
        lstm_output = lstm_output[:, -1, :]
        output = self.output_layer(lstm_output)
        return output


class Pretrained_conv(nn.Module):
    def __init__(self, latent_dim, model):
        if model == 'resnet152':
            super(Pretrained_conv, self).__init__()
            self.conv_model = models.resnet152(pretrained=True)
            # ====== freezing all of the layers ======
            for param in self.conv_model.parameters():
                param.requires_grad = False
            # ====== changing the last FC layer to an output with the size we need. this layer is un freezed ======
            self.conv_model.fc = nn.Linear(
                self.conv_model.fc.in_features, latent_dim)

    def forward(self, x):
        return self.conv_model(x)


class Lstm(nn.Module):
    def __init__(self, latent_dim, hidden_size, lstm_layers, bidirectional):
        super(Lstm, self).__init__()
        self.Lstm = nn.LSTM(latent_dim, hidden_size=hidden_size,
                            num_layers=lstm_layers, batch_first=True, bidirectional=bidirectional)
        self.hidden_state = None

    def reset_hidden_state(self):
        self.hidden_state = None

    def forward(self, x):
        output, self.hidden_state = self.Lstm(x, self.hidden_state)
        return output
The LSTM is then used by executing the following code.
def foward_step(model, images, labels, criterion, mode=''):
    model.module.Lstm.reset_hidden_state()
    if mode == 'test':
        with torch.no_grad():
            output = model(images)
    else:
        output = model(images)
    loss = criterion(output, labels)
    # Accuracy calculation
    predicted_labels = output.detach().argmax(dim=1)
    acc = (predicted_labels == labels).cpu().numpy().sum()
    return loss, acc, predicted_labels.cpu()
This is the main code:
model = nn.DataParallel(model, device_ids=[0,1,2,3]).cuda()
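For what it's worth, the warning itself says to call flatten_parameters(). A minimal sketch of one place it is commonly called is at the top of the LSTM wrapper's forward, reusing the Lstm class from the question above (whether this is the right spot for your DataParallel setup is something to verify):

class Lstm(nn.Module):
    def __init__(self, latent_dim, hidden_size, lstm_layers, bidirectional):
        super(Lstm, self).__init__()
        self.Lstm = nn.LSTM(latent_dim, hidden_size=hidden_size,
                            num_layers=lstm_layers, batch_first=True, bidirectional=bidirectional)
        self.hidden_state = None

    def reset_hidden_state(self):
        self.hidden_state = None

    def forward(self, x):
        # compact the LSTM weights into a single contiguous chunk before the call,
        # as the warning suggests; DataParallel replicas otherwise end up with
        # non-contiguous weight copies and re-emit the warning on every forward
        self.Lstm.flatten_parameters()
        output, self.hidden_state = self.Lstm(x, self.hidden_state)
        return output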