Updating specific rows of a tensor matrix during the gradient update? - python

I have been trying to implement the paper: SeER: An Explainable Deep Learning MIDI-based Hybrid Song Recommender System.
So, what I have been doing is this:
Model Code:
import numpy as np
import tensorflow as tf

class HybridFactorization(tf.keras.layers.Layer):
    # embedding_size is also the number of LSTM units
    # num_users, num_movies = input_shape
    # required_users: (batch_size, embedding_size)
    # songs_output: (batch_size, embedding_size)
    def __init__(self, embedding_size, num_users, num_tracks):
        super(HybridFactorization, self).__init__()
        self.embedding_size = embedding_size
        self.num_users = num_users
        self.num_tracks = num_tracks
        self.required_users = None
        self.U = self.add_weight("U",
                                 shape=[self.num_users, self.embedding_size],
                                 dtype=tf.float32,
                                 initializer=tf.initializers.GlorotUniform())
        self.lstm = tf.keras.layers.LSTM(self.embedding_size)

    def call(self, user_index, songs_batch):
        output_lstm = self.lstm(songs_batch)
        # Copying U through NumPy detaches it from the graph, so no gradient reaches U.
        self.required_users = self.U.numpy()
        self.required_users = tf.convert_to_tensor(self.required_users[np.array(user_index)],
                                                   dtype=tf.float32)
        return tf.matmul(self.required_users, output_lstm, transpose_b=True)


class HybridRecommender(tf.keras.Model):
    def __init__(self, embedding_size, num_users, num_tracks):
        super(HybridRecommender, self).__init__()
        self.HybridFactorization = HybridFactorization(embedding_size,
                                                       num_users, num_tracks)

    def call(self, user_index, songs_batch):
        output = self.HybridFactorization(user_index, songs_batch)
        return output
Utility Functions and running the model:
def loss_fn(source, target):
    mse = tf.keras.losses.MeanSquaredError()
    return mse(source, target)

model = HybridRecommender(EMBEDDING_SIZE, num_users, num_tracks)
Xhat = model(user_index, songs_batch)
tf.keras.backend.clear_session()
optimizer = tf.keras.optimizers.Adam()

EPOCHS = 1
for epoch in range(EPOCHS):
    start = time.time()
    total_loss = 0
    for (batch, (input_batch, target_batch)) in enumerate(train_dataset):
        songs_batch = create_songs_batch(input_batch)
        user_index = input_batch[:, 0].numpy()
        X = create_pivot_batch(input_batch, target_batch)
        with tf.GradientTape() as tape:
            Xhat = model(user_index, songs_batch)
            batch_loss = loss_fn(X, Xhat)
        variables = model.trainable_variables
        gradients = tape.gradient(batch_loss, variables)
        optimizer.apply_gradients(zip(gradients, variables))
        total_loss += batch_loss
Now, various functions like create_songs_batch(input_batch) and create_pivot_batch(input_batch, target_batch) just provide data in the required format.
My model runs but I get the warning:
WARNING:tensorflow:Gradients do not exist for variables ['U:0'] when minimizing the loss.
Now, I can see why variable U is not being updated: converting it to NumPy breaks the path from the loss back to it.
I want to update some specific rows of U which are mentioned in user_index in every batch call.
Is there a way to do it?

So, I was able to solve the problem by not copying rows of U out of the graph. Instead, I built a temporary matrix that is the one-hot encoded form of user_index and multiplied it with U to get the desired rows; this also removed the warning.
Part of code that needs to be modified:
def call(self, user_index, songs_batch):
    # output_lstm: (batch_size, emb_sz)
    # batch_encoding: (batch_size, num_users)
    # required_users: (batch_size, emb_sz)
    output_lstm = self.lstm(songs_batch)
    user_idx = np.array(user_index)
    batch_encoding = np.zeros((user_idx.size, self.num_users))
    batch_encoding[np.arange(user_idx.size), user_idx] = 1
    batch_encoding = tf.convert_to_tensor(batch_encoding, dtype=tf.float32)
    self.required_users = tf.matmul(batch_encoding, self.U)
    return tf.matmul(self.required_users, output_lstm, transpose_b=True)
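For reference, a shorter alternative (my own sketch, not part of the original fix) is tf.gather, which selects rows of U inside the graph, so gradients flow back to just the gathered rows (as IndexedSlices) and the dense one-hot matmul is avoided; the names below follow the code above:

def call(self, user_index, songs_batch):
    # output_lstm: (batch_size, emb_sz)
    output_lstm = self.lstm(songs_batch)
    user_idx = tf.convert_to_tensor(user_index, dtype=tf.int32)
    # Differentiable row selection: only the gathered rows of U receive gradient updates.
    self.required_users = tf.gather(self.U, user_idx)  # (batch_size, emb_sz)
    return tf.matmul(self.required_users, output_lstm, transpose_b=True)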

Related

How to train a new custom layer with already trained layer using Subclass Modelling?

The model MIMOSystem consists of many custom layers, of which only one, NeuralReceiver, is trainable, as shown below. After training the MIMOSystem model, I save its weights (which belong to the NeuralReceiver layer only). Now I want to load those weights into a second model, MIMOSystem2, since it also has a NeuralReceiver layer, and freeze it, then train MIMOSystem2 with the already trained NeuralReceiver layer and a new trainable layer, NN_decoder. How do I load weights into the NeuralReceiver() layer of the MIMOSystem2 model and freeze it?
class MIMOSystem(Model):  # Inherits from Keras Model
    def __init__(self, training):
        super(MIMOSystem, self).__init__()
        self.training = training
        self.constellation = Constellation("qam", num_bits_per_symbol)
        self.mapper = Mapper(constellation=self.constellation)
        self.demapper = Demapper("app", constellation=self.constellation)
        self.binary_source = BinarySource()
        self.channel = ApplyFlatFadingChannel(add_awgn=True)
        self.neural_receiver = NeuralReceiver()  # the only trainable layer
        self.encoder = encoder = LDPC5GEncoder(k, n)
        self.decoder = LDPC5GDecoder(encoder, hard_out=True)
        self.bce = tf.keras.losses.BinaryCrossentropy(from_logits=False)
        self.acc = tf.keras.metrics.BinaryAccuracy()

    @tf.function
    def __call__(self, batch_size, ebno_db):
        if self.training:
            coderate = 1.0
            codewords = self.binary_source([batch_size, num_tx_ant, k])
        else:
            coderate = k / n
            bits = self.binary_source([batch_size, num_tx_ant, k])
            codewords = self.encoder(bits)
        x = self.mapper(codewords)
        no = ebnodb2no(ebno_db, num_bits_per_symbol, coderate)
        channel_shape = [tf.shape(x)[0], num_rx_ant, num_tx_ant]
        h = complex_normal(channel_shape)
        y = self.channel([x, h, no])
        x_hat, no_eff = self.neural_receiver(y, h)  # custom trainable layer
        llr = self.demapper([x_hat, no_eff])
        if self.training:
            bits_hat = tf.nn.sigmoid(llr)
            loss = self.bce(codewords, bits_hat)
            acc = self.acc(codewords, bits_hat)
            return loss, acc
        else:
            bits_hat = self.decoder(llr)
            return bits, bits_hat
The trainable layer NeuralReceiver() consists of a few sublayers; only two are mentioned to give an idea.
class NeuralReceiver(Layer):
    def __init__(self):
        super().__init__()
        self.relu_layer = relu_layer()
        self.sign_layer = sign_layer()

    def __call__(self, y_, H_):
        # (body omitted in the question)
        return x_hat, no_eff
MIMOSystem2 would have a frozen NeuralReceiver layer and a trainable NN_decoder layer:
class MIMOSystem2(Model):  # Inherits from Keras Model
    def __init__(self, training):
        super(MIMOSystem2, self).__init__()
        self.training = training
        self.constellation = Constellation("qam", num_bits_per_symbol)
        self.mapper = Mapper(constellation=self.constellation)
        self.demapper = Demapper("app", constellation=self.constellation)
        self.binary_source = BinarySource()
        self.channel = ApplyFlatFadingChannel(add_awgn=True)
        self.neural_receiver = NeuralReceiver()  # the frozen layer
        self.encoder = encoder = LDPC5GEncoder(k, n)
        self.NN_decoder = NN_decoder()  # new trainable layer
        self.bce = tf.keras.losses.BinaryCrossentropy(from_logits=False)
        self.acc = tf.keras.metrics.BinaryAccuracy()

    @tf.function
    def __call__(self, batch_size, ebno_db):
        coderate = k / n
        bits = self.binary_source([batch_size, num_tx_ant, k])
        codewords = self.encoder(bits)
        x = self.mapper(codewords)
        no = ebnodb2no(ebno_db, num_bits_per_symbol, coderate)
        channel_shape = [tf.shape(x)[0], num_rx_ant, num_tx_ant]
        h = complex_normal(channel_shape)
        y = self.channel([x, h, no])
        x_hat, no_eff = self.neural_receiver(y, h)  # already trained layer, to be frozen
        llr = self.demapper([x_hat, no_eff])
        bits_hat = self.NN_decoder(llr)  # new trainable layer
        if self.training:
            loss = self.bce(codewords, bits_hat)
            acc = self.acc(codewords, bits_hat)
            return loss, acc
        else:
            return bits, bits_hat
for layer in model1.layers[-1].submodules:
    layer.trainable = False

# Now append your new layers. Mention the node after which you want to append them;
# here I append after the last node, so I use index -1.
x = model1.layers[-1].output
x = tf.keras.layers.Dense(...)(x)
...
model = tf.keras.Model(inputs, x)
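Since MIMOSystem and MIMOSystem2 are subclassed models rather than functional ones, a more direct route is the following minimal sketch (my assumption, not a verified answer): copy the trained NeuralReceiver weights between the two instances and freeze the sub-layer. It assumes both models have been built by calling them once, and that "mimo1_weights" is a hypothetical path you used with save_weights:

# Build both models once so their variables exist (batch_size/ebno_db as in your training setup).
model1 = MIMOSystem(training=True)
model1(batch_size, ebno_db)
model1.load_weights("mimo1_weights")   # hypothetical checkpoint path from model1.save_weights(...)

model2 = MIMOSystem2(training=True)
model2(batch_size, ebno_db)

# Copy the trained NeuralReceiver weights and freeze that sub-layer.
model2.neural_receiver.set_weights(model1.neural_receiver.get_weights())
model2.neural_receiver.trainable = False   # its variables drop out of model2.trainable_variables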

tensorflow autodiff slower than pytorch's counterpart

I am using TensorFlow 2.0 and trying to evaluate gradients for backpropagation through a simple feedforward neural network. Here's what my model looks like:
def __init__(self, input_size, output_size):
    inputs = tf.keras.Input(shape=(input_size,))
    hidden_layer1 = tf.keras.layers.Dense(30, activation='relu')(inputs)
    outputs = tf.keras.layers.Dense(output_size)(hidden_layer1)
    self.model = tf.keras.Model(inputs=inputs, outputs=outputs)
    self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    self.loss_function = tf.keras.losses.Huber()
The forward pass through this network is fine, but when I use GradientTape to train the model, it is at least 10x slower than PyTorch.
Training function:
def learn_modified_x(self, inputs, targets, actions):
    with tf.GradientTape() as tape:
        predictions = self.model(inputs)
        predictions_for_action = gather_single_along_axis(predictions, actions)
        loss = self.loss_function(targets, predictions_for_action)
    grads = tape.gradient(loss, self.model.trainable_weights)
    self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
I tried commenting out lines to find what is actually causing the problem and discovered that tape.gradient is a significant contributor.
Any idea?
PyTorch implementation
def __init__(self, input_size, nb_action):
    super(Network, self).__init__()
    self.input_size = input_size
    self.nb_action = nb_action
    self.fc1 = nn.Linear(input_size, 30)
    self.fc2 = nn.Linear(30, nb_action)

def forward(self, state):
    x = F.relu(self.fc1(state))
    q_values = self.fc2(x)
    return q_values

def learn(self, batch_state, batch_next_state, batch_reward, batch_action):
    outputs = self.model(batch_state).gather(1, batch_action.unsqueeze(1)).squeeze(1)
    next_outputs = self.model(batch_next_state).detach().max(1)[0]
    target = self.gamma * next_outputs + batch_reward
    td_loss = F.smooth_l1_loss(outputs, target)
    self.optimizer.zero_grad()
    td_loss.backward(retain_variables=True)
    self.optimizer.step()
def __init__(self, ...):
    ...
    self.model.call = tf.function(self.model.call)
    ...

You need to use tf.function to wrap your model's call function.
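A further sketch (my addition, not part of the original answer): wrapping the whole training step in tf.function, not only the model's call, usually removes most of the remaining overhead, because tape.gradient and the optimizer update then also run inside one compiled graph:

@tf.function  # traces forward pass, gradient computation, and update into a single graph
def learn_modified_x(self, inputs, targets, actions):
    with tf.GradientTape() as tape:
        predictions = self.model(inputs, training=True)
        predictions_for_action = gather_single_along_axis(predictions, actions)
        loss = self.loss_function(targets, predictions_for_action)
    grads = tape.gradient(loss, self.model.trainable_weights)
    self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
    return loss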

Pytorch - How to add a self-attention to another architecture

I'm a beginner with the PyTorch framework, and I'm trying to add multi-headed self-attention on top of another architecture (BERT) (this is a simple question, but I'm not familiar with PyTorch):
UPDATE 1
import math

class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.d_model = d_model
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x, seq_len=768, mask=None):
        pos_emb = self.pe[:, :seq_len]
        x = x * mask[:, :, None].float()
        x = x + pos_emb
        return x
The problem of how to add the transformer is in the following class:
class CamemBERTQA(nn.Module):
    def __init__(self, bert_type, hidden_size, num_labels, num_inter_layers=1, heads=12, do_lower_case=True):
        super(CamemBERTQA, self).__init__()
        self.do_lower_case = do_lower_case
        self.bert_type = bert_type
        self.hidden_size = hidden_size
        self.num_labels = num_labels
        self.num_inter_layers = num_inter_layers
        self.camembert = CamembertModel.from_pretrained(self.bert_type)

        # ---------------- Transformer ------------------------------------------
        self.d_model = self.hidden_size  # 768
        dropout = 0.1
        self.pos_emb = PositionalEncoding(d_model=self.d_model, dropout=dropout)
        self.transformer_inter = nn.ModuleList(
            [nn.TransformerEncoderLayer(d_model=self.d_model, nhead=heads, dim_feedforward=2048, dropout=dropout)
             for _ in range(num_inter_layers)])
        # ---------------- Transformer ------------------------------------------

        self.qa_outputs = nn.Linear(self.hidden_size, self.num_labels)

    def forward(self, input_ids, mask=None):
        bert_output = self.camembert(input_ids=input_ids)  # input_ids is a tensor

        # ---------------- Transformer ------------------------------------------
        seq_len = self.hidden_size
        x = self.pos_emb(x=bert_output, seq_len=seq_len, mask=None)
        for i in range(self.num_inter_layers):
            x = self.transformer_inter[i](i, x, x, 1 - mask)  # all_tokens * max_tokens * dim
        output = self.layer_norm(x)
        # ---------------- Transformer ------------------------------------------

        sequence_output = output[0]
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,)
        return x
Thank you so much.
So it seems that you're trying to add a Transformer network on top of the BERT component. It has to be mentioned that the self-attention network is only a part of the Transformer network, meaning that Transformers have other components besides self-attention as well. I would recommend using the Transformer (which has the self-attention component included) as an encoder that receives BERT vectors and transforms them into another representation (in another space).
Try this instead of self.attention = MultiHeadAttention():
self.transformer_inter = nn.ModuleList(
    [TransformerEncoderLayer(d_model, heads, d_ff, dropout)
     for _ in range(num_inter_layers)])
and then, in forward(), call self.transformer_inter in a loop, which will give you the representations produced by the Transformer architecture, like this:
def forward(self, bert_output, mask):
    batch_size, seq_len = bert_output.size(0), bert_output.size(1)

    # Transformer Encoder
    pos_emb = self.pos_emb.pe[:, :seq_len]
    x = bert_output * mask[:, :, None].float()
    x = x + pos_emb
    for i in range(self.num_inter_layers):
        x = self.transformer_inter[i](i, x, x, 1 - mask)  # all_tokens * max_tokens * dim
    x = self.layer_norm(x)  # the Transformer also normalizes the outputs from each layer

    # x holds the vectors encoded by the Transformer encoder
    return x
Then, using an nn.Linear(.) layer, do another transformation to map hidden_size to the number of labels for your task, which will give you the logits for each label. This should all be done within the BERT class that you posted.
Note that the TransformerEncoderLayer is a placeholder class that I used above. So you have to either implement it or use open source packages. As Transformers are quite well-known, I think you won't have trouble finding an implementation of it.
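If you go with PyTorch's built-in layers instead of implementing the placeholder, note that nn.TransformerEncoderLayer has a different forward signature from the loop above: it takes the sequence plus an optional padding mask, not (i, x, x, 1 - mask). A minimal sketch, assuming PyTorch >= 1.9 (for batch_first) and dummy shapes in place of your real BERT output:

import torch
import torch.nn as nn

d_model, heads, num_inter_layers = 768, 12, 1

encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=heads,
                                           dim_feedforward=2048, dropout=0.1,
                                           batch_first=True)
transformer_inter = nn.TransformerEncoder(encoder_layer, num_layers=num_inter_layers)

bert_output = torch.randn(8, 128, d_model)        # (batch, seq_len, hidden) dummy BERT vectors
pad_mask = torch.zeros(8, 128, dtype=torch.bool)  # True marks padded positions
x = transformer_inter(bert_output, src_key_padding_mask=pad_mask)  # (8, 128, d_model)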

pytorch, Using nn.DataParallel in LSTM

/pytorch/aten/src/ATen/native/cudnn/RNN.cpp:1266: UserWarning: RNN module weights are not part of single contiguous chunk of memory.
This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters().
Hello. I am using PyTorch. I am trying to use the DataParallel function, but the model is an LSTM. I'm warned to flatten the model again, but I don't know when and where to flatten. Can you let me know?
This is my model
import torch.nn as nn
from torchvision import models

class ConvLstm(nn.Module):
    def __init__(self, latent_dim, model, hidden_size, lstm_layers, bidirectional, n_class):
        super(ConvLstm, self).__init__()
        self.conv_model = Pretrained_conv(latent_dim, model)
        self.Lstm = Lstm(latent_dim, hidden_size, lstm_layers, bidirectional)
        self.output_layer = nn.Sequential(
            nn.Linear(2 * hidden_size if bidirectional ==
                      True else hidden_size, n_class),
            nn.Softmax(dim=-1)
        )

    def forward(self, x):
        batch_size, timesteps, channel_x, h_x, w_x = x.shape
        conv_input = x.view(batch_size * timesteps, channel_x, h_x, w_x)
        conv_output = self.conv_model(conv_input)
        lstm_input = conv_output.view(batch_size, timesteps, -1)
        lstm_output = self.Lstm(lstm_input)
        lstm_output = lstm_output[:, -1, :]
        output = self.output_layer(lstm_output)
        return output


class Pretrained_conv(nn.Module):
    def __init__(self, latent_dim, model):
        if model == 'resnet152':
            super(Pretrained_conv, self).__init__()
            self.conv_model = models.resnet152(pretrained=True)
            # ====== freezing all of the layers ======
            for param in self.conv_model.parameters():
                param.requires_grad = False
            # ====== changing the last FC layer to an output with the size we need; this layer is left unfrozen ======
            self.conv_model.fc = nn.Linear(
                self.conv_model.fc.in_features, latent_dim)

    def forward(self, x):
        return self.conv_model(x)


class Lstm(nn.Module):
    def __init__(self, latent_dim, hidden_size, lstm_layers, bidirectional):
        super(Lstm, self).__init__()
        self.Lstm = nn.LSTM(latent_dim, hidden_size=hidden_size,
                            num_layers=lstm_layers, batch_first=True, bidirectional=bidirectional)
        self.hidden_state = None

    def reset_hidden_state(self):
        self.hidden_state = None

    def forward(self, x):
        output, self.hidden_state = self.Lstm(x, self.hidden_state)
        return output
I run the LSTM by executing the following code:
def foward_step(model, images, labels, criterion, mode=''):
    model.module.Lstm.reset_hidden_state()
    if mode == 'test':
        with torch.no_grad():
            output = model(images)
    else:
        output = model(images)
    loss = criterion(output, labels)
    # Accuracy calculation
    predicted_labels = output.detach().argmax(dim=1)
    acc = (predicted_labels == labels).cpu().numpy().sum()
    return loss, acc, predicted_labels.cpu()
This is main
model = nn.DataParallel(model, device_ids=[0,1,2,3]).cuda()
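No answer is attached in this excerpt, but a common pattern (my assumption, not from this thread) is to call flatten_parameters() at the top of the LSTM wrapper's forward, so the weights are re-compacted on every replica that DataParallel creates; a sketch of the Lstm class from above, modified accordingly:

class Lstm(nn.Module):
    def __init__(self, latent_dim, hidden_size, lstm_layers, bidirectional):
        super(Lstm, self).__init__()
        self.Lstm = nn.LSTM(latent_dim, hidden_size=hidden_size,
                            num_layers=lstm_layers, batch_first=True, bidirectional=bidirectional)
        self.hidden_state = None

    def reset_hidden_state(self):
        self.hidden_state = None

    def forward(self, x):
        # Re-compact the weight memory before the cuDNN call (what the warning asks for).
        self.Lstm.flatten_parameters()
        output, self.hidden_state = self.Lstm(x, self.hidden_state)
        return output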

Keras/Tf Custom Loss Function at the level of tensor entries

I would like to write a custom loss function in Keras that takes in a tensor of predicted values and then, for each one, processes it through another function before adding it to the loss. Here is what this looks like; this is a complete code example, please look at the function sample_loss.
class AE():
    def __init__(self, inputSize=78, numNeurons=50, latentSize=5, expSize=10, batchSize=1, activation="relu"):
        K.clear_session()
        self.inputSize = inputSize
        self.latentSize = latentSize
        self.activation = activation
        self.numNeurons = numNeurons
        self.expSize = expSize
        self.batchSize = batchSize
        self.encoder = self.createEncoder()
        self.decoder = self.createDecoder()
        self.filterModel = self.createFilterModel()
        self.AE = self.createAE()

    def sample_loss(self, y_pred):
        loss = 0.0
        for j in tf.range(0, self.inputSize, 2):
            pt = y_pred[j:j+2]
            loss += K.sum(self.filterModel.predict(pt))
        return loss

    def createEncoder(self):
        # create the encoder
        xIn = Input(shape=(self.inputSize,), name="data_in")
        y_train = Input(shape=(self.latentSize,), name='y_train')
        x = Dense(self.numNeurons, activation=self.activation)(xIn)
        xlatent = Dense(self.latentSize, activation=self.activation)(x)
        # create the encoder
        encoder = Model(xIn, xlatent)
        return encoder

    def createDecoder(self):
        # create the decoder
        latentIn = Input(shape=(self.latentSize,))
        x = Dense(self.numNeurons, activation=self.activation)(latentIn)
        out = Dense(self.inputSize, activation="linear")(x)
        # create a decoder
        decoder = Model(latentIn, out)
        return decoder

    def createAE(self):
        xIn = Input(shape=(self.inputSize,), name="ae_data_in")
        # create the total model
        latentOutput = self.encoder(xIn)
        dataOutput = self.decoder(latentOutput)
        model = Model(inputs=xIn, outputs=dataOutput)
        model.add_loss(self.sample_loss(dataOutput))
        model.summary()
        return model

    def createFilterModel(self):
        xIn = Input(shape=(2,), name="filter_data_in")
        x = Dense(4, activation='sigmoid')(xIn)
        x = Dense(1, activation='sigmoid')(x)
        model = Model(xIn, x)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model


modelAE = AE()
modelAE.AE.compile(optimizer='adam', loss="mse", metrics=['accuracy'])
So I have a dictionary of models, filterModels. These are actually predictive models in Keras. For each model in this dictionary, I want to pass it the corresponding part of y_pred and then add its output to the loss.
How can I do this?
Here is the error the current code gives me:
'''
ValueError: When feeding symbolic tensors to a model, we expect the tensors to have a static batch size. Got tensor with shape: (None, 78)
'''
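No answer is included in this excerpt, but one likely direction (a sketch under my own assumptions, reusing the names from the code above) is to call filterModel symbolically instead of using predict, and to reshape y_pred so each consecutive pair of entries becomes one (batch, 2) input; on older TF/Keras versions the TF ops may need to be wrapped in a Lambda layer:

def sample_loss(self, y_pred):
    # (None, inputSize) -> (None * inputSize/2, 2): each row is one 2-entry point
    pts = tf.reshape(y_pred, (-1, 2))
    # Calling the sub-model directly (not .predict) keeps the computation symbolic and differentiable.
    return K.sum(self.filterModel(pts))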
