Debugging Neural Network's feedforward propagation - python

I am implementing a Neural Network's forward propagation.
This is my train method:
def train(self, x, y):
    for layer in self.layers:
        prediction = layer.forward_prop(x.T)  # column vector
and this is the forward_prop method:
def forward_prop(self, x):
    if self.is_input:
        self.out = x
    else:
        # bug here: x has to be updated according to previous layer's output!
        w_sum = np.dot(self.weight, x) + self.bias
        self.out = self.activation(w_sum)
    return self.out
There is an error, since layer 2 tries to multiply self.weight by x instead of by the previous layer's output:
ValueError: shapes (4,4) and (2,1) not aligned: 4 (dim 1) != 2 (dim 0)
That is, I want:
Layer 0 (input layer): returns z^(0) = x
Layer 1 (first hidden layer): returns z^(1) = sig(W^(1) · z^(0) + b^(1))
Layer 2 (second hidden layer): returns z^(2) = sig(W^(2) · z^(1) + b^(2))
Layer 3 (output): returns z^(3) = sig(W^(3) · z^(2) + b^(3))
...
Layer i: returns z^(i) = sig(W^(i) · z^(i-1) + b^(i))
...
How can I store the previous layer's output?
This is my reproducible example:
import numpy as np


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


class NeuralNetwork:
    def __init__(self):
        self.layers = []

    def add_layer(self, layer):
        self.layers.append(layer)

    def create(self):
        for i, layer in enumerate(self.layers):
            if i == 0:
                layer.is_input = True
            else:
                layer.init_parameters(self.layers[i - 1].neurons)

    def summary(self):
        for i, layer in enumerate(self.layers):
            print("Layer", i)
            print("neurons:", layer.neurons)
            print("is_input:", layer.is_input)
            print("act:", layer.activation)
            print("weight:", np.shape(layer.weight))
            print(layer.weight)
            print("bias:", np.shape(layer.bias))
            print(layer.bias)
            print("")

    def train(self, x, y):
        for layer in self.layers:
            prediction = layer.forward_prop(x.T)  # column vector


class Layer:
    def __init__(self, neurons, is_input=False, activation=None):
        self.out = None
        self.weight = None
        self.bias = None
        self.neurons = neurons
        self.is_input = is_input
        self.activation = activation

    def init_parameters(self, prev_layer_neurons):
        self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.neurons, prev_layer_neurons)))
        self.bias = np.asmatrix(np.random.normal(0, 0.5, self.neurons)).T  # column vector
        if self.activation is None:
            self.activation = sigmoid

    def forward_prop(self, x):
        if self.is_input:
            self.out = x
        else:
            w_sum = np.dot(self.weight, x) + self.bias  # Bug
            self.out = self.activation(w_sum)
        return self.out


if __name__ == '__main__':
    net = NeuralNetwork()
    d = 2  # input dimension
    c = 1  # output dimension
    for m in (d, 4, 4, c):
        layer = Layer(m)
        net.add_layer(layer)
    net.create()
    # net.summary()  # dbg

    # Training set
    X = np.asmatrix([
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1]
    ])
    y = np.asarray([0, 0, 1, 0])

    net.train(X[2], y[2])

I simply solved it this way:
def train(self, x, y):
    z = x.T
    for layer in self.layers:
        z = layer.forward_prop(z)
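
With the network above (d = 2, two hidden layers of 4 neurons, c = 1), each layer now consumes the previous layer's output, so the shapes chain cleanly. A quick shape trace (a sketch reusing net and X from the example):

z = X[2].T                     # (2, 1) input column vector
for layer in net.layers:
    z = layer.forward_prop(z)  # (2, 1) -> (4, 1) -> (4, 1) -> (1, 1)
print(np.shape(z))             # (1, 1): the network's prediction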

Related

Combining two PyTorch functions into one

I have two functions like this (the source code of the functions is here):
device = torch.device('cuda')

dataset = TUDataset(root='/tmp/MUTAG', name='MUTAG', use_node_attr=True)
loader = DataLoader(dataset, batch_size=32, shuffle=True)

train_dataset = dataset  # just for testing
val_dataset = dataset
test_dataset = dataset

graph_train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
graph_val_loader = DataLoader(val_dataset, batch_size=8)

gnn_layer_by_name = {
    "GCN": geom_nn.GCNConv,
    "GAT": geom_nn.GATConv,
    "GraphConv": geom_nn.GraphConv
}
class GCNLayer(nn.Module):
    def __init__(self, c_in, c_out):
        super().__init__()
        self.projection = nn.Linear(c_in, c_out)

    def forward(self, node_feats, adj_matrix):
        num_neighbours = adj_matrix.sum(dim=-1, keepdims=True)
        node_feats = self.projection(node_feats)
        node_feats = torch.bmm(adj_matrix, node_feats)
        node_feats = node_feats / num_neighbours
        return node_feats
class GNNModel(nn.Module):
    def __init__(self, c_in, c_hidden, c_out, num_layers, activation_function, optimizer_name, learning_rate, dp_rate_linear, layer_name="GCN", **kwargs):
        super().__init__()
        gnn_layer = gnn_layer_by_name[layer_name]
        layers = []
        activation_function = eval(activation_function)  # not great to use
        in_channels, out_channels = c_in, c_hidden
        for l_idx in range(num_layers - 1):
            layers += [
                gnn_layer(in_channels=in_channels,
                          out_channels=out_channels,
                          **kwargs),
                activation_function,
                nn.Dropout(p=dp_rate_linear)
            ]
            in_channels = c_hidden
        layers += [gnn_layer(in_channels=in_channels,
                             out_channels=c_out,
                             **kwargs)]
        self.layers = nn.ModuleList(layers)

    def forward(self, x, edge_index):
        for l in self.layers:
            if isinstance(l, geom_nn.MessagePassing):
                x = l(x, edge_index)
            else:
                x = l(x)
        return x
class GraphGNNModel(nn.Module):
    def __init__(self, c_in, c_hidden, c_out, dp_rate_linear, **kwargs):
        super().__init__()
        self.GNN = GNNModel(c_in=c_in,
                            c_hidden=c_hidden,
                            c_out=c_hidden,
                            dp_rate_linear=dp_rate_linear,
                            **kwargs)
        self.head = nn.Sequential(
            nn.Dropout(p=dp_rate_linear),
            nn.Linear(c_hidden, c_out)
        )

    def forward(self, x, edge_index, batch_idx):
        x = self.GNN(x, edge_index)
        x = geom_nn.global_mean_pool(x, batch_idx)
        x = self.head(x)
        return x
As you can see, I really don't need GNNModel and GraphGNNModel to be two separate classes; the second one just adds a sequential head to the end of the first.
I tried combining them by doing:
class GNNModel(nn.Module):
    def __init__(self, c_in, c_hidden, c_out, num_layers, activation_function, optimizer_name, learning_rate, dp_rate_linear, layer_name="GCN", **kwargs):
        """
        Inputs:
            c_in - Dimension of input features
            c_hidden - Dimension of hidden features
            c_out - Dimension of the output features. Usually number of classes in classification
            num_layers - Number of "hidden" graph layers
            layer_name - String of the graph layer to use
            dp_rate_linear - Dropout rate to apply throughout the network
            kwargs - Additional arguments for the graph layer (e.g. number of heads for GAT; I'm not using GAT here)
            activation_function - Activation function
        """
        super().__init__()
        gnn_layer = gnn_layer_by_name[layer_name]
        layers = []
        activation_function = eval(activation_function)  # not great to use
        in_channels, out_channels = c_in, c_hidden
        for l_idx in range(num_layers - 1):
            layers += [
                gnn_layer(in_channels=in_channels,
                          out_channels=out_channels,
                          **kwargs),
                activation_function,
                nn.Dropout(p=dp_rate_linear)
            ]
            in_channels = c_hidden
        layers += [gnn_layer(in_channels=in_channels,
                             out_channels=c_out,
                             **kwargs)]
        self.layers = nn.ModuleList(layers)
        self.head = nn.Sequential(
            nn.Dropout(p=dp_rate_linear),
            nn.Linear(c_hidden, c_out)
        )

    def forward(self, x, edge_index):
        for l in self.layers:
            if isinstance(l, geom_nn.MessagePassing):  # passing data between conv
                x = l(x, edge_index)  # what is this
            else:
                x = l(x)
        x = self.GNN(x, edge_index)
        x = geom_nn.global_mean_pool(x, batch_idx)
        x = self.head(x)
        return x
But I get the error:
TypeError: forward() takes 3 positional arguments but 4 were given
Could someone show me the correct way to combine these? (The exact explanation of the code is in the graph-level tasks / graph classification section here.)
Try adding batch_idx as a parameter in your new forward function. I noted some other inconsistencies, like: where is geom_nn being passed to the function? You probably want to use self.geom_nn, and for that you need to fix the __init__() part as well.
def forward(self, x, edge_index, batch_idx):  # here you must pass batch_idx
    for l in self.layers:
        if isinstance(l, geom_nn.MessagePassing):  # passing data between conv
            x = l(x, edge_index)
        else:
            x = l(x)
    x = self.GNN(x, edge_index)
    x = geom_nn.global_mean_pool(x, batch_idx)  # here you use batch_idx
    # where is geom_nn coming from???
    x = self.head(x)
    return x
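
For completeness, a minimal sketch of how the combined forward could look once batch_idx is threaded through. It assumes geom_nn is the module-level torch_geometric.nn import from the original snippet, and it drops the stray self.GNN call, since the layer stack in self.layers already plays that role in the combined class:

def forward(self, x, edge_index, batch_idx):
    # Run the graph layers; MessagePassing layers also need edge_index.
    for l in self.layers:
        if isinstance(l, geom_nn.MessagePassing):
            x = l(x, edge_index)
        else:
            x = l(x)
    # Pool node embeddings into one vector per graph, then classify.
    x = geom_nn.global_mean_pool(x, batch_idx)
    x = self.head(x)
    return x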

Training loss increases instead of decrease with epochs

I am developing my first feed-forward, fully-connected ANN from scratch, trained in batch mode on a toy training set. I am using back-propagation to calculate the gradient of the loss function with respect to the weights and biases, and gradient descent as the learning rule. But when I print the training loss, it grows as the epochs increase:
E(0) on TrS is: [[7.83898769]]
E(1) on TrS is: [[10.00738465]]
E(2) on TrS is: [[10.76653098]]
E(3) on TrS is: [[15.94001008]]
E(4) on TrS is: [[23.80650667]]
E(5) on TrS is: [[28.65805023]]
E(6) on TrS is: [[29.56550719]]
E(7) on TrS is: [[30.5424694]]
E(8) on TrS is: [[34.26980112]]
E(9) on TrS is: [[39.9948856]]
This is my loss_functions.py file:
import numpy as np


def sum_of_squares(c, t, y, derivative=False):
    ret = 0
    for k in range(c):
        ret += np.square(y - t)
    ret = 1 / 2 * ret
    if derivative:
        return y - t
    return ret
This is my activation_functions.py file:
import numpy as np


def sigmoid(a, derivative=False):
    f_a = 1 / (1 + np.exp(-a))
    df_a = np.multiply(f_a, (1 - f_a))
    if derivative:
        return df_a
    return f_a


def identity(a, derivative=False):
    f = a
    df = np.ones(np.shape(a))
    if derivative:
        return df
    return f
And this is the main.py file:
from activation_functions import *
from loss_functions import *


class NeuralNetwork:
    def __init__(self):
        self.layers = []

    def add_layer(self, layer):
        self.layers.append(layer)

    def create(self):
        for i, layer in enumerate(self.layers):
            if i == 0:
                layer.type = "input"
            else:
                if i == len(self.layers) - 1:
                    layer.type = "output"
                else:
                    layer.type = "hidden"
                layer.configure(self.layers[i - 1].neurons)

    def train(self, X, targets):
        MAX_EPOCHS = 10
        loss_function = sum_of_squares
        E = 0  # error over the whole dataset
        for epoch in range(MAX_EPOCHS):
            for i, x in enumerate(X):
                target = targets[i]
                prediction = self.forward_prop(x.T)
                E_n = loss_function(c, target, prediction)
                E += E_n
                self.back_prop(target, local_loss=sum_of_squares)
            print("E(%d) on TrS is:" % epoch, E)  # increasing!!!
            self.learning_rule(l_rate=0.05)

    def forward_prop(self, z):
        for layer in self.layers:
            z = layer.forward_prop_step(z)
        return z

    def back_prop(self, target, local_loss):
        for i, layer in enumerate(self.layers[:0:-1]):
            next_layer = self.layers[-i]
            prev_layer = self.layers[-i - 2]
            layer.back_prop_step(next_layer, prev_layer, target, local_loss)

    def learning_rule(self, l_rate):
        # GD
        for layer in self.layers:
            if layer.type != "input":
                layer.weight -= l_rate * layer.dE_dW
                layer.bias -= l_rate * layer.dE_db
class Layer:
    def __init__(self, neurons, type=None, activation=None):
        self.dE_dW = 0
        self.dE_db = 0
        self.dEn_db = None  # based on the n-th item
        self.dEn_dW = None  # based on the n-th item
        self.dact_a = None
        self.out = None
        self.weight = None
        self.bias = None
        self.w_sum = None
        self.neurons = neurons
        self.type = type
        self.activation = activation
        self.deltas = None

    def configure(self, prev_layer_neurons):
        self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.neurons, prev_layer_neurons)))
        self.bias = np.asmatrix(np.random.normal(0, 0.5, self.neurons)).T
        if self.activation is None:
            if self.type == "hidden":
                self.activation = sigmoid
            elif self.type == "output":
                self.activation = identity

    def forward_prop_step(self, z):
        if self.type == "input":
            self.out = z
        else:
            self.w_sum = np.dot(self.weight, z) + self.bias
            self.out = self.activation(self.w_sum)
        return self.out

    def back_prop_step(self, next_layer, prev_layer, target, local_loss):
        if self.type == "input":
            pass
        elif self.type == "output":
            self.dact_a = self.activation(self.w_sum, derivative=True)
            self.deltas = np.multiply(self.dact_a, local_loss(c, target, self.out, derivative=True))
        else:
            self.dact_a = self.activation(self.w_sum, derivative=True)
            self.deltas = np.multiply(self.dact_a, np.dot(next_layer.weight.T, next_layer.deltas))
        self.dEn_dW = np.dot(self.deltas, prev_layer.out.T)
        self.dEn_db = self.deltas
        self.dE_dW += self.dEn_dW
        self.dE_db += self.dEn_db
if __name__ == '__main__':
    net = NeuralNetwork()
    for m in (2, 4, 4, 1):
        net.add_layer(Layer(m))
    net.create()

    X = np.asmatrix([
        [1, 0],
        [1, 1],
        [0, 1],
        [0, 0]
    ])
    targets = np.asarray([1, 0, 0, 0])
    net.train(X, targets)
What I did to try to fix the problem:

Check for any bug
Decrease the learning rate (l_rate)
Increase the MAX_EPOCHS value
Replace the - sign with + in the GD update

Unfortunately none of these worked. There must be a hidden bug somewhere in the code...
How can I resolve the issue?
Your task is classification, but you have chosen the sum-of-squares loss function, which is suited to regression tasks. You should use the cross-entropy loss function instead.
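
Should you switch losses, here is a minimal sketch of what that could look like, keeping the same (c, t, y, derivative) signature as the question's loss_functions.py; it assumes a sigmoid (rather than identity) output activation, and the eps clipping is only there to avoid log(0):

import numpy as np

def cross_entropy(c, t, y, derivative=False, eps=1e-12):
    # Binary cross-entropy: y is the network output in (0, 1), t the 0/1 target.
    # c (number of outputs) is kept only for signature compatibility.
    y = np.clip(y, eps, 1 - eps)
    if derivative:
        return (y - t) / np.multiply(y, 1 - y)  # dE/dy
    return -(np.multiply(t, np.log(y)) + np.multiply(1 - t, np.log(1 - y)))

With a sigmoid output, multiplying this dE/dy by the sigmoid derivative y(1 - y) in back_prop_step reduces the output delta to the familiar y - t.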

How to convert k.variable, add_weight to PyTorch code

I have code written in TensorFlow that I need to convert to PyTorch.
I think I have almost finished, but I'm not sure about the details.
First I have:
# Imports implied by the snippet (assumed):
from math import sqrt, cos, pi
import numpy as np
from tensorflow.keras import layers, initializers, constraints
from tensorflow.keras import backend as k


class adaptive_implicit_trans(layers.Layer):
    def __init__(self, **kwargs):
        super(adaptive_implicit_trans, self).__init__(**kwargs)

    def build(self, input_shape):
        conv_shape = (1, 1, 64, 64)
        self.it_weights = self.add_weight(
            shape=(1, 1, 64, 1),
            initializer=initializers.get('ones'),
            constraint=constraints.NonNeg(),
            name='ait_conv')
        kernel = np.zeros(conv_shape)
        r1 = sqrt(1.0 / 8)
        r2 = sqrt(2.0 / 8)
        for i in range(8):
            _u = 2 * i + 1
            for j in range(8):
                _v = 2 * j + 1
                index = i * 8 + j
                for u in range(8):
                    for v in range(8):
                        index2 = u * 8 + v
                        t = cos(_u * u * pi / 16) * cos(_v * v * pi / 16)
                        t = t * r1 if u == 0 else t * r2
                        t = t * r1 if v == 0 else t * r2
                        kernel[0, 0, index2, index] = t
        self.kernel = k.variable(value=kernel, dtype='float32')

    def call(self, inputs):
        self.kernel = self.kernel * self.it_weights
        y = k.conv2d(inputs,
                     self.kernel,
                     padding='same',
                     data_format='channels_last')
        return y

    def compute_output_shape(self, input_shape):
        return input_shape
and I need to convert it to PyTorch code.
So I made this:
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt, cos, pi


class It_Weight(nn.Module):
    def __init__(self):
        super().__init__()
        self.ReLU = nn.ReLU()
        self.it_weights = nn.Parameter(torch.autograd.Variable(torch.ones((1, 64, 1, 1)),
                                                               requires_grad=True))

    def forward(self, input):
        y = input.to('cuda') * self.it_weights
        return self.ReLU(y)

    def compute_output_shape(self, input_shape):
        return input_shape


class Kernel(nn.Module):
    def __init__(self):
        super().__init__()
        conv_shape = (64, 64, 1, 1)
        kernel = torch.zeros(conv_shape).cuda()
        r1 = sqrt(1.0 / 8)
        r2 = sqrt(2.0 / 8)
        for i in range(8):
            _u = 2 * i + 1
            for j in range(8):
                _v = 2 * j + 1
                index = i * 8 + j
                for u in range(8):
                    for v in range(8):
                        index2 = u * 8 + v
                        t = cos(_u * u * pi / 16) * cos(_v * v * pi / 16)
                        t = t * r1 if u == 0 else t * r2
                        t = t * r1 if v == 0 else t * r2
                        kernel[index, index2, 0, 0] = t
        self.kernel = torch.autograd.Variable(kernel)

    def forward(self):
        return self.kernel

    def compute_output_shape(self, input_shape):
        return input_shape


class adaptive_implicit_trans(nn.Module):
    def __init__(self):
        super().__init__()
        self.it_weights = It_Weight()
        self.kernel = Kernel()

    def forward(self, inputs):
        self.kernel1 = self.it_weights(self.kernel())  # output has the same size as the kernel
        y = F.conv2d(inputs, self.kernel1)
        return y

    def compute_output_shape(self, input_shape):
        return input_shape
The number of classes increased, but this was my best attempt at avoiding errors.
I don't know where it is wrong.
I have been training this PyTorch code, but its performance is lower than the TensorFlow code.
Where should I edit it?
Please help me.
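
In case it helps, here is a sketch of how the three classes might collapse into a single nn.Module. This is an assumption-laden rewrite, not the original author's code: the fixed DCT kernel becomes a registered buffer, the learned scale an nn.Parameter, and a clamp in forward stands in for the Keras NonNeg constraint. Note that PyTorch expects channels-first input, unlike the channels_last TensorFlow code:

import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt, cos, pi


class AdaptiveImplicitTrans(nn.Module):
    # A sketch of one possible single-module port (an assumption).
    def __init__(self):
        super().__init__()
        # Learned per-input-channel scale; Keras enforced NonNeg via a
        # constraint, so clamping at use time is one possible substitute.
        self.it_weights = nn.Parameter(torch.ones(1, 64, 1, 1))
        kernel = torch.zeros(64, 64, 1, 1)  # PyTorch layout: (out, in, kH, kW)
        r1 = sqrt(1.0 / 8)
        r2 = sqrt(2.0 / 8)
        for i in range(8):
            _u = 2 * i + 1
            for j in range(8):
                _v = 2 * j + 1
                index = i * 8 + j
                for u in range(8):
                    for v in range(8):
                        index2 = u * 8 + v
                        t = cos(_u * u * pi / 16) * cos(_v * v * pi / 16)
                        t = t * r1 if u == 0 else t * r2
                        t = t * r1 if v == 0 else t * r2
                        kernel[index, index2, 0, 0] = t
        # Fixed DCT basis: a buffer follows .to(device) and state_dict()
        # without being trained.
        self.register_buffer('kernel', kernel)

    def forward(self, x):  # x: (N, 64, H, W), channels-first
        # Rescale the fixed kernel on every call instead of overwriting it,
        # as the Keras call() did; overwriting breaks repeated iterations.
        k = self.kernel * torch.clamp(self.it_weights, min=0.0)
        return F.conv2d(x, k)  # 1x1 kernel, so 'same' padding is a no-op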

Tensorflow 2.2.0 Keras Subclass Model can train and predict, but throws exception when save: Dimension size must be evenly divisible X

This model is a variety of CNN and uses causal dilated convolution layers.
I can train and predict with no errors, but when I use model.save() to save the model, it throws an exception.
So I use save_weights and load_weights to save and load the model instead.
I wonder why this error appears:
model.save("path")
out:
ValueError: Dimension size must be evenly divisible by 2 but is 745 for '{{node conv1d_5/SpaceToBatchND}} = SpaceToBatchND[T=DT_FLOAT, Tblock_shape=DT_INT32, Tpaddings=DT_INT32](conv1d_5/Pad, conv1d_5/SpaceToBatchND/block_shape, conv1d_5/SpaceToBatchND/paddings)' with input shapes: [?,745,32], [1], [1,2] and with computed input tensors: input[1] = <2>, input[2] = <[0 0]>.
Input shape is (None,743,27)
Output shape is (None,24,1)
def slice(x, seq_length):
    return x[:, -seq_length:, :]


class ResidualBlock(tf.keras.layers.Layer):
    def __init__(self, n_filters, filter_width, dilation_rate):
        super(ResidualBlock, self).__init__()
        self.n_filters = n_filters
        self.filter_width = filter_width
        self.dilation_rate = dilation_rate
        # preprocessing - equivalent to time-distributed dense
        self.x = Conv1D(32, 1, padding='same', activation='relu')
        # filter convolution
        self.x_f = Conv1D(filters=n_filters,
                          kernel_size=filter_width,
                          padding='causal',
                          dilation_rate=dilation_rate,
                          activation='tanh')
        # gating convolution
        self.x_g = Conv1D(filters=n_filters,
                          kernel_size=filter_width,
                          padding='causal',
                          dilation_rate=dilation_rate,
                          activation='sigmoid')
        # postprocessing - equivalent to time-distributed dense
        self.z_p = Conv1D(32, 1, padding='same', activation='relu')

    def call(self, inputs):
        x = self.x(inputs)
        f = self.x_f(x)
        g = self.x_g(x)
        z = tf.multiply(f, g)
        z = self.z_p(z)
        return tf.add(x, z), z

    def get_config(self):
        config = super(ResidualBlock, self).get_config()
        config.update({"n_filters": self.n_filters,
                       "filter_width": self.filter_width,
                       "dilation_rate": self.dilation_rate})
        return config
class WaveNet(tf.keras.Model):
    def __init__(self, n_filters=32, filter_width=2, dilation_rates=None, drop_out=0.2, pred_length=24):
        super().__init__(name='WaveNet')
        # Layer Parameter
        self.n_filters = n_filters
        self.filter_width = filter_width
        self.drop_out = drop_out
        self.pred_length = pred_length
        if dilation_rates is None:
            self.dilation_rates = [2 ** i for i in range(8)]
        else:
            self.dilation_rates = dilation_rates
        # Layer
        self.residual_stacks = []
        for dilation_rate in self.dilation_rates:
            self.residual_stacks.append(ResidualBlock(self.n_filters, self.filter_width, dilation_rate))
        # self.add = Add()
        self.cut = Lambda(slice, arguments={'seq_length': pred_length})
        self.conv_1 = Conv1D(128, 1, padding='same')
        self.relu = Activation('relu')
        self.drop = Dropout(drop_out)
        self.skip = Lambda(lambda x: x[:, -2 * pred_length + 1:-pred_length + 1, :1])
        self.conv_2 = Conv1D(1, 1, padding='same')

    def _unroll(self, inputs, **kwargs):
        outputs = inputs
        skips = []
        for residual_block in self.residual_stacks:
            outputs, z = residual_block(outputs)
            skips.append(z)
        outputs = self.relu(Add()(skips))
        outputs = self.cut(outputs)
        outputs = self.conv_1(outputs)
        outputs = self.relu(outputs)
        outputs = self.drop(outputs)
        outputs = Concatenate()([outputs, self.skip(inputs)])
        outputs = self.conv_2(outputs)
        outputs = self.cut(outputs)
        return outputs

    def _get_output(self, input_tensor):
        pass

    def call(self, inputs, training=False, **kwargs):
        if training:
            return self._unroll(inputs)
        else:
            return self._get_output(inputs)
Train step:

model = WaveNet()
model.compile(Adam(), loss=loss)

# ok
history = model.fit(train_x, train_y,
                    batch_size=batch_size,
                    epochs=epochs,
                    callbacks=[cp_callback] if save else None)

# ok
result = model.predict(test_x)

# error
model.save("path")
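
As a stopgap, the weight-based workflow the question mentions could look roughly like this (a sketch; it assumes the same WaveNet class plus the train_x and checkpoint-path names above, and builds the restored model's variables with one forward pass before loading):

model.save_weights("wavenet_ckpt")    # works even when model.save() fails

restored = WaveNet()
restored(train_x[:1], training=True)  # one call to build the variables
restored.load_weights("wavenet_ckpt")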

NameError: name 'K' is not defined

I'm following the guide to Transformers and the colab project https://colab.research.google.com/drive/1XBP0Zh8K4g_n0A2p1UlGFf3dij0EX_Kt, but when I run the cell with the line multi_head = build_model() I get the error.
This is the output from the console:

NameError                                 Traceback (most recent call last)
<ipython-input> in <module>()
----> 1 multi_head = build_model()

5 frames
<ipython-input> in <lambda>(x)
     40         self.dropout = Dropout(attn_dropout)
     41     def __call__(self, q, k, v, mask):
---> 42         attn = Lambda(lambda x:K.batch_dot(x[0],x[1],axes=[2,2])/self.temper)([q, k])
     43         if mask is not None:
     44             mmask = Lambda(lambda x:(-1e+10)*(1-x))(mask)

NameError: name 'K' is not defined
The cell runs right after the model-architecture code, which is what the error refers to.
Can you see where this K should be defined?
import random, os, sys
import numpy as np
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.initializers import *
import tensorflow as tf
from tensorflow.python.keras.layers import Layer

try:
    from dataloader import TokenList, pad_to_longest
    # for transformer
except: pass

embed_size = 60
class LayerNormalization(Layer):
    def __init__(self, eps=1e-6, **kwargs):
        self.eps = eps
        super(LayerNormalization, self).__init__(**kwargs)

    def build(self, input_shape):
        self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
                                     initializer=Ones(), trainable=True)
        self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
                                    initializer=Zeros(), trainable=True)
        super(LayerNormalization, self).build(input_shape)

    def call(self, x):
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta

    def compute_output_shape(self, input_shape):
        return input_shape
class ScaledDotProductAttention():
    def __init__(self, d_model, attn_dropout=0.1):
        self.temper = np.sqrt(d_model)
        self.dropout = Dropout(attn_dropout)

    def __call__(self, q, k, v, mask):
        attn = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]) / self.temper)([q, k])
        if mask is not None:
            mmask = Lambda(lambda x: (-1e+10) * (1 - x))(mask)
            attn = Add()([attn, mmask])
        attn = Activation('softmax')(attn)
        attn = self.dropout(attn)
        output = Lambda(lambda x: K.batch_dot(x[0], x[1]))([attn, v])
        return output, attn
class MultiHeadAttention():
    # mode 0 - big matrices, faster; mode 1 - more clear implementation
    def __init__(self, n_head, d_model, d_k, d_v, dropout, mode=0, use_norm=True):
        self.mode = mode
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.dropout = dropout
        if mode == 0:
            self.qs_layer = Dense(n_head * d_k, use_bias=False)
            self.ks_layer = Dense(n_head * d_k, use_bias=False)
            self.vs_layer = Dense(n_head * d_v, use_bias=False)
        elif mode == 1:
            self.qs_layers = []
            self.ks_layers = []
            self.vs_layers = []
            for _ in range(n_head):
                self.qs_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))
                self.ks_layers.append(TimeDistributed(Dense(d_k, use_bias=False)))
                self.vs_layers.append(TimeDistributed(Dense(d_v, use_bias=False)))
        self.attention = ScaledDotProductAttention(d_model)
        self.layer_norm = LayerNormalization() if use_norm else None
        self.w_o = TimeDistributed(Dense(d_model))

    def __call__(self, q, k, v, mask=None):
        d_k, d_v = self.d_k, self.d_v
        n_head = self.n_head
        if self.mode == 0:
            qs = self.qs_layer(q)  # [batch_size, len_q, n_head*d_k]
            ks = self.ks_layer(k)
            vs = self.vs_layer(v)

            def reshape1(x):
                s = tf.shape(x)  # [batch_size, len_q, n_head * d_k]
                x = tf.reshape(x, [s[0], s[1], n_head, d_k])
                x = tf.transpose(x, [2, 0, 1, 3])
                x = tf.reshape(x, [-1, s[1], d_k])  # [n_head * batch_size, len_q, d_k]
                return x

            qs = Lambda(reshape1)(qs)
            ks = Lambda(reshape1)(ks)
            vs = Lambda(reshape1)(vs)

            if mask is not None:
                mask = Lambda(lambda x: K.repeat_elements(x, n_head, 0))(mask)
            head, attn = self.attention(qs, ks, vs, mask=mask)

            def reshape2(x):
                s = tf.shape(x)  # [n_head * batch_size, len_v, d_v]
                x = tf.reshape(x, [n_head, -1, s[1], s[2]])
                x = tf.transpose(x, [1, 2, 0, 3])
                x = tf.reshape(x, [-1, s[1], n_head * d_v])  # [batch_size, len_v, n_head * d_v]
                return x

            head = Lambda(reshape2)(head)
        elif self.mode == 1:
            heads = []; attns = []
            for i in range(n_head):
                qs = self.qs_layers[i](q)
                ks = self.ks_layers[i](k)
                vs = self.vs_layers[i](v)
                head, attn = self.attention(qs, ks, vs, mask)
                heads.append(head); attns.append(attn)
            head = Concatenate()(heads) if n_head > 1 else heads[0]
            attn = Concatenate()(attns) if n_head > 1 else attns[0]

        outputs = self.w_o(head)
        outputs = Dropout(self.dropout)(outputs)
        if not self.layer_norm: return outputs, attn
        # outputs = Add()([outputs, q])  # sl: fix
        return self.layer_norm(outputs), attn
class PositionwiseFeedForward():
    def __init__(self, d_hid, d_inner_hid, dropout=0.1):
        self.w_1 = Conv1D(d_inner_hid, 1, activation='relu')
        self.w_2 = Conv1D(d_hid, 1)
        self.layer_norm = LayerNormalization()
        self.dropout = Dropout(dropout)

    def __call__(self, x):
        output = self.w_1(x)
        output = self.w_2(output)
        output = self.dropout(output)
        output = Add()([output, x])
        return self.layer_norm(output)
class EncoderLayer():
    def __init__(self, d_model, d_inner_hid, n_head, d_k, d_v, dropout=0.1):
        self.self_att_layer = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
        self.pos_ffn_layer = PositionwiseFeedForward(d_model, d_inner_hid, dropout=dropout)

    def __call__(self, enc_input, mask=None):
        output, slf_attn = self.self_att_layer(enc_input, enc_input, enc_input, mask=mask)
        output = self.pos_ffn_layer(output)
        return output, slf_attn
def GetPosEncodingMatrix(max_len, d_emb):
    pos_enc = np.array([
        [pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)]
        if pos != 0 else np.zeros(d_emb)
        for pos in range(max_len)
    ])
    pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2])  # dim 2i
    pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2])  # dim 2i+1
    return pos_enc


def GetPadMask(q, k):
    ones = K.expand_dims(K.ones_like(q, 'float32'), -1)
    mask = K.cast(K.expand_dims(K.not_equal(k, 0), 1), 'float32')
    mask = K.batch_dot(ones, mask, axes=[2, 1])
    return mask


def GetSubMask(s):
    len_s = tf.shape(s)[1]
    bs = tf.shape(s)[:1]
    mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)
    return mask
class Transformer():
    def __init__(self, len_limit, embedding_matrix, d_model=embed_size,
                 d_inner_hid=512, n_head=10, d_k=64, d_v=64, layers=2, dropout=0.1,
                 share_word_emb=False, **kwargs):
        self.name = 'Transformer'
        self.len_limit = len_limit
        self.src_loc_info = False  # True # sl: fix later
        self.d_model = d_model
        self.decode_model = None
        d_emb = d_model

        pos_emb = Embedding(len_limit, d_emb, trainable=False,
                            weights=[GetPosEncodingMatrix(len_limit, d_emb)])

        i_word_emb = Embedding(max_features, d_emb, weights=[embedding_matrix])  # Add Kaggle provided embedding here

        self.encoder = Encoder(d_model, d_inner_hid, n_head, d_k, d_v, layers, dropout,
                               word_emb=i_word_emb, pos_emb=pos_emb)

    def get_pos_seq(self, x):
        mask = K.cast(K.not_equal(x, 0), 'int32')
        pos = K.cumsum(K.ones_like(x, 'int32'), 1)
        return pos * mask

    def compile(self, active_layers=999):
        src_seq_input = Input(shape=(None,))
        x = Embedding(max_features, embed_size, weights=[embedding_matrix])(src_seq_input)

        # LSTM before attention layers
        x = Bidirectional(LSTM(128, return_sequences=True))(x)
        x = Bidirectional(LSTM(64, return_sequences=True))(x)

        x, slf_attn = MultiHeadAttention(n_head=3, d_model=300, d_k=64, d_v=64, dropout=0.1)(x, x, x)

        avg_pool = GlobalAveragePooling1D()(x)
        max_pool = GlobalMaxPooling1D()(x)
        conc = concatenate([avg_pool, max_pool])
        conc = Dense(64, activation="relu")(conc)
        x = Dense(1, activation="sigmoid")(conc)

        self.model = Model(inputs=src_seq_input, outputs=x)
        self.model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
If you look at where K is being used you will see:
K.expand_dims
K.cumsum
K.batch_dot
These are Keras backend functions. The code is missing a from keras import backend as K, which I think is a standard abbreviation.
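
Since the rest of the notebook imports from tensorflow.keras, the matching fix is presumably the tf.keras flavour of that import, added with the other imports at the top of the cell:

# Either line defines K; the first matches the tensorflow.keras imports
# already used in this notebook.
from tensorflow.keras import backend as K
# from keras import backend as K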
