Rare case with: mat1 and mat2 shapes cannot be multiplied - python

self.model = DQNetwork(11, 256, 3)

class DQNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x
Traceback (most recent call last):
File "E:/Work/Programming/PyArk/main.py", line 32, in <module>
agent.train()
File "E:\Work\Programming\PyArk\Agent\agent.py", line 31, in train
self.step(states, actions, rewards, next_states, dones)
File "E:\Work\Programming\PyArk\Agent\agent.py", line 20, in step
self._trainer.train(state, action, reward, next_state, done)
File "E:\Work\Programming\PyArk\Agent\DQN\dqn_trainer.py", line 32, in train
prediction = self.model(state)
File "E:\Work\Programming\PyArk\venv\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Work\Programming\PyArk\Agent\DQN\dqn_network.py", line 19, in forward
x = F.relu(self.linear1(x))
File "E:\Work\Programming\PyArk\venv\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Work\Programming\PyArk\venv\lib\site-packages\torch\nn\modules\linear.py", line 103, in forward
return F.linear(input, self.weight, self.bias)
File "E:\Work\Programming\PyArk\venv\lib\site-packages\torch\nn\functional.py", line 1848, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (11x5 and 11x256)
I don't understand why this error is appearing.
I use the same code in other projects... what is going on?

In the error message, mat1 is the input batch (11x5) and mat2 is linear1's transposed weight (11x256), so the input's last dimension must equal input_size = 11. Your state tensor is transposed: model( torch.zeros(11,5) ) --> model( torch.zeros(5,11) )
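A minimal sketch of the corrected call, assuming the 5 in the error is your batch size:

import torch

model = DQNetwork(11, 256, 3)   # input_size=11, hidden_size=256, output_size=3
state = torch.zeros(5, 11)      # (batch_size, input_size)
out = model(state)              # works: out.shape == torch.Size([5, 3])

If your batch is assembled the other way around, calling state.t() before the forward pass fixes the shape.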


File "/model.py", line 33, in forward x_out = torch.cat(x_out, 1) IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

I read previous answers but couldn't fix this. Whenever I run the code, this error pops up at a different epoch: sometimes execution runs into the 50s before the error suddenly appears and execution stops; at other times the error appears around epoch 16, and so on.
0it [00:00, ?it/s]/usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1960: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.
warnings.warn("nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.")
185it [00:07, 23.88it/s]
Traceback (most recent call last):
File "/content/drive/MyDrive/train.py", line 241, in <module>
train()
File "/content/drive/MyDrive/train.py", line 98, in train
text_aligned_match, image_aligned_match, pred_similarity_match = similarity_module(fixed_text, matched_image)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/content/drive/MyDrive/model.py", line 106, in forward
text_encoding, image_encoding = self.encoding(text, image)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/content/drive/MyDrive/model.py", line 70, in forward
text_encoding = self.shared_text_encoding(text)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/content/drive/MyDrive/model.py", line 33, in forward
x_out = torch.cat(x_out, 1)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
The line creating the issue is
x_out = torch.cat(x_out, 1)
I tried
x_out = torch.cat(x_out, dim=0)
but that doesn't solve it; it gives the following error:
0it [00:01, ?it/s]
Traceback (most recent call last):
File "/content/drive/MyDrive/train.py", line 241, in <module>
train()
File "/content/drive/MyDrive/train.py", line 98, in train
text_aligned_match, image_aligned_match, pred_similarity_match = similarity_module(fixed_text, matched_image)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/content/drive/MyDrive/model.py", line 106, in forward
text_encoding, image_encoding = self.encoding(text, image)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/content/drive/MyDrive/model.py", line 71, in forward
text_shared = self.shared_text_linear(text_encoding)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/container.py", line 139, in forward
input = module(input)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py", line 114, in forward
return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (132x32 and 128x64)
Code is:
import math
import random
from random import random, seed
import torch
import torch.nn as nn
from torch.distributions import Normal, Independent
from torch.nn.functional import softplus

# random.seed(825)
seed(825)

class FastCNN(nn.Module):
    # a CNN-based alternative to BERT for text encoding
    def __init__(self, channel=32, kernel_size=(1, 2, 4, 8)):
        super(FastCNN, self).__init__()
        self.fast_cnn = nn.ModuleList()
        for kernel in kernel_size:
            self.fast_cnn.append(
                nn.Sequential(
                    nn.Conv1d(200, channel, kernel_size=kernel),
                    nn.BatchNorm1d(channel),
                    nn.ReLU(),
                    nn.AdaptiveMaxPool1d(1)
                )
            )

    def forward(self, x):
        x = x.permute(0, 2, 1)
        x_out = []
        for module in self.fast_cnn:
            x_out.append(module(x).squeeze())
        x_out = torch.cat(x_out, dim=0)
        return x_out
class EncodingPart(nn.Module):
    def __init__(
        self,
        cnn_channel=32,
        cnn_kernel_size=(1, 2, 4, 8),
        shared_image_dim=128,
        shared_text_dim=128
    ):
        super(EncodingPart, self).__init__()
        self.shared_text_encoding = FastCNN(
            channel=cnn_channel,
            kernel_size=cnn_kernel_size
        )
        self.shared_text_linear = nn.Sequential(
            nn.Linear(128, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(64, shared_text_dim),
            nn.BatchNorm1d(shared_text_dim),
            nn.ReLU()
        )
        self.shared_image = nn.Sequential(
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(256, shared_image_dim),
            nn.BatchNorm1d(shared_image_dim),
            nn.ReLU()
        )

    def forward(self, text, image):
        text_encoding = self.shared_text_encoding(text)
        text_shared = self.shared_text_linear(text_encoding)
        image_shared = self.shared_image(image)
        return text_shared, image_shared
class SimilarityModule(nn.Module):
    def __init__(self, shared_dim=128, sim_dim=64):
        super(SimilarityModule, self).__init__()
        self.encoding = EncodingPart()
        self.text_aligner = nn.Sequential(
            nn.Linear(shared_dim, shared_dim),
            nn.BatchNorm1d(shared_dim),
            nn.ReLU(),
            nn.Linear(shared_dim, sim_dim),
            nn.BatchNorm1d(sim_dim),
            nn.ReLU()
        )
        self.image_aligner = nn.Sequential(
            nn.Linear(shared_dim, shared_dim),
            nn.BatchNorm1d(shared_dim),
            nn.ReLU(),
            nn.Linear(shared_dim, sim_dim),
            nn.BatchNorm1d(sim_dim),
            nn.ReLU()
        )
        self.sim_classifier_dim = sim_dim * 2
        self.sim_classifier = nn.Sequential(
            nn.BatchNorm1d(self.sim_classifier_dim),
            nn.Linear(self.sim_classifier_dim, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Linear(64, 2)
        )

    def forward(self, text, image):
        text_encoding, image_encoding = self.encoding(text, image)
        text_aligned = self.text_aligner(text_encoding)
        image_aligned = self.image_aligner(image_encoding)
        sim_feature = torch.cat([text_aligned, image_aligned], 1)
        pred_similarity = self.sim_classifier(sim_feature)
        return text_aligned, image_aligned, pred_similarity
class Encoder(nn.Module):
    def __init__(self, z_dim=2):
        super(Encoder, self).__init__()
        self.z_dim = z_dim
        # Vanilla MLP
        self.net = nn.Sequential(
            nn.Linear(64, 64),
            nn.ReLU(True),
            nn.Linear(64, z_dim * 2),
        )

    def forward(self, x):
        # x = x.view(x.size(0), -1)  # Flatten the input
        params = self.net(x)
        mu, sigma = params[:, :self.z_dim], params[:, self.z_dim:]
        sigma = softplus(sigma) + 1e-7
        return Independent(Normal(loc=mu, scale=sigma), 1)
class AmbiguityLearning(nn.Module):
    def __init__(self):
        super(AmbiguityLearning, self).__init__()
        self.encoding = EncodingPart()
        self.encoder_text = Encoder()
        self.encoder_image = Encoder()

    def forward(self, text_encoding, image_encoding):
        # text_encoding, image_encoding = self.encoding(text, image)
        p_z1_given_text = self.encoder_text(text_encoding)
        p_z2_given_image = self.encoder_image(image_encoding)
        z1 = p_z1_given_text.rsample()
        z2 = p_z2_given_image.rsample()
        kl_1_2 = p_z1_given_text.log_prob(z1) - p_z2_given_image.log_prob(z1)
        kl_2_1 = p_z2_given_image.log_prob(z2) - p_z1_given_text.log_prob(z2)
        skl = (kl_1_2 + kl_2_1) / 2.
        skl = nn.functional.sigmoid(skl)  # deprecated; the warning in the log suggests torch.sigmoid
        return skl
class UnimodalDetection(nn.Module):
    def __init__(self, shared_dim=128, prime_dim=16):
        super(UnimodalDetection, self).__init__()
        self.text_uni = nn.Sequential(
            nn.Linear(shared_dim, shared_dim),
            nn.BatchNorm1d(shared_dim),
            nn.ReLU(),
            nn.Linear(shared_dim, prime_dim),
            nn.BatchNorm1d(prime_dim),
            nn.ReLU()
        )
        self.image_uni = nn.Sequential(
            nn.Linear(shared_dim, shared_dim),
            nn.BatchNorm1d(shared_dim),
            nn.ReLU(),
            nn.Linear(shared_dim, prime_dim),
            nn.BatchNorm1d(prime_dim),
            nn.ReLU()
        )

    def forward(self, text_encoding, image_encoding):
        text_prime = self.text_uni(text_encoding)
        image_prime = self.image_uni(image_encoding)
        return text_prime, image_prime
class CrossModule4Batch(nn.Module):
    def __init__(self, text_in_dim=64, image_in_dim=64, corre_out_dim=64):
        super(CrossModule4Batch, self).__init__()
        self.softmax = nn.Softmax(-1)
        self.corre_dim = 64
        self.pooling = nn.AdaptiveMaxPool1d(1)
        self.c_specific_2 = nn.Sequential(
            nn.Linear(self.corre_dim, corre_out_dim),
            nn.BatchNorm1d(corre_out_dim),
            nn.ReLU()
        )

    def forward(self, text, image):
        text_in = text.unsqueeze(2)
        image_in = image.unsqueeze(1)
        corre_dim = text.shape[1]
        similarity = torch.matmul(text_in, image_in) / math.sqrt(corre_dim)
        correlation = self.softmax(similarity)
        correlation_p = self.pooling(correlation).squeeze()
        correlation_out = self.c_specific_2(correlation_p)
        return correlation_out
class DetectionModule(nn.Module):
    def __init__(self, feature_dim=64+16+16, h_dim=64):
        super(DetectionModule, self).__init__()
        self.encoding = EncodingPart()
        self.ambiguity_module = AmbiguityLearning()
        self.uni_repre = UnimodalDetection()
        self.cross_module = CrossModule4Batch()
        self.classifier_corre = nn.Sequential(
            nn.Linear(feature_dim, h_dim),
            nn.BatchNorm1d(h_dim),
            nn.ReLU(),
            # nn.Dropout(),
            nn.Linear(h_dim, h_dim),
            nn.BatchNorm1d(h_dim),
            nn.ReLU(),
            # nn.Dropout(),
            nn.Linear(h_dim, 2)
        )

    def forward(self, text_raw, image_raw, text, image):
        # text_encoding, image_encoding = self.encoding_module(text, image)
        skl = self.ambiguity_module(text, image)
        text_prime, image_prime = self.encoding(text_raw, image_raw)
        text_prime, image_prime = self.uni_repre(text_prime, image_prime)
        correlation = self.cross_module(text, image)
        weight_uni = (1 - skl).unsqueeze(1)
        weight_corre = skl.unsqueeze(1)
        text_final = weight_uni * text_prime
        img_final = weight_uni * image_prime
        corre_final = weight_corre * correlation
        final_corre = torch.cat([text_final, img_final, corre_final], 1)
        pre_label = self.classifier_corre(final_corre)
        return pre_label
Please suggest a fix
Try printing the shapes of each tensor in x_out before the problem line. My guess is that one of your layers is giving you a tensor that, after calling squeeze(), becomes one-dimensional.
def forward(self, x):
    x = x.permute(0, 2, 1)
    x_out = []
    for module in self.fast_cnn:
        module_out = module(x).squeeze()
        # prints a shape with < 2 dimensions before the error
        print('Module out shape:', module_out.shape)
        # potential solution: restore the batch dimension that squeeze()
        # drops when the batch contains a single sample
        if len(module_out.shape) < 2:
            module_out = torch.unsqueeze(module_out, 0)
        x_out.append(module_out)
    x_out = torch.cat(x_out, dim=1)
    return x_out
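As for why the error is intermittent: module(x) has shape (batch, channel, 1), and a bare squeeze() drops every size-1 dimension, so whenever a batch happens to contain a single sample (typically the last, partial batch of an epoch) the batch dimension disappears too and torch.cat(x_out, 1) raises the IndexError. Note that dim=1 is the correct concatenation axis here: four (batch, 32) tensors become (batch, 128), which the following nn.Linear(128, 64) expects, whereas dim=0 stacks them into (4*batch, 32) and produces the (132x32 and 128x64) error. A variant sketch that avoids the conditional altogether, assuming module(x) is always (batch, channel, 1):

def forward(self, x):
    x = x.permute(0, 2, 1)
    # squeeze(-1) removes only the pooled dimension, never the batch dimension
    x_out = [module(x).squeeze(-1) for module in self.fast_cnn]  # each (batch, channel)
    return torch.cat(x_out, dim=1)  # (batch, channel * len(kernel_size)) == (batch, 128)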

How to combine EmbeddingBag and LSTM layers?

Python 3.9.6, PyTorch 1.9.0
I created a neural network with three layers, the first of which is an EmbeddingBag and the second an LSTM.
class PyTorchNetwork(nn.Module):
    def __init__(self, vocab_size, embed_dim, hidden_dim, dropout):
        nn.Module.__init__(self)
        # embedding layer
        self.embedding = nn.EmbeddingBag(num_embeddings=vocab_size,
                                         embedding_dim=embed_dim,
                                         sparse=True)
        # lstm layer
        self.lstm = nn.LSTM(input_size=embed_dim,
                            hidden_size=hidden_dim,
                            num_layers=1,
                            dropout=dropout)
        self.fc = nn.Linear(in_features=hidden_dim,
                            out_features=1)
        self.act = nn.Sigmoid()
        # init network small weights
        self.init_weights()

    def forward(self, text, offsets):
        # text.shape = torch.Size([363])
        # offsets.shape = torch.Size([64])
        embedded = self.embedding(text, offsets)
        # embedded.shape = torch.Size([64, 32])
        _, (hidden, _) = self.lstm(embedded)  # ERROR!!!
        dense_outputs = self.fc(hidden.squeeze(0))
        outputs = self.act(dense_outputs)
        return outputs
When I start training, the output of the first layer doesn't match the dimensionality expected by the second layer. The program gives an error:
vocab_size = len(vocabulary)
embed_dim = 32
hidden_dim = 16
dropout = self.DROP_OUT
model = PyTorchNetwork(vocab_size, embed_dim, hidden_dim, dropout).to(self.device)
predicted_label = model(text, offsets)
Full error trace:
> File "/Presentation/MixinButton.py", line 31, in _press_button_start
> self.controller.perform_business_task()
> File "/ControllerPresentation/AController.py", line 16, in perform_business_task
> result = self.perform_task()
> File "/ControllerPresentation/TuningPredictionModelController.py", line 36, in perform_task
> return Initialization.ibusiness.tuning_prediction_model(self.signal_message, self.analysis_type, self.operation, self.severity_of_disease, self.print_only_final_results)
> File "/Business/IBusiness.py", line 35, in tuning_prediction_model
> return self._perform_task(task)
> File "/Business/IBusiness.py", line 59, in _perform_task
> task.run()
> File "/Business/TuningPredictionModelTask.py", line 27, in run
> result = optimizator.learn(severity, silently=True)
> File "/home/ivan/eclipse-workspace/GompertzLaw/NeuralNetwork/PytorchOptimizator.py", line 94, in learn
> self._train(train_dataloader, model, optimizer, epoch, silently)
> File "/NeuralNetwork/PytorchOptimizator.py", line 115, in _train
> predicted_label = model(text, offsets)
> File "/home/ivan/.local/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
> return forward_call(*input, **kwargs)
> File "/NeuralNetwork/PyTorchNetwork.py", line 46, in forward
> _, (hidden, _) = self.lstm(embedded)
> File "/home/ivan/.local/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
> return forward_call(*input, **kwargs)
> File "/home/ivan/.local/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 677, in forward
> self.check_forward_args(input, hx, batch_sizes)
> File "/home/ivan/.local/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 620, in check_forward_args
> self.check_input(input, batch_sizes)
> File "/home/ivan/.local/lib/python3.9/site-packages/torch/nn/modules/rnn.py", line 201, in check_input
> raise RuntimeError( RuntimeError: input must have 3 dimensions, got 2
How do I fix the error?
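The check that fails in the traceback is the LSTM's input-shape check: nn.EmbeddingBag pools each bag of token embeddings into a single vector, so embedded is 2-D with shape (64, 32) = (batch, embed_dim), while nn.LSTM wants 3-D input of shape (seq_len, batch, input_size). A minimal sketch of one way to satisfy the check, treating each pooled bag as a sequence of length 1 (an assumption about the intent; since EmbeddingBag has already pooled away word order, a plain nn.Embedding is usually the better companion for an LSTM):

embedded = self.embedding(text, offsets)  # (64, 32): (batch, embed_dim)
embedded = embedded.unsqueeze(0)          # (1, 64, 32): (seq_len=1, batch, input_size)
_, (hidden, _) = self.lstm(embedded)      # hidden: (1, 64, 16)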

Tensorflow 2 :NotImplementedError: numpy() is only available when eager execution is enabled

There is a problem in this code: if I delete the SeBlock class and just run the CNN class, all is well. If I plug SeBlock into the CNN class, the NotImplementedError below occurs. I don't know what causes this problem, and none of the fixes I found by searching work. Can somebody help me? Thanks very much!
import tensorflow as tf

class SeBlock(tf.keras.Model):
    def __init__(self, ratio, channel):
        super(SeBlock, self).__init__()
        self.kernel_initializer = tf.keras.initializers.VarianceScaling()
        self.bias_initializer = tf.constant_initializer(value=0.0)
        self.ratio = ratio
        self.ReduceMean = tf.keras.layers.GlobalAveragePooling2D()
        self.DenseCut = tf.keras.Sequential([
            tf.keras.layers.Dense(units=channel,
                                  activation=tf.nn.relu,
                                  kernel_initializer=self.kernel_initializer,
                                  bias_constraint=self.bias_initializer),
            tf.keras.layers.Dense(units=channel,
                                  activation=tf.nn.sigmoid,
                                  kernel_initializer=self.kernel_initializer,
                                  bias_constraint=self.bias_initializer)
        ])
        self.flatten = tf.keras.layers.Reshape(target_shape=(1, 1, channel,))

    def call(self, inputs, training=True):
        if training: print("training network")
        x = self.ReduceMean(inputs)
        x = self.DenseCut(x, training)
        scale = self.flatten(x)
        scale = tf.keras.layers.multiply([inputs, scale])
        # scale *= inputs
        return scale

class CNN(tf.keras.Model):
    def __init__(self, se_block):
        super(CNN, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(
            filters=32,
            kernel_size=[5, 5],
            padding='same',
            activation=tf.nn.relu
        )
        self.seblock1 = self._make_layer(se_block=se_block, ratio=1, input_channel=32)
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.conv2 = tf.keras.layers.Conv2D(
            filters=64,
            kernel_size=[5, 5],
            padding='same',
            activation=tf.nn.relu
        )
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.flatten = tf.keras.layers.Reshape(target_shape=(112 * 112 * 64,))
        self.dense2 = tf.keras.layers.Dense(units=10)

    def _make_layer(self, se_block, ratio, input_channel):
        return tf.keras.Sequential([se_block(ratio=ratio, channel=input_channel)])

    def call(self, inputs, training=True):
        print("1", inputs.get_shape().as_list())
        x = self.conv1(inputs)   # [batch_size, 28, 28, 32]
        # print("start se-block")
        x = self.seblock1(x, training)
        # print("end se-block")
        x = self.pool1(x)        # [batch_size, 14, 14, 32]
        x = self.conv2(x)        # [batch_size, 14, 14, 64]
        x = self.pool2(x)        # [batch_size, 7, 7, 64]
        x = self.flatten(x)      # [batch_size, 7 * 7 * 64]
        x = self.dense2(x)       # [batch_size, 10]
        return tf.nn.softmax(x)

def CNNDense():
    return CNN(SeBlock)
The main code is below.
import tensorflow as tf
import LoadImage as readimage
import DenseBSE

tf.keras.backend.clear_session()
train_path = r"E:\BaiduNetdiskDownload\板角\boardtrain"
test_path = r"E:\BaiduNetdiskDownload\板角\boardtest"
BatchSize = 4
Epoch = 60
lr = 0.001
ds_train, train_count = readimage.load_tensor_img(train_path,
                                                  batch_size=BatchSize,
                                                  epoch=Epoch)
ds_test, test_count = readimage.load_tensor_img(test_path,
                                                batch_size=BatchSize,
                                                epoch=Epoch)
model = DenseBSE.CNNDense()
model.build(input_shape=(BatchSize, 448, 448, 3))
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
epoch_steps = train_count // BatchSize
val_steps = test_count // BatchSize
model.fit(ds_train, epochs=Epoch, steps_per_epoch=epoch_steps,
          validation_data=ds_test, validation_steps=val_steps)
And the error information is displayed below.
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3343, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-2-41c34ae2b3b4>", line 1, in <module>
runfile('E:/PythonProject/CNN_training.py', wdir='E:/PythonProject')
File "C:\Program Files\JetBrains\PyCharm 2020.3.5\plugins\python\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "C:\Program Files\JetBrains\PyCharm 2020.3.5\plugins\python\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "E:/PythonProject/CNN_training.py", line 35, in <module>
model.fit(ds_train, epochs=Epoch, steps_per_epoch = epoch_steps,
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1100, in fit
tmp_logs = self.train_function(iterator)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
NotImplementedError: in user code:
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:805 train_function *
return step_function(self, iterator)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:788 run_step **
outputs = model.train_step(data)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:757 train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:498 minimize
return self.apply_gradients(grads_and_vars, name=name)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:631 apply_gradients
return distribute_ctx.get_replica_context().merge_call(
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2941 merge_call
return self._merge_call(merge_fn, args, kwargs)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2948 _merge_call
return merge_fn(self._strategy, *args, **kwargs)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:682 _distributed_apply **
update_ops.extend(distribution.extended.update(
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2494 update
return self._update(var, fn, args, kwargs, group)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3431 _update
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3437 _update_non_slot
result = fn(*args, **kwargs)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:661 apply_grad_to_update_var **
return var.assign(var.constraint(var))
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\init_ops_v2.py:290 __call__
return constant_op.constant(self.value, dtype=dtype, shape=shape)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py:264 constant
return _constant_impl(value, dtype, shape, name, verify_shape=False,
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py:281 _constant_impl
tensor_util.make_tensor_proto(
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\tensor_util.py:454 make_tensor_proto
if shape is not None and np.prod(shape, dtype=np.int64) == 0:
<__array_function__ internals>:5 prod
C:\ProgramData\Anaconda3\lib\site-packages\numpy\core\fromnumeric.py:2961 prod
return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
C:\ProgramData\Anaconda3\lib\site-packages\numpy\core\fromnumeric.py:90 _wrapreduction
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py:483 __array__
return np.asarray(self.numpy())
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py:619 numpy
raise NotImplementedError(
NotImplementedError: numpy() is only available when eager execution is enabled.
This notebook should help to upgrade, check, and enable. Good luck!
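The bottom frames of the trace point at the cause: apply_grad_to_update_var runs var.assign(var.constraint(var)), i.e. Keras applies the Dense layers' bias_constraint after each optimizer step, but SeBlock passes a tf.constant_initializer as bias_constraint. The initializer, called with the variable as if it were a shape, tries to convert it with numpy(), which is not available inside the graph-mode training function. A sketch of the likely fix, assuming zero-initialized biases were the intent:

self.DenseCut = tf.keras.Sequential([
    tf.keras.layers.Dense(units=channel,
                          activation=tf.nn.relu,
                          kernel_initializer=self.kernel_initializer,
                          bias_initializer=self.bias_initializer),  # was bias_constraint
    tf.keras.layers.Dense(units=channel,
                          activation=tf.nn.sigmoid,
                          kernel_initializer=self.kernel_initializer,
                          bias_initializer=self.bias_initializer)   # was bias_constraint
])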

Custom attention layer return AttributeError during model saving

I was working on a model and I used this custom attention layer.
Note: here's a Colab sample notebook that reproduces a similar error:
https://colab.research.google.com/drive/1RDcJwpVbT6JR8_LA52r1nHPSK0w1HuY7?usp=sharing
class AttentionWeightedAverage(Layer):
    def __init__(self, return_attention=False, **kwargs):
        self.init = initializers.get('uniform')
        self.supports_masking = True
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(**kwargs)

    def build(self, input_shape):
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3
        self.w = self.add_weight(shape=(input_shape[2], 1),
                                 name='{}_w'.format(self.name),
                                 initializer=self.init,
                                 trainable=True)
        super(AttentionWeightedAverage, self).build(input_shape)

    def call(self, h, mask=None):
        h_shape = K.shape(h)
        d_w, T = h_shape[0], h_shape[1]
        logits = K.dot(h, self.w)  # w^T h
        logits = K.reshape(logits, (d_w, T))
        alpha = K.exp(logits - K.max(logits, axis=-1, keepdims=True))  # exp
        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            alpha = alpha * mask
        alpha = alpha / (K.sum(alpha, axis=1, keepdims=True) + K.epsilon())  # softmax
        r = K.sum(h * K.expand_dims(alpha), axis=1)  # r = h*alpha^T
        h_star = K.tanh(r)  # h^* = tanh(r)
        if self.return_attention:
            return [h_star, alpha]
        return h_star

    def get_output_shape_for(self, input_shape):
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)

    def compute_mask(self, input, input_mask=None):
        if isinstance(input_mask, list):
            return [None] * len(input_mask)
        else:
            return None
and my model architecture is something like below,
dense()(x)
Bidirectional(lstm(return_sequences=True))(x)
attentionweightedaverage()(x)
dense(1, 'softmax')
After training for a few epochs, when I try to save my model I get the error below, which I think is related to the custom attention layer I used.
I couldn't figure it out. Any help is appreciated.
The error only occurs when I save the entire model with model.save(); model.save_weights() works fine.
I'm using TensorFlow 2.1.0.
Here's the Traceback,
Traceback (most recent call last):
File "classifiers/main.py", line 26, in <module>
main()
File "classifiers/main.py", line 18, in main
clf.model.save(f'./classifiers/saved_models/{args.model_name}')
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\engine\network.p
signatures, options)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\save.py",
signatures, options)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
save_lib.save(model, filepath, signatures, options)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\saved_model\save.py",
checkpoint_graph_view)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\saved_model\signature_
functions = saveable_view.list_functions(saveable_view.root)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\saved_model\save.py",
self._serialization_cache)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\engine\base_laye
.list_functions_for_serialization(serialization_cache))
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
fns = self.functions_to_serialize(serialization_cache)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
serialization_cache).functions_to_serialize)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
serialization_cache)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
serialization_cache))
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
original_fns = _replace_child_layer_functions(layer, serialization_cache)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
serialization_cache).functions)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
serialization_cache)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
'{}_layer_call_and_return_conditional_losses'.format(layer.name))
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
self.add_trace(*self._input_signature)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
fn.get_concrete_function(*args, **kwargs)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\saving\saved_mod
return super(LayerCall, self).get_concrete_function(*args, **kwargs)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\eager\def_function.py"
self._initialize(args, kwargs, add_initializers_to=initializers)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\eager\def_function.py"
*args, **kwds))
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\eager\function.py", li
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\eager\function.py", li
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\eager\function.py", li
capture_by_value=self._capture_by_value),
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\framework\func_graph.p
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\eager\def_function.py"
return weak_wrapped_fn().__wrapped__(*args, **kwds)
return layer_call(inputs, *args, **kwargs), layer.get_losses_for(inputs)
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\classifiers\blstm_attention.py", line 43, in
call
logits = K.dot(h, self.w) # w^T h
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\backend.py", line 1653, in dot
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
File "C:\Users\user\miniconda3\envs\user\lib\site-packages\tensorflow_core\python\keras\backend.py", line 1202, in ndim
dims = x.shape._dims
AttributeError: 'list' object has no attribute 'shape'
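Judging by the last frames, the SavedModel exporter re-traces the layer's call and hands it the layer's traced input structure as a list rather than a single tensor, so K.dot receives a Python list with no .shape. A defensive sketch, a hypothetical workaround rather than a confirmed fix, is to unwrap the list at the top of call:

def call(self, h, mask=None):
    # During SavedModel tracing, `h` can arrive wrapped in a list; unwrap it
    # so the backend ops below see a tensor (hypothetical workaround).
    if isinstance(h, list):
        h = h[0]
    h_shape = K.shape(h)
    # ... rest of call unchanged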

Input_shape is None in custom layer

I'm building my own layer in TensorFlow 2.1 and using it in a custom model. However, when I try to train, the layer tries to build itself on the first call, and it needs input_shape to do that. As far as I know, the shape should be computed from the actual input the layer receives, but it seems that input_shape is None.
My question is: what did I do wrong, and how do I correct it?
Below I'm attaching an example to reproduce the problem.
My code (MinimalRNNCell is copied from tensorflow website https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN):
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import Model
from tensorflow.keras import backend as K  # needed for K.dot below (missing in the original)
import numpy as np

class MinimalRNNCell(Layer):
    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(MinimalRNNCell, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

    def call(self, inputs, states):
        prev_output = states[0]
        h = K.dot(inputs, self.kernel)
        output = h + K.dot(prev_output, self.recurrent_kernel)
        return output, [output]

class RNNXModel(Model):
    def __init__(self, size):
        super(RNNXModel, self).__init__()
        self.minimalrnn = MinimalRNNCell(size)

    def call(self, inputs):
        out = self.minimalrnn(input)
        return out

x = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]]])
y = np.array([[1, 2, 3], [10, 11, 12]])

model = RNNXModel(3)
model.compile(optimizer='sgd', loss='mse')
model.fit(x, y, epochs=10, batch_size=1)
Error I'm getting:
Traceback (most recent call last):
File "/home/.../test.py", line 64, in <module>
model.fit(x,y,epochs=10, batch_size=1)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 819, in fit
use_multiprocessing=use_multiprocessing)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 235, in fit
use_multiprocessing=use_multiprocessing)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 593, in _process_training_inputs
use_multiprocessing=use_multiprocessing)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 646, in _process_inputs
x, y, sample_weight=sample_weights)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 2346, in _standardize_user_data
all_inputs, y_input, dict_inputs = self._build_model_with_inputs(x, y)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 2572, in _build_model_with_inputs
self._set_inputs(cast_inputs)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 2659, in _set_inputs
outputs = self(inputs, **kwargs)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/autograph/impl/api.py", line 237, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in converted code:
/home/.../test.py:36 call *
out=self.minimalrnn(input)
/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py:818 __call__
self._maybe_build(inputs)
/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py:2116 _maybe_build
self.build(input_shapes)
/home/.../test.py:14 build
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
TypeError: 'NoneType' object is not subscriptable
There is a typo (input should be inputs). input is a built-in function (documentation).
class RNNXModel(Model):
    def __init__(self, size):
        super(RNNXModel, self).__init__()
        self.minimalrnn = MinimalRNNCell(size)

    def call(self, inputs):
        out = self.minimalrnn(inputs)  # changed from `input`
        return out
