Eager execution inside a Lambda layer in TensorFlow - python

I have TensorFlow 2.9.1 installed on my laptop, and according to the documentation eager execution should be enabled by default. I have a problem when trying to convert a Tensor object to a NumPy array inside a model: I keep getting 'Tensor' object has no attribute 'numpy'.
I wanted to have some Lambda layers inside my model and do some operations using NumPy, but eager execution seems to be disabled inside the model. I ran tf.executing_eagerly() inside the model and it returned False. On the other hand, when I ran tf.executing_eagerly() outside the model, I got True.
Could someone clear up my confusion here?
import keras
import tensorflow as tf
from keras import layers, models
import numpy as np
import matplotlib.pyplot as plt

tf.config.run_functions_eagerly(True)

def do_something(input_tensor):
    a = add_one(input_tensor.numpy())  # fails: the symbolic tensor passed in has no .numpy()
    b = minus_one(a)
    c = tf.convert_to_tensor(b, dtype=tf.float32)
    return c

def add_one(input):
    return input + 1.0

def minus_one(input):
    return input - 1.0

encoding_dim = 32

input_img = layers.Input(shape=(784,))
encoded = layers.Dense(encoding_dim, activation='relu')(input_img)
simulation_layer = layers.Lambda(do_something, name="channel_simulation")(encoded)
decoded = layers.Dense(784, activation='sigmoid')(simulation_layer)  # decoder fed from the Lambda output

autoencoder = models.Model(input_img, decoded)
encoder = models.Model(input_img, encoded)

encoded_input = layers.Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = models.Model(encoded_input, decoder_layer(encoded_input))

autoencoder.compile(optimizer='adam', loss='binary_crossentropy', run_eagerly=True)
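For context, here is a minimal sketch of a common workaround (my addition, not from the original post). Inside a Functional model, the Lambda function is traced with symbolic tensors, which never have a .numpy() method regardless of the eager settings; routing the NumPy work through tf.py_function hands the inner function concrete eager tensors instead. This sketch reuses the add_one and minus_one helpers above:

def do_something_eager(input_tensor):
    def numpy_ops(t):
        # t is a concrete EagerTensor here, so .numpy() works
        a = add_one(t.numpy())
        b = minus_one(a)
        return tf.convert_to_tensor(b, dtype=tf.float32)
    # tf.py_function runs numpy_ops eagerly even inside graph tracing
    c = tf.py_function(numpy_ops, [input_tensor], tf.float32)
    c.set_shape(input_tensor.shape)  # py_function drops static shape info
    return c

simulation_layer = layers.Lambda(do_something_eager, name="channel_simulation")(encoded)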

Related

How to pass the input tensor of a model to a loss function?

My goal is to create a custom loss function that calculates the loss based on y_true, y_pred, and the tensor of the model's input layer:
import numpy as np
from tensorflow import keras as K

input_shape = (16, 16, 1)
input = K.layers.Input(input_shape)
dense = K.layers.Dense(16)(input)
output = K.layers.Dense(1)(dense)
model = K.Model(inputs=input, outputs=output)

def CustomLoss(y_true, y_pred):
    return K.backend.sum(K.backend.abs(y_true - model.input * y_pred))

model.compile(loss=CustomLoss)
model.fit(np.ones(input_shape), np.zeros(input_shape))
However, this code fails with the following error message:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
How can I pass the input tensor of my model to the loss function?
Tensorflow Version: 2.4.1
Python Version: 3.8.8
You can use add_loss to pass extra tensors, such as the model input, to your loss. Here is an example:
import numpy as np
from tensorflow import keras as K

def CustomLoss(y_true, y_pred, input_l):
    return K.backend.sum(K.backend.abs(y_true - input_l * y_pred))

input_shape = (16, 16, 1)
n_sample = 10

X = np.random.uniform(0, 1, (n_sample,) + input_shape)
y = np.random.uniform(0, 1, (n_sample,) + input_shape)

inp = K.layers.Input(input_shape)
dense = K.layers.Dense(16)(inp)
out = K.layers.Dense(1)(dense)
target = K.layers.Input(input_shape)
model = K.Model(inputs=[inp, target], outputs=out)

model.add_loss(CustomLoss(target, out, inp))
model.compile(loss=None, optimizer='adam')
model.fit(x=[X, y], y=None, epochs=3)

To use the model in inference mode (removing the target from the inputs):

final_model = K.Model(model.input[0], model.output)
final_model.predict(X)
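As a design note (my addition, not part of the original answer): because the loss is attached with add_loss, it is already wired into the graph, so compile receives loss=None, and the targets must enter the model as a regular Input. That is why fit is called with y=None and the labels are passed inside x.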

"Tensor is unhashable if Tensor equality is enabled. Instead, use tensor.experimental_ref() as the key" when running sess.run()

TypeError: Tensor is unhashable if Tensor equality is enabled. Instead, use tensor.experimental_ref() as the key.
... is being displayed when I run the code below.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import load_model

print(tf.__version__)

seed_num = 1
data_path = 'Caltech-256/'
batch_size = 80                        # the number of images to load per iteration
target_size = (64, 64)                 # pixel size of each image
num_pixels_and_channels = (64, 64, 3)  # pixels and channels
input_shape = (1, 64, 64, 3)
layer = 1
feature = 0

val_data_gen_aug_rotate = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    validation_split=0.1)
val_img = val_data_gen_aug_rotate.flow_from_directory(
    data_path,
    subset='validation',
    color_mode='rgb',
    target_size=target_size,
    batch_size=batch_size,
    class_mode='categorical',
    seed=seed_num)
sample_imgs_val, sample_labels_val = next(val_img)

model = load_model("Models/ex_13_epoch_4_3563_336.h5")

sess = tf.compat.v1.Session()
input_layer = model.layers[0].input
output_layer = model.layers[layer].output
outputs = sess.run(output_layer, feed_dict={input_layer: sample_imgs_val})
The issue is with the line outputs = sess.run(output_layer, feed_dict={input_layer: sample_imgs_val}). What is causing the error and how can it be fixed?
I'm using tensorflow version 2.1.0 on a CPU via a Jupyter Notebook.
The error is due to a version mismatch.
You are trying to use the TensorFlow 1.x Session API, which works in graph mode, whereas TensorFlow 2.x runs in eager mode by default.
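For illustration, here is a minimal sketch of the eager-mode equivalent (my addition, assuming the model, layer, and sample_imgs_val from the question): instead of running a Session with a feed_dict, build a sub-model that exposes the intermediate layer's output and call it directly.

intermediate_model = tf.keras.Model(inputs=model.input,
                                    outputs=model.layers[layer].output)
# Runs eagerly in TF 2.x; no Session or feed_dict is needed
outputs = intermediate_model(sample_imgs_val).numpy()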

Keras functional API and TensorFlow Hub

I'm trying to use a Universal Sentence Encoder from TF Hub as a Keras layer in a functional way. I would like to use hub.KerasLayer with the Keras Functional API, but I'm not sure how to achieve that; so far I've only seen examples of hub.KerasLayer with the Sequential API.
import numpy as np
import tensorflow_hub as hub
import tensorflow as tf
from tensorflow.keras import layers
import tf_sentencepiece

use_url = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/1'
english_sentences = ["dog", "Puppies are nice.", "I enjoy taking long walks along the beach with my dog."]
english_sentences = np.array(english_sentences, dtype=object)[:, np.newaxis]

seq = layers.Input(shape=(None,), name='sentence', dtype=tf.string)
module = hub.KerasLayer(hub.Module(use_url))(seq)
model = tf.keras.models.Model(inputs=[seq], outputs=[module])
model.summary()

x = model.predict(english_sentences)
print(x)
The code above runs into this error when passing the input layer to the embedding: TypeError: Can't convert 'inputs': Shape TensorShape([Dimension(None), Dimension(None)]) is incompatible with TensorShape([Dimension(None)])
Is it possible to use hub.KerasLayer with the Keras Functional API in TensorFlow 1.x? If it can be done, how?
Try this:

sentence_encoding_layer = hub.KerasLayer(
    "https://tfhub.dev/google/universal-sentence-encoder/4",
    trainable=False,
    input_shape=[],
    dtype=tf.string,
    name='U.S.E')

inputs = tf.keras.layers.Input(shape=(), dtype='string', name='input_layer')
x = sentence_encoding_layer(inputs)
x = tf.keras.layers.Dense(64, activation='relu')(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid', name='output_layer')(x)

model = tf.keras.Model(inputs, outputs, name='Transfer_learning_USE')
model.summary()
model.predict([sentence])  # 'sentence' is a string to encode (not defined in the original answer)
If you use v3 of the same Universal Sentence Encoder with TF 1.15, you can do this by replacing the lines
import tf_sentencepiece
use_url = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/1'
module = hub.KerasLayer(hub.Module(use_url))(seq)
with
import tensorflow_text
use_url = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3'
module = hub.KerasLayer(use_url)(seq)
The first shape, TensorShape([Dimension(None), Dimension(None)]), is what you are passing into the model. The second shape, TensorShape([Dimension(None)]), is what it expects. So this error is telling you it expects a shape of ()...
Or, if you are expecting to process batches of text, perhaps use a TimeDistributed layer, like so:

module = tf.keras.layers.TimeDistributed(hub.KerasLayer(hub.Module(use_url)))(seq)

However, you may be forced to use a specific size for the text length...

Keras (TensorFlow) - name 'array_ops' not defined

I'm having an issue with Keras/TensorFlow deserializing a model. Basically, this is an implementation of a convolutional neural network on text, which requires a dimension to be added at an early stage. The error message is this:
File "/usr/lib/python3.6/site-packages/tensorflow/python/keras/_impl/keras/backend.py", line 2231, in expand_dims
NameError: name 'array_ops' is not defined
The code causing this error message:
import numpy as np
from docopt import docopt
import tensorflow as tf
from vdcnn import utils

if __name__ == '__main__':
    arguments = docopt(__doc__, version='1.0')
    model = tf.keras.models.load_model(arguments["--checkpoint"])
    print(type(model))
    proc = utils.Preprocessor(padding_size=256)
    data, labels, test_data, test_labels = proc.process_document(arguments["--data"])
    for i in range(len(test_data)):
        test_vec = test_data[i]
        prediction = model.predict(x=test_vec[np.newaxis])
        predlabel = utils.labels_in_order[np.argmax(prediction)]
        truthlabel = utils.labels_in_order[np.argmax(test_labels[i])]
        print("Truth: {} \t Predicted: {}".format(truthlabel, predlabel))
The code that calls "expand_dims" uses a Keras Lambda wrapper around the TensorFlow function:
...
inputs = tf.keras.Input(shape=(self.sequence_max_length,), dtype='int32', name='inputs')
embedding = tf.keras.layers.Embedding(self.num_quantized_chars, self.embedding_size, input_length=self.sequence_max_length)(inputs)
embedding = tf.keras.layers.Lambda(tf.expand_dims, arguments={'axis' : -1, 'name' : 'embedding_expanded'})(embedding)
conv0 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=[1, self.embedding_size], padding='same', kernel_initializer='he_normal')(embedding)
conv0 = tf.keras.layers.Activation('relu')(conv0)
...
And, just for kicks, the line it's referencing in the tensorflow libs:
from tensorflow.python.ops import array_ops
[two thousand lines of crap]
def expand_dims(x, axis=-1):
    """Adds a 1-sized dimension at index "axis".

    Arguments:
        x: A tensor or variable.
        axis: Position where to add a new axis.

    Returns:
        A tensor with expanded dimensions.
    """
    return array_ops.expand_dims(x, axis)
I'm using Python 3.6 and TensorFlow 1.5, and this error occurs on both OSX 10.11.6 and RHEL 7. I've tried various permutations of tf.keras, tf.keras.backend, and plain keras without tf; all of them call essentially the same code, although sometimes it complains about "gen_array_ops" instead of "array_ops" with the same problem.
Anyone have any thoughts?
The issue was this: https://github.com/keras-team/keras/issues/8123#issuecomment-354857044
On top of that, it required reinstalling everything on all machines and using Keras directly instead of tf.keras to get a proper error message, apparently because of how Keras handles object serialization and the way Python tracebacks work.
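For reference, a commonly suggested workaround from that issue thread, sketched here as an assumption rather than quoted from the answer, is to pass a named, module-level function to the Lambda layer instead of the raw tf.expand_dims op, and to register it via custom_objects when reloading, so deserialization can resolve the name:

def expand_dims_last(x):
    return tf.keras.backend.expand_dims(x, axis=-1)

# when building the model:
embedding = tf.keras.layers.Lambda(expand_dims_last, name='embedding_expanded')(embedding)

# when reloading the checkpoint:
model = tf.keras.models.load_model(arguments["--checkpoint"],
                                   custom_objects={'expand_dims_last': expand_dims_last})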

How to show TensorBoard's CPU/memory usage (RunMetadata) for Keras

I want to view CPU/memory usage in TensorBoard with Keras.
For this purpose, I need to execute the add_run_metadata method.
But I cannot find a way to call add_run_metadata from Keras's TensorBoard callback.
Is there a good way to implement CPU/memory monitoring for Keras?
References
See "Runtime Statistics" in the TensorFlow programmer's guide:
https://www.tensorflow.org/programmers_guide/graph_viz
add_run_metadata is defined in the following location (in TensorFlow):
https://github.com/tensorflow/tensorflow/blob/v1.5.0/tensorflow/python/summary/writer/writer.py#L248
The TensorBoard callback in Keras is defined here:
https://github.com/keras-team/keras/blob/2.1.3/keras/callbacks.py#L587
EDIT: I encountered the same problem. I'm editing to share how I attempted to approach this.
I changed the Keras source for callbacks.py, replacing this line in on_epoch_end() with:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
result = self.sess.run([self.merged], feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
self.writer.add_run_metadata(run_metadata, 'epoch%d_step%d' % (epoch, i))
However I end up with the following error:
...\tensorflow\stream_executor\dso_loader.cc:141] Couldn't open CUDA library cupti64_90.dll
...\tensorflow/stream_executor/lib/statusor.h:212] Non-OK-status: status_ status: Failed precondition: could not dlopen DSO: cupti64_90.dll; dlerror: cupti64_90.dll not found
This is puzzling to me, as it seems to be related to the CUDA installation and not related in any obvious (to me) way to the change I made.
I'm using Keras version 2.1.6 and TensorFlow version 1.6.0.
The solution is to run the Keras model in a TF session; it is based on the blog post keras-as-a-simplified-interface-to-tensorflow#calling-keras-layers-on-tensorflow-tensors. Below is a detailed, full, and minimal working example.
First, some dummy data generation:

data.py

import numpy as np

def load_data(n=1000):
    x = np.random.rand(n, 100)
    y = np.sum(x, axis=1, keepdims=True)
    return x, y
The core idea is to run the model in a TF session, so the main code is pure TF and only the model itself is defined with Keras. For this to work (following the above-mentioned tutorial), the model needs to:
- be built on top of a tf.placeholder instead of keras.layers.Input, and
- remain a tensor, not be compiled into a keras.models.Model.
model.py

from keras.layers import Dense

def load_network(input_tensor):
    x = Dense(100, activation='relu')(input_tensor)
    x = Dense(100, activation='relu')(x)
    x = Dense(1, activation='sigmoid')(x)
    return x
And the TF session that runs the Keras model (a clean but full version of the TensorBoard tutorial):

run_runtime_stats.py

import tensorflow as tf
sess = tf.Session()

from keras import backend as K
from keras.objectives import mean_squared_error
K.set_session(sess)

from model import load_network
from data import load_data

# load your keras model as a tf.Tensor
input = tf.placeholder(tf.float32, shape=(None, 100))  # is passed as input to our keras layers
labels = tf.placeholder(tf.float32, shape=(None, 1))
net = load_network(input)  # type(net) == tf.Tensor

loss = tf.reduce_mean(mean_squared_error(labels, net))
opt = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

writer = tf.summary.FileWriter(r'./logs', sess.graph)
sess.run(tf.global_variables_initializer())

with sess.as_default():
    x, y = load_data(64)

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    sess.run([opt],
             feed_dict={input: x, labels: y},
             options=run_options,
             run_metadata=run_metadata)

    writer.add_run_metadata(run_metadata, 'runtime-statistics')

writer.close()
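As a usage note (my addition, assuming the ./logs directory used above), launch TensorBoard with:

tensorboard --logdir ./logs

The runtime statistics then appear in the Graph tab: select the 'runtime-statistics' session run to color the graph by compute time or memory.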
