TensorFlow 2 eager execution disabled inside a custom layer - python

I'm using TF2 installed via pip on an Ubuntu 18.04 box:
$ pip freeze | grep "tensorflow"
tensorflow==2.0.0
tensorflow-estimator==2.0.1
And I'm playing with a custom layer.
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.layers import Input, Concatenate, Dense, Bidirectional, LSTM, Embedding
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import imdb
class Attention(tf.keras.layers.Layer):
    def __init__(self, units):
        super(Attention, self).__init__()
        self.W1 = Dense(units)
        self.W2 = Dense(units)
        self.V = Dense(1)

    def call(self, features, hidden):
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))
        attention_weights = tf.nn.softmax(self.V(score), axis=1)
        context_vector = attention_weights * features
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
vocab_size = 10000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
max_len = 200
rnn_cell_size = 128
x_train = sequence.pad_sequences(x_train, maxlen=max_len, padding='post')
x_test = sequence.pad_sequences(x_test, maxlen=max_len, truncating='post', padding='post')
# Network
sequence_input = Input(shape=(max_len,), dtype='int32')
embedded_sequences = Embedding(vocab_size, 128, input_length=max_len)(sequence_input)
# lstm = Bidirectional(LSTM(rnn_cell_size, dropout=0.3, return_sequences=True, return_state=True), name="bi_lstm_0")(embedded_sequences)
lstm, forward_h, forward_c, backward_h, backward_c = Bidirectional(LSTM(rnn_cell_size, dropout=0.2, return_sequences=True, return_state=True))(embedded_sequences)
state_h = Concatenate()([forward_h, backward_h])
state_c = Concatenate()([forward_c, backward_c])
attention = Attention(8)
context_vector, attention_weights = attention(lstm, state_h)
output = Dense(1, activation='sigmoid')(context_vector)
model = Model(inputs=sequence_input, outputs=output)
# summarize layers
print(model.summary())
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=200, validation_split=.3, verbose=1)
result = model.evaluate(x_test, y_test)
print(result)
I would like to debug/inspect the Attention.call() function, but I'm not able to see the tensor values when I set a breakpoint inside the function.
Before I start .fit(), I can verify that eager execution is enabled:
print(tf.executing_eagerly())
True
But inside the Attention.call() function, eager execution is disabled:
print(tf.executing_eagerly())
False
Is there any reason for eager execution to be disabled during the call() execution? How can I enable it?

By default, a tf.keras model is compiled to a static graph to deliver the best execution performance. Think of it as if @tf.function were applied to the model's computation by default.
https://www.tensorflow.org/api_docs/python/tf/keras/Model#run_eagerly
To enable eager mode explicitly for a tf.keras model, compile the model with run_eagerly=True:
model.compile(optimizer='adam', run_eagerly=True, loss='binary_crossentropy', metrics=['accuracy'])
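If you want every tf.function-compiled piece of code (not just this model) to run eagerly while debugging, there is also a global switch; a small sketch, noting that TF 2.0 spells it tf.config.experimental_run_functions_eagerly while newer releases expose tf.config.run_functions_eagerly:

import tensorflow as tf

# Force code traced with tf.function (including the Keras training loop)
# to execute eagerly, so breakpoints inside call() see concrete tensor values.
# TF 2.0 spelling; newer versions use tf.config.run_functions_eagerly instead.
tf.config.experimental_run_functions_eagerly(True)

# ... build and fit the model as above ...

# Switch it off again afterwards, since eager execution is much slower.
tf.config.experimental_run_functions_eagerly(False)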

Related

Keras_tuner TypeError('Inputs to a layer should be tensors... )

I finally got the code below working, up to the keras_tuner part. I mention this because I'm not entirely sure I'm setting up the class structure correctly. I'm doing all of this so that I can pass in my own values for input_size and output_size (the input and output sizes of the deep layers).
But then I get this error:
raise TypeError('Inputs to a layer should be tensors. Got: %s' % (x,))
TypeError: Inputs to a layer should be tensors. Got: <keras_tuner.engine.hyperparameters.HyperParameters object at 0x000001650C5DCE50>
I don't know how to fix this, and I don't understand why it's even an issue, since everything before it doesn't throw an error.
from tensorflow import keras
from tensorflow.keras import layers, Sequential
from tensorflow.keras.layers import Dense, Dropout
import keras_tuner
from kerastuner import HyperModel
class CNNHyperModel(HyperModel):
    def call_existing_code(self, units, activation, dropout, lr, layers, optimizer, loss, input_size, output_size):
        model = Sequential()
        model.add(Dense(units=units, input_dim=input_size, activation=activation))
        for i in range(layers):
            model.add(Dense(units=units, activation=activation))
        if dropout:
            model.add(Dropout(rate=0.25))
        model.add(Dense(output_size, activation=activation))
        # model.add(Dense(10, activation="softmax"))
        model.compile(
            optimizer=optimizer,
            loss=loss,
            metrics=["accuracy"],
        )
        return model

    def build_model(test, input_size, output_size):
        hp = keras_tuner.HyperParameters()
        units = hp.Int("units", min_value=32, max_value=512, step=32)
        activation = hp.Choice("activation", ["relu", "tanh"])
        dropout = hp.Boolean("dropout")
        layers = hp.Int('layers', 2, 6)
        lr = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
        loss = hp.Choice("loss", ['sparse_categorical_crossentropy', 'categorical_crossentropy'])
        optimizer = hp.Choice("optimizer", ["adam", "RMSProp"])
        # call existing model-building code with the hyperparameter values.
        model = test.call_existing_code(
            units=units, activation=activation, dropout=dropout, lr=lr, layers=layers, optimizer=optimizer, loss=loss,
            input_size=input_size, output_size=output_size)
        return model

input_size = 11
output_size = 8
Testclass = CNNHyperModel(keras_tuner.HyperParameters())
built_model = Testclass.build_model(input_size, output_size)

tuner = keras_tuner.RandomSearch(
    hypermodel=built_model,
    objective="val_accuracy",
    max_trials=3,
    executions_per_trial=2,
    overwrite=True,
    directory="my_dir",
)
tuner.search_space_summary()
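For reference, keras_tuner's RandomSearch expects hypermodel to be a callable that receives the HyperParameters object (or a HyperModel whose build(hp) does) and returns a compiled model, rather than a model that has already been built. A minimal sketch along the lines of the keras_tuner docs, with the layer sizes below purely illustrative:

import keras_tuner
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

# Illustrative only: the tuner calls this with a HyperParameters instance.
def build_model(hp):
    units = hp.Int("units", min_value=32, max_value=512, step=32)
    model = Sequential()
    model.add(Dense(units, input_dim=11, activation="relu"))
    model.add(Dense(8, activation="softmax"))
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model

tuner = keras_tuner.RandomSearch(
    hypermodel=build_model,   # a callable taking hp, not an already-built model
    objective="val_accuracy",
    max_trials=3,
)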

Keep getting NaNs value for scoring when tuning on KerasRegressor

I am trying to tune hyperparameters on a KerasRegressor.
However, I only get NaN results, as shown below; may I know what causes the issue?
Everything works fine when I compile my model, but the scoring for the best parameters always shows NaN. The metric I use is RMSE.
Code snippet below:
def create_model(optimizer, activation, lstm_unit_1, lstm_unit_2, lstm_unit_3, init='glorot_uniform'):
    model = Sequential()
    model.add(Conv1D(lstm_unit_1, kernel_size=1, activation=activation, input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(GRU(lstm_unit_2, activation=activation, return_sequences=True, input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(GRU(lstm_unit_3, activation=activation, return_sequences=True, input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(Dense(units=1))
    model.add(Flatten())
    model.compile(optimizer=optimizer, loss='mse', metrics=['mean_squared_error'])
    return model

model = tf.keras.wrappers.scikit_learn.KerasRegressor(build_fn=create_model,
                                                       epochs=150,
                                                       verbose=False)

batch_size = [16, 32, 64, 128]
lstm_unit_1 = [128, 256, 512]
lstm_unit_2 = lstm_unit_1.copy()
lstm_unit_3 = lstm_unit_1.copy()
optimizer = ['SGD', 'Adam', 'Adamax', 'RMSprop']
activation = ['relu', 'linear', 'sigmoid']
param_grid = dict(lstm_unit_1=lstm_unit_1,
                  lstm_unit_2=lstm_unit_2,
                  lstm_unit_3=lstm_unit_3,
                  optimizer=optimizer,
                  activation=activation,
                  batch_size=batch_size)

warnings.filterwarnings("ignore")
random = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_jobs=-1, scoring='neg_mean_squared_error')
random_result = random.fit(trainX, trainY)
print(random_result.best_score_)
print(random_result.best_params_)
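One way to see what is actually failing (an assumption that the NaNs come from scikit-learn swallowing exceptions during cross-validation, which is its default behavior of recording error_score=nan when a fit fails): rerun the search with error_score='raise' so the underlying traceback surfaces. A small sketch that reuses model, param_grid, trainX and trainY from the snippet above:

from sklearn.model_selection import RandomizedSearchCV

# error_score='raise' makes a failed candidate raise instead of scoring NaN;
# n_jobs=1 keeps the traceback readable.
random = RandomizedSearchCV(estimator=model,
                            param_distributions=param_grid,
                            n_jobs=1,
                            scoring='neg_mean_squared_error',
                            error_score='raise')
random_result = random.fit(trainX, trainY)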

how to log KerasClassifier model in a sklearn pipeline mlflow?

I have a set of pre-processing stages in sklearn Pipeline and an estimator which is a KerasClassifier (from tensorflow.keras.wrappers.scikit_learn import KerasClassifier).
My overall goal is to tune and log the whole sklearn pipeline in MLflow (in a Databricks environment). I get a confusing type error which I can't figure out how to resolve:
TypeError: can't pickle _thread.RLock objects
I have the following code (without the tuning stage), which returns the above error:
conda_env = _mlflow_conda_env(
    additional_conda_deps=None,
    additional_pip_deps=[
        "cloudpickle=={}".format(cloudpickle.__version__),
        "scikit-learn=={}".format(sklearn.__version__),
        "numpy=={}".format(np.__version__),
        "tensorflow=={}".format(tf.__version__),
    ],
    additional_conda_channels=None,
)

search_space = {
    "estimator__dense_l1": 20,
    "estimator__dense_l2": 20,
    "estimator__learning_rate": 0.1,
    "estimator__optimizer": "Adam",
}

def create_model(n):
    model = Sequential()
    model.add(Dense(int(n["estimator__dense_l1"]), activation="relu"))
    model.add(Dense(int(n["estimator__dense_l2"]), activation="relu"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(
        loss="binary_crossentropy",
        optimizer=n["estimator__optimizer"],
        metrics=["accuracy"],
    )
    return model

mlflow.sklearn.autolog()
with mlflow.start_run(nested=True) as run:
    classfier = KerasClassifier(build_fn=create_model, n=search_space)
    # fit the pipeline
    clf = Pipeline(steps=[("preprocessor", preprocessor),
                          ("estimator", classfier)])
    h = clf.fit(
        X_train,
        y_train.values,
        estimator__validation_split=0.2,
        estimator__epochs=10,
        estimator__verbose=2,
    )
    # log scores
    acc_score = clf.score(X=X_test, y=y_test)
    mlflow.log_metric("accuracy", acc_score)
    signature = infer_signature(X_test, clf.predict(X_test))
    # Log the model with a signature that defines the schema of the model's inputs and outputs.
    mlflow.sklearn.log_model(
        sk_model=clf, artifact_path="model",
        signature=signature,
        conda_env=conda_env
    )
I also get this warning before the error:
WARNING mlflow.sklearn.utils: Truncated the value of the key `steps`. Truncated value: `[('preprocessor', ColumnTransformer(n_jobs=None, remainder='drop', sparse_threshold=0.3,
transformer_weights=None,
transformers=[('num',
Pipeline(memory=None,
Note that the whole pipeline runs fine outside MLflow.
Can someone help?
I think I found a sort of workaround/solution for this for now, but I think this issue still needs to be addressed in MLflow.
What I did is probably not the best way.
I used a Python package called scikeras that does this wrapping, and then I could log the model.
The code:
import scikeras
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense, Dropout, LSTM, Flatten, Activation
from scikeras.wrappers import KerasClassifier

class ModelWrapper(mlflow.pyfunc.PythonModel):
    def __init__(self, model):
        self.model = model

    def predict(self, context, model_input):
        return self.model.predict(model_input)

conda_env = _mlflow_conda_env(
    additional_conda_deps=None,
    additional_pip_deps=[
        "cloudpickle=={}".format(cloudpickle.__version__),
        "scikit-learn=={}".format(sklearn.__version__),
        "numpy=={}".format(np.__version__),
        "tensorflow=={}".format(tf.__version__),
        "scikeras=={}".format(scikeras.__version__),
    ],
    additional_conda_channels=None,
)

param = {
    "dense_l1": 20,
    "dense_l2": 20,
    "optimizer__learning_rate": 0.1,
    "optimizer": "Adam",
    "loss": "binary_crossentropy",
}

def create_model(dense_l1, dense_l2, meta):
    n_features_in_ = meta["n_features_in_"]
    X_shape_ = meta["X_shape_"]
    n_classes_ = meta["n_classes_"]
    model = Sequential()
    model.add(Dense(n_features_in_, input_shape=X_shape_[1:], activation="relu"))
    model.add(Dense(dense_l1, activation="relu"))
    model.add(Dense(dense_l2, activation="relu"))
    model.add(Dense(1, activation="sigmoid"))
    return model

mlflow.sklearn.autolog()
with mlflow.start_run(run_name="sample_run"):
    classfier = KerasClassifier(
        create_model,
        loss=param["loss"],
        dense_l1=param["dense_l1"],
        dense_l2=param["dense_l2"],
        optimizer__learning_rate=param["optimizer__learning_rate"],
        optimizer=param["optimizer"],
    )
    # fit the pipeline
    clf = Pipeline(steps=[('preprocessor', preprocessor),
                          ('estimator', classfier)])
    h = clf.fit(X_train, y_train.values)
    # log scores
    acc_score = clf.score(X=X_test, y=y_test)
    mlflow.log_metric("accuracy", acc_score)
    signature = infer_signature(X_test, clf.predict(X_test))
    model_nn = ModelWrapper(clf)
    mlflow.pyfunc.log_model(
        python_model=model_nn,
        artifact_path="model",
        signature=signature,
        conda_env=conda_env
    )

tf.train.Checkpoint is restoring or not?

I am running TensorFlow 2.4 on Colab. I tried to save the model using tf.train.Checkpoint(), since the model uses subclassing, but after restoration I saw that it didn't restore any weights of my model.
Here are a few snippets:
### From tensorflow tutorial nmt_with_attention
class Encoder(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        ...
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
    ...

class NMT_Train(tf.keras.Model):
    def __init__(self, inp_vocab_size, tar_vocab_size, max_length_inp, max_length_tar, emb_dims, units, batch_size, source_tokenizer, target_tokenizer):
        super(NMT_Train, self).__init__()
        self.encoder = Encoder(inp_vocab_size, emb_dims, units, batch_size)
        ...
    ...

model = NMT_Train(INP_VOCAB, TAR_VOCAB, MAXLEN, MAXLEN, EMB_DIMS, UNITS, BATCH_SIZE, english_tokenizer, hindi_tokenizer)
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
model.fit(dataset, epochs=2)

checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(checkpoint, './ckpts', max_to_keep=1)
manager.save()

model.encoder.gru.get_weights()  ### get the output
## [array([[-0.0627057 , 0.05900152, 0.06614069, ...
model.optimizer.get_weights()  ### get the output
## [90, array([[ 6.6851695e-05, -4.6736805e-06, -2.3183979e-05, ...
When I later restored it, I didn't get any GRU weights back:
model = NMT_Train(INP_VOCAB, TAR_VOCAB, MAXLEN, MAXLEN, EMB_DIMS, UNITS, BATCH_SIZE, english_tokenizer, hindi_tokenizer)
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
checkpoint = tf.train.Checkpoint(model = model)
manager = tf.train.CheckpointManager(checkpoint, './ckpts', max_to_keep=1)
manager.restore_or_initialize()
model.encoder.gru.get_weights() ### empty list
## []
model.optimizer.get_weights() ### empty list
## []
I also tried checkpoint.restore(manager.latest_checkpoint) but nothing changed.
Is there anything wrong that I am doing? Or can you suggest any other way to save the model so that I can retrain it for further epochs?
You are defining a Keras model, so why not use Keras model checkpoints?
From the Keras documentation:
model.compile(loss=..., optimizer=...,
              metrics=['accuracy'])

EPOCHS = 10
checkpoint_filepath = '/tmp/checkpoint'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)

# Model weights are saved at the end of every epoch, if it's the best seen
# so far.
model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])

# The model weights (that are considered the best) are loaded into the model.
model.load_weights(checkpoint_filepath)
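As a side note on the original question: tf.train.Checkpoint restores are deferred until the variables exist, which is one plausible reason get_weights() comes back empty right after restore_or_initialize() on a freshly constructed subclassed model. A minimal, self-contained sketch of that behavior (with a toy model, not the NMT_Train code above):

import tensorflow as tf

# Toy stand-in for a subclassed model (illustrative only).
class TinyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(4)

    def call(self, x):
        return self.dense(x)

# Save: variables must exist before they can be checkpointed.
model = TinyModel()
model(tf.zeros([1, 3]))
ckpt = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(ckpt, './ckpts_demo', max_to_keep=1)
manager.save()

# Restore into a fresh instance: tf.train.Checkpoint restores lazily, so the
# weights only show up once the new model has built its variables.
restored = TinyModel()
status = tf.train.Checkpoint(model=restored).restore(manager.latest_checkpoint)
print(restored.dense.get_weights())   # [] -- layer not built yet
restored(tf.zeros([1, 3]))            # building triggers the deferred restore
status.assert_existing_objects_matched()
print(restored.dense.get_weights())   # now the saved kernel and bias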

How to call functions of the class Sequential() if those are not in the source code?

I am quite new to machine learning and I am trying to implement my own custom layer in Keras. I found a couple of tutorials and it seems comparatively straightforward. What I do not understand, though, is how to plug my new custom layer into Sequential(). See for example this classification problem that I took from the TensorFlow website (https://www.tensorflow.org/tutorials/keras/basic_text_classification), posted here for your convenience:
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text])

train_data = keras.preprocessing.sequence.pad_sequences(train_data,
                                                        value=word_index["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
                                                       value=word_index["<PAD>"],
                                                       padding='post',
                                                       maxlen=256)
# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)
results = model.evaluate(test_data, test_labels)
print(results)
Do I have to change the source code for keras.Sequential() or is there an easy way?
Furthermore, looking at the source code for the class Sequential() made me wonder: I can't figure out how functions like 'summary()', 'compile()', 'fit()' and 'evaluate()' can be called if they are not even provided in the source code of this class. Here is the source code for Sequential():
https://github.com/keras-team/keras/blob/a1397169ddf8595736c01fcea084c8e34e1a3884/keras/engine/sequential.py
Sequential is a Model, and not a layer.
The functions you mention (summary, compile, fit, evaluate) are implemented in the Model class, and Sequential is a subclass of Model, so it inherits them.
If you're writing a custom layer, you should be subclassing Layer instead, and not Model or Sequential.
You would need to implement build, call, and compute_output_shape to create your own layer.
There are a few examples in the Keras documentation:
from keras import backend as K
from keras.layers import Layer
class MyLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
To use it, import the MyLayer class from whichever file you put it in, and then add it like the default Keras layers:
from custom.layers import MyLayer
model = keras.Sequential()
model.add(MyLayer(output_dim=64))
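To check that the layer wires up the way the answer describes, here is a small sanity-check sketch (assuming MyLayer is defined and imported as above; the IMDB-style sizes are purely illustrative):

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, GlobalAveragePooling1D

# Drop MyLayer into a tiny IMDB-style stack and confirm the output
# dimension matches output_dim.
model = Sequential()
model.add(Embedding(10000, 16, input_length=256))
model.add(GlobalAveragePooling1D())
model.add(MyLayer(output_dim=8))
dummy = np.random.randint(0, 10000, size=(2, 256))
print(model.predict(dummy).shape)  # expected: (2, 8)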
