How to evaluate an output label in tensorflow trained model - python

I have the following TensorFlow model:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense

def build_model():
    model = keras.Sequential([
        Dense(20, activation=tf.nn.relu, input_shape=[len(all_data[0])]),
        Dense(20, activation=tf.nn.relu, input_shape=[20]),
        Dense(20, activation=tf.nn.relu, input_shape=[20]),
        Dense(1, activation=tf.nn.sigmoid)
    ])
    model.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss=tf.keras.losses.binary_crossentropy,
        metrics=[
            tf.keras.metrics.BinaryAccuracy(name='accuracy')
        ]
    )
    return model
And I have trained the model using the following:
model = build_model()
history = model.fit(all_data, all_labels, epochs=1000)
I would like to evaluate the output on the input [0,33,1,0].
How can I do this?
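For reference, a minimal sketch of one way to do this (assuming the model was trained on four-feature rows like the one above): wrap the sample in a batch of size one, call model.predict, and threshold the sigmoid output to get a binary label.

import numpy as np

# predict expects a batch, so wrap the single sample: shape (1, 4).
sample = np.array([[0, 33, 1, 0]], dtype=np.float32)

prob = model.predict(sample)[0][0]   # sigmoid output in [0, 1]
label = int(prob > 0.5)              # binary label at the usual 0.5 threshold
print(prob, label)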

Related

Loss does not change during training of my model

I want to predict a time series using a CNN-LSTM model. This is my model:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import (Conv1D, BatchNormalization, MaxPool1D,
                                     LSTM, Dense)
from tensorflow_addons.metrics import RSquare  # RSquare comes from tensorflow_addons

def generate_model():
    model = keras.models.Sequential([
        Conv1D(64, 3, padding='causal', activation='relu', input_shape=(24, 20)),
        BatchNormalization(),
        Conv1D(64, 3, padding='causal', activation='relu'),
        BatchNormalization(),
        Conv1D(32, 3, padding='causal', activation='relu'),
        MaxPool1D(3),
        LSTM(100, dropout=0.2, return_sequences=True),
        LSTM(50, dropout=0.3),
        Dense(1, activation='relu')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss='mean_squared_error',
                  metrics=[tf.keras.metrics.MeanAbsoluteError(),
                           tf.keras.metrics.RootMeanSquaredError(),
                           RSquare()])
    return model
Then I use this line of code to train my model:
history1 = model1.fit(X1_train, y1_train, epochs=200, batch_size=32, validation_data=(X1_test, y1_test), verbose=2, callbacks=callbacks)
But the values of the loss and metrics stay the same and do not change from epoch to epoch.
These are my callbacks, just in case:
from keras.callbacks import LearningRateScheduler

def decay_schedule(epoch, lr):
    # Subtract a fixed 1e-4 from the current learning rate every epoch.
    lr = lr - 0.0001
    return lr

lr_scheduler = LearningRateScheduler(decay_schedule)
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='max',
                                            min_delta=1e-3, patience=50)
callbacks = [lr_scheduler, callback]
Thank you in advance.
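A note on the schedule above (an observation, not part of the original post): subtracting a fixed 1e-4 every epoch drives Adam's default learning rate of 1e-3 to zero after 10 epochs and negative thereafter, which can freeze or destabilize training; mode='max' on val_loss also inverts the early-stopping logic. A minimal sketch of a safer setup:

def decay_schedule(epoch, lr):
    # Multiplicative decay keeps the learning rate strictly positive.
    return lr * 0.98

lr_scheduler = LearningRateScheduler(decay_schedule)

# val_loss should be minimized, so use mode='min' (or leave the default 'auto').
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',
                                                  min_delta=1e-3, patience=50)
callbacks = [lr_scheduler, early_stopping]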

Keras_tuner TypeError('Inputs to a layer should be tensors... )

I was finally able to get the code below working, up to the keras_tuner part. I mention this because I'm not exactly sure I've even set up the class structure correctly. I'm doing all of this to try to pass my own values for "input_size" and "output_size" as the input and output sizes of the dense layers.
But then I get this error:
raise TypeError('Inputs to a layer should be tensors. Got: %s' % (x,))
TypeError: Inputs to a layer should be tensors. Got: <keras_tuner.engine.hyperparameters.HyperParameters object at 0x000001650C5DCE50>
I don't know how to fix this, or why it would even be an issue, since everything before it runs without throwing an error.
from tensorflow import keras
from tensorflow.keras import layers, Sequential
from tensorflow.keras.layers import Dense, Dropout
import keras_tuner
from kerastuner import HyperModel

class CNNHyperModel(HyperModel):
    def call_existing_code(self, units, activation, dropout, lr, layers,
                           optimizer, loss, input_size, output_size):
        model = Sequential()
        model.add(Dense(units=units, input_dim=input_size, activation=activation))
        for i in range(layers):
            model.add(Dense(units=units, activation=activation))
        if dropout:
            model.add(Dropout(rate=0.25))
        model.add(Dense(output_size, activation=activation))
        # model.add(Dense(10, activation="softmax"))
        model.compile(
            optimizer=optimizer,
            loss=loss,
            metrics=["accuracy"],
        )
        return model

    def build_model(test, input_size, output_size):
        hp = keras_tuner.HyperParameters()
        units = hp.Int("units", min_value=32, max_value=512, step=32)
        activation = hp.Choice("activation", ["relu", "tanh"])
        dropout = hp.Boolean("dropout")
        layers = hp.Int('layers', 2, 6)
        lr = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
        loss = hp.Choice("loss", ['sparse_categorical_crossentropy',
                                  'categorical_crossentropy'])
        optimizer = hp.Choice("optimizer", ["adam", "RMSProp"])
        # Call existing model-building code with the hyperparameter values.
        model = test.call_existing_code(
            units=units, activation=activation, dropout=dropout, lr=lr,
            layers=layers, optimizer=optimizer, loss=loss,
            input_size=input_size, output_size=output_size)
        return model

input_size = 11
output_size = 8

Testclass = CNNHyperModel(keras_tuner.HyperParameters())
built_model = Testclass.build_model(input_size, output_size)

tuner = keras_tuner.RandomSearch(
    hypermodel=built_model,
    objective="val_accuracy",
    max_trials=3,
    executions_per_trial=2,
    overwrite=True,
    directory="my_dir",
)
tuner.search_space_summary()
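For context (an observation about the keras_tuner API, not from the original post): RandomSearch expects hypermodel to be a HyperModel instance (whose build(self, hp) the tuner calls) or a function taking an hp argument, and the tuner supplies a fresh HyperParameters object on every trial. Passing an already-built Keras model means the tuner ends up calling that model with a HyperParameters object as its input, which is exactly the TypeError shown above. A minimal sketch of the expected shape, with the extra sizes passed through __init__ (SizedHyperModel is a hypothetical name):

import keras_tuner
from keras_tuner import HyperModel
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

class SizedHyperModel(HyperModel):
    def __init__(self, input_size, output_size):
        self.input_size = input_size
        self.output_size = output_size

    def build(self, hp):
        # The tuner calls build(hp) itself, with a fresh HyperParameters per trial.
        units = hp.Int("units", min_value=32, max_value=512, step=32)
        activation = hp.Choice("activation", ["relu", "tanh"])
        model = Sequential()
        model.add(Dense(units, input_dim=self.input_size, activation=activation))
        model.add(Dense(self.output_size, activation="softmax"))
        model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                      metrics=["accuracy"])
        return model

# Pass the HyperModel instance itself, not a built model.
tuner = keras_tuner.RandomSearch(
    hypermodel=SizedHyperModel(input_size=11, output_size=8),
    objective="val_accuracy",
    max_trials=3,
    overwrite=True,
    directory="my_dir",
)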

Keep getting NaNs value for scoring when tuning on KerasRegressor

I am trying to tune hyperparameters on a KerasRegressor.
However, I only get NaN results, as shown below. May I know what causes the issue?
Everything works fine when I compile my model, but the scoring for the best parameters always shows NaNs. The metric I used is RMSE.
Code snippet below:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, GRU, Dense, Flatten

def create_model(optimizer, activation, lstm_unit_1, lstm_unit_2, lstm_unit_3,
                 init='glorot_uniform'):
    model = Sequential()
    model.add(Conv1D(lstm_unit_1, kernel_size=1, activation=activation,
                     input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(GRU(lstm_unit_2, activation=activation, return_sequences=True,
                  input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(GRU(lstm_unit_3, activation=activation, return_sequences=True,
                  input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(Dense(units=1))
    model.add(Flatten())
    model.compile(optimizer=optimizer, loss='mse', metrics=['mean_squared_error'])
    return model
import warnings
import tensorflow as tf
from sklearn.model_selection import RandomizedSearchCV

model = tf.keras.wrappers.scikit_learn.KerasRegressor(build_fn=create_model,
                                                      epochs=150,
                                                      verbose=False)

batch_size = [16, 32, 64, 128]
lstm_unit_1 = [128, 256, 512]
lstm_unit_2 = lstm_unit_1.copy()
lstm_unit_3 = lstm_unit_1.copy()
optimizer = ['SGD', 'Adam', 'Adamax', 'RMSprop']
activation = ['relu', 'linear', 'sigmoid']

param_grid = dict(lstm_unit_1=lstm_unit_1,
                  lstm_unit_2=lstm_unit_2,
                  lstm_unit_3=lstm_unit_3,
                  optimizer=optimizer,
                  activation=activation,
                  batch_size=batch_size)

warnings.filterwarnings("ignore")
random = RandomizedSearchCV(estimator=model, param_distributions=param_grid,
                            n_jobs=-1, scoring='neg_mean_squared_error')
random_result = random.fit(trainX, trainY)

print(random_result.best_score_)
print(random_result.best_params_)
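A debugging suggestion (an assumption about the likely cause, not from the original post): scikit-learn's search CV records NaN for any parameter combination whose fit raises an exception, because error_score defaults to np.nan. Setting error_score='raise' re-raises the underlying exception so the real failure becomes visible:

random = RandomizedSearchCV(estimator=model,
                            param_distributions=param_grid,
                            n_jobs=1,                # single process so the traceback stays readable
                            scoring='neg_mean_squared_error',
                            error_score='raise')     # surface the real error instead of NaN
random_result = random.fit(trainX, trainY)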

Tensorflow Keras multiple input model

I need to adapt this model to take two text columns as input (instead of one column):
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.optimizers import Adam

tfhub_handle_encoder = "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1"
tfhub_handle_preprocess = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"

def build_classifier_model():
    text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
    preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    net = tf.keras.layers.Dense(6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model(text_input, net)
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model

history = classifier_model.fit(
    x=X_train['f'].values,
    y=y_train_c,
    validation_data=(X_valid['f'].values, y_valid_c),
    epochs=15)
This seems to be the model from this tutorial: https://www.tensorflow.org/text/tutorials/classify_text_with_bert
I have tried modifying the code for two input layers, but I get an error because the tensor dimensions are wrong after the concatenation:
def build_classifier_model():
    input1 = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
    input2 = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text1')
    text_input = tf.keras.layers.concatenate([input1, input2], axis=-1)
    preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    net = tf.keras.layers.Dense(6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model([input1, input2], net)
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model
Error:
InvalidArgumentError: logits and labels must be broadcastable: logits_size=[64,6] labels_size=[32,6]
[[node categorical_crossentropy/softmax_cross_entropy_with_logits (defined at tmp/ipykernel_39/1837193519.py:5) ]] [Op:__inference_train_function_271676]
If I use concatenate along another dimension, the model doesn't compile.
Weirdly enough, replacing your Concatenation layer with tf.strings.join inside your model seems to work:
def build_classifier_model():
    input1 = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
    input2 = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text1')
    text_input = tf.strings.join([input1, input2])
    preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    output = tf.keras.layers.Dense(6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model([input1, input2], output)
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model
Epoch 1/5
497/1094 [============>.................] - ETA: 2:14 - loss: 1.8664 - accuracy: 0.1641
You could also consider simply doing text_input = input1 + input2, since the Concatenation layer seems to mess up the batch dimension. Or you could feed each input to your encoder and concatenate the results afterwards:
def build_classifier_model():
    input1 = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
    input2 = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text1')
    preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
    encoder_input1 = preprocessing_layer(input1)
    encoder_input2 = preprocessing_layer(input2)
    encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    output1 = encoder(encoder_input1)
    output2 = encoder(encoder_input2)
    net = tf.keras.layers.Concatenate(axis=-1)([output1['pooled_output'],
                                                output2['pooled_output']])
    net = tf.keras.layers.Dropout(0.1)(net)
    output = tf.keras.layers.Dense(6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model([input1, input2], output)
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model

How to add svm on top of cnn as final classifier?

I'm working on a sentiment analysis task and I want to add an SVM layer on top of the CNN as a final classifier. How can I do that without using hinge loss?
import keras
from keras.models import Model
from keras.layers import (Input, Embedding, Conv1D, GlobalMaxPooling1D,
                          Dense, Dropout, Activation, concatenate)

tweet_input = Input(shape=(seq_len,), dtype='int32')
tweet_encoder = Embedding(vocabulary_size, EMBEDDING_DIM,
                          input_length=seq_len, trainable=True)(tweet_input)
bigram_branch = Conv1D(filters=64, kernel_size=2, padding='same',
                       activation='relu', strides=1)(tweet_encoder)
bigram_branch = GlobalMaxPooling1D()(bigram_branch)
trigram_branch = Conv1D(filters=32, kernel_size=3, padding='same',
                        activation='relu', strides=1)(tweet_encoder)
trigram_branch = GlobalMaxPooling1D()(trigram_branch)
fourgram_branch = Conv1D(filters=16, kernel_size=4, padding='same',
                         activation='relu', strides=1)(tweet_encoder)
fourgram_branch = GlobalMaxPooling1D()(fourgram_branch)
merged = concatenate([bigram_branch, trigram_branch, fourgram_branch], axis=1)
merged = Dense(512, activation='softmax')(merged)
merged = Dropout(0.8)(merged)
merged = Dense(2)(merged)
output = Activation('sigmoid')(merged)

model = Model(inputs=[tweet_input], outputs=[output])
adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                             epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='hinge',
              optimizer=adam,
              metrics=['accuracy'])
model.summary()
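One common approach (a sketch of a general technique, not a confirmed fix for this exact model): train the CNN as-is, then use its penultimate representation as a feature extractor and fit a scikit-learn SVM on those features, so no hinge loss is needed inside Keras. The names X_train, y_train, X_test, and y_test below are assumed training/test splits (with integer class labels for the SVM), and the layer index is an assumption about the architecture above:

from sklearn.svm import SVC

# Feature extractor up to the 512-unit Dense layer (layers[-4] in the model above).
feature_extractor = Model(inputs=model.input, outputs=model.layers[-4].output)

# Encode tweets into fixed-length feature vectors with the trained CNN.
train_features = feature_extractor.predict(X_train)  # X_train: assumed padded id sequences
test_features = feature_extractor.predict(X_test)

# Fit an SVM on the extracted features; kernel and C are illustrative defaults.
svm = SVC(kernel='linear', C=1.0)
svm.fit(train_features, y_train)   # y_train: assumed integer class labels
print(svm.score(test_features, y_test))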
