versions: tensorboard==2.9.0, keras-tuner==1.1.2
Here is a simple binary-classification model, with the hyperparameters to search added via keras-tuner.
def build_model(hp):
    n_layers = 4
    n_features = len(X_train.columns)
    inputs = tf.keras.Input(shape=(n_features,))
    dense = tf.keras.layers.Dense(hp.Int("input_units", min_value=128, max_value=256, step=32),
                                  activation=hp.Choice("activation", ['relu', 'tanh'])
                                  )(inputs)
    dense = tf.keras.layers.Dropout(0.2)(dense)
    # num_layer as hyperparameter
    for i in range(hp.Int("dense_layer", 1, n_layers)):
        dense = tf.keras.layers.Dense(hp.Int(f"hidden_unit_{i}", 128, 256, 32),
                                      activation=hp.Choice("activation", ['relu', 'tanh'])
                                      )(dense)
    output = tf.keras.layers.Dense(1, activation='sigmoid')(dense)
    model = tf.keras.Model(inputs=inputs, outputs=output)

    lr = hp.Float("lr", min_value=1e-4, max_value=1e-1, sampling="log")
    model.compile(optimizer=tf.keras.optimizers.Adam(lr),
                  loss=tf.keras.losses.BinaryCrossentropy(),
                  metrics=metrics)
    return model
The hyperparameter search space would then be:
{neurons: [128, 160, 192, 224, 256],
 num_hidden_layers: [1, 2, 3],
 activation_function: ['relu', 'tanh'],
 learning_rate: [0.0001, 0.001, 0.01]}
Now start the search:
tuner = RandomSearch(
    build_model,
    objective=kt.Objective("val_binary_accuracy", direction="max"),
    max_trials=3,
    executions_per_trial=1,
    directory=LOG_DIR
)

tensorboard_cb = tf.keras.callbacks.TensorBoard('logs/hyp_tune/')
tuner.search(X_train, y_train, epochs=10, batch_size=512,
             validation_data=(X_test, y_test),
             callbacks=[tensorboard_cb]
             )
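For reference, the search space keras-tuner actually registers can be printed from the tuner object; a minimal check:
# print the registered hyperparameter search space
tuner.search_space_summary()
# after tuner.search(...) finishes, summarize the best trials
tuner.results_summary()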
According to the keras-tuner guide https://keras.io/guides/keras_tuner/visualize_tuning/ this should work fine and show the HParams when opening TensorBoard.
However when I select HPARAMS tab, it outputs message below:
No hparams data was found.
Probable causes:
You haven’t written any hparams data to your event files.
Event files are still being loaded (try reloading this page).
TensorBoard can’t find your event files.
If you’re new to using TensorBoard, and want to find out how to add data and set up your event files, check out the README and perhaps the TensorBoard tutorial.
If you think TensorBoard is configured properly, please see the section of the README devoted to missing data problems and consider filing an issue on GitHub.
I've tried re-running the search and restarting the notebook, but still no luck.
[EDIT]
When I launch TensorBoard with tensorboard --logdir='logs/t1', it should show logs/t1 on the left side of the screen below Runs. Instead it shows logs/t0, which is a previous run (a simple model run without hyperparameter tuning). I think the HPARAMS tab has no data because it is showing that previous run without hyperparameter tuning. How can I delete the previous log and load the new one? (Overwriting 'logs/t0' with the hyperparameter-tuning model works fine.)
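One simple way to clear the stale run before relaunching TensorBoard is to delete its directory from Python; a minimal sketch, where 'logs/t0' is the old run directory mentioned above:
import shutil
# remove the old run so only the new one shows up under Runs
shutil.rmtree('logs/t0', ignore_errors=True)
Alternatively, point --logdir at a directory that contains only the new run.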
I wrote this code and it runs correctly:
At the end, use these two commands to get your output:
%load_ext tensorboard
%tensorboard --logdir /logs/hyp_tune/
Full code:
# !pip install keras-tuner -q

import numpy as np
import keras_tuner
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

(x_train, y_train), (x_test, y_test) = (np.random.rand(1000, 4), np.random.rand(1000)), (np.random.rand(100, 4), np.random.rand(100))

def build_model(hp):
    n_layers = 4
    n_features = x_train.shape[1]
    inputs = tf.keras.Input(shape=(n_features,))
    dense = tf.keras.layers.Dense(hp.Int("input_units", min_value=128, max_value=256, step=32),
                                  activation=hp.Choice("activation", ['relu', 'tanh'])
                                  )(inputs)
    dense = tf.keras.layers.Dropout(0.2)(dense)
    # num_layer as hyperparameter
    for i in range(hp.Int("dense_layer", 1, n_layers)):
        dense = tf.keras.layers.Dense(hp.Int(f"hidden_unit_{i}", 128, 256, 32),
                                      activation=hp.Choice("activation", ['relu', 'tanh'])
                                      )(dense)
    output = tf.keras.layers.Dense(1, activation='sigmoid')(dense)
    model = tf.keras.Model(inputs=inputs, outputs=output)

    lr = hp.Float("lr", min_value=1e-4, max_value=1e-1, sampling="log")
    model.compile(optimizer=tf.keras.optimizers.Adam(lr),
                  loss=tf.keras.losses.BinaryCrossentropy(),
                  metrics=["accuracy"])
    return model

hp = keras_tuner.HyperParameters()
model = build_model(hp)
model.summary()

tuner = keras_tuner.RandomSearch(
    build_model,
    max_trials=10,
    overwrite=True,
    objective="val_accuracy",
    # Set a directory to store the intermediate results.
    directory="/logs/hyp_tune/",
)

tensorboard_cb = tf.keras.callbacks.TensorBoard('/logs/hyp_tune/')
tuner.search(
    x_train,
    y_train,
    validation_data=(x_test, y_test),
    batch_size=512,
    epochs=10,
    callbacks=[tensorboard_cb],
)
output:
%load_ext tensorboard
%tensorboard --logdir /logs/hyp_tune/
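To pull the search results back into code afterwards, a minimal sketch using the tuner object defined above:
# best hyperparameter values found during the search
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)

# best model, already built and trained with those hyperparameters
best_model = tuner.get_best_models(num_models=1)[0]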
Related
I am trying to display the accuracy and loss of my network as graphs in TensorBoard, but the training and validation data are shown as separate runs. I am still relatively inexperienced with TensorFlow and TensorBoard, so I hope you can spot the reason for this.
Here is my code:
import os
import time
import pickle

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard

print("Loading Data via Pickle")
X = pickle.load(open("X.pickle", "rb"))
y = pickle.load(open("y.pickle", "rb"))

print(len(X))
print(len(y))

startTime = time.time()

hidden_dense_layers = [0, 1, 2]
hidden_dense_layer_size = [64, 128, 256, 512, 1024]

for dense_layer_amount in hidden_dense_layers:
    for dense_layer_size in hidden_dense_layer_size:
        NAME = "{}-hidden_layers-{}-layersize".format(dense_layer_amount, dense_layer_size)
        print("----------", NAME, "----------")

        print("Building Model")
        # model = keras.Sequential([
        #     keras.layers.Flatten(input_shape=(200, 200)),
        #     keras.layers.Dense(500, activation="relu"),
        #     keras.layers.Dense(1, activation="sigmoid")
        # ])
        model = keras.Sequential()
        model.add(keras.layers.Flatten(input_shape=(75, 75)))
        for i in range(dense_layer_amount):
            model.add(keras.layers.Dense(dense_layer_size, activation="relu"))
        model.add(keras.layers.Dense(1, activation="sigmoid"))

        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        print("Creating Callbacks")
        print("Creating Checkpoint Callback")
        checkpoint_path = "training_2/cp-{epoch:04d}.ckpt"
        checkpoint_dir = os.path.dirname(checkpoint_path)

        # Create a callback that saves the model's weights
        checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path,
            save_weights_only=True,
            verbose=1
        )

        print("Creating Tensorboard Callback")
        tensorboard_callback = TensorBoard(log_dir="logs/{}".format(NAME))

        print("Training Model")
        model.fit(
            X,
            y,
            # batch_size=32,
            epochs=10,
            callbacks=[
                # checkpoint_callback,
                tensorboard_callback
            ],
            validation_split=0.3
        )
Here is how the runs are displayed for me:
Here is how the graphs are displayed for me:
It is completely normal to have two curves on both graphs. Each curve corresponds to the training data or the validation data (resp. orange and blue on your plots). Each epoch is a two-step process:
first you get the actual model parameter tuning with gradient descent, the training step. The training curve tells you whether you are learning something (e.g.: is the model complex enough for the given task?).
secondly you need to make sure that the trained model performs well on data that has not been used to tune the parameters; this is the validation step. The validation curve tells you how close you are to an overfitting situation (meaning that you get good performance on the tuning data, but the model is very bad when fed "new data").
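A quick way to check the same thing numerically is to compare the final training and validation metrics that fit returns; a minimal sketch using the history object (on older Keras versions the keys are 'acc' / 'val_acc' instead):
history = model.fit(X, y, epochs=10, validation_split=0.3,
                    callbacks=[tensorboard_callback])

# a large gap between these two numbers is the overfitting signal
# that the two TensorBoard curves visualize
print("final train accuracy:", history.history["accuracy"][-1])
print("final val accuracy:", history.history["val_accuracy"][-1])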
I'm working in Google Colab and trying to train a model built from VGG blocks, like this:
# assumed imports (standalone Keras; not shown here):
# import keras
# from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
# from keras.models import Model
# from keras.utils import plot_model

METRICS = [
    keras.metrics.TruePositives(name='tp'),
    keras.metrics.FalsePositives(name='fp'),
    keras.metrics.TrueNegatives(name='tn'),
    keras.metrics.FalseNegatives(name='fn'),
    keras.metrics.BinaryAccuracy(name='accuracy'),
    keras.metrics.Precision(name='precision'),
    keras.metrics.Recall(name='recall'),
    keras.metrics.AUC(name='auc'),
]

# function for creating a vgg block
def vgg_block(layer_in, n_filters, n_conv):
    # add convolutional layers
    for _ in range(n_conv):
        layer_in = Conv2D(n_filters, (3, 3), padding='same', activation='relu')(layer_in)
    # add max pooling layer
    layer_in = MaxPooling2D((2, 2), strides=(2, 2))(layer_in)
    return layer_in

# define model input
visible = Input(shape=(256, 256, 3))
# add vgg module
layer = vgg_block(visible, 64, 2)
#####################################
flat = Flatten()(layer)
hidden1 = Dense(128, activation='relu')(flat)
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=output)
print(model.summary())

# plot model architecture
plot_model(model, show_shapes=True, to_file='vgg_block.png')

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=METRICS)

# New lines to obtain the best model in terms of validation accuracy
from keras.callbacks import ModelCheckpoint

filepath = "weights-improvement-{epoch:02d}-{val_accuracy:.2f}.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
But when I try to use model.fit_generator, it gives me an error. The code I'm using is:
history = model.fit_generator(
    train_generator,
    steps_per_epoch=2000 // batch_size,
    epochs=20,
    validation_data=validation_generator,
    validation_steps=800 // batch_size,
    callbacks=callbacks_list
)
I have tried everything and I don't know what to do. It gives me the following error:
NotFoundError: 2 root error(s) found.
(0) Not found: Resource localhost/total/N10tensorflow3VarE does not exist.
[[{{node metrics/accuracy/AssignAddVariableOp}}]]
[[metrics/precision/Mean/_87]]
(1) Not found: Resource localhost/total/N10tensorflow3VarE does not exist.
[[{{node metrics/accuracy/AssignAddVariableOp}}]]
0 successful operations.
0 derived errors ignored.
I would appreciate any help. I'm kind of new here. What could I do? Thanks!
It seems the problem arises only when using native Keras. I implemented your code and modified it for TensorFlow 2.x as below:
%tensorflow_version 2.x
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dense, Flatten
from tensorflow.keras.models import Model

METRICS = [
    keras.metrics.TruePositives(name='tp'),
    keras.metrics.FalsePositives(name='fp'),
    keras.metrics.TrueNegatives(name='tn'),
    keras.metrics.FalseNegatives(name='fn'),
    keras.metrics.BinaryAccuracy(name='accuracy'),
    keras.metrics.Precision(name='precision'),
    keras.metrics.Recall(name='recall'),
    keras.metrics.AUC(name='auc'),
]

# function for creating a vgg block
def vgg_block(layer_in, n_filters, n_conv):
    # add convolutional layers
    for _ in range(n_conv):
        layer_in = Conv2D(n_filters, (3, 3), padding='same', activation='relu')(layer_in)
    # add max pooling layer
    layer_in = MaxPooling2D((2, 2), strides=(2, 2))(layer_in)
    return layer_in

# define model input
visible = Input(shape=(256, 256, 3))
# add vgg module
layer = vgg_block(visible, 64, 2)
#####################################
flat = Flatten()(layer)
hidden1 = Dense(128, activation='relu')(flat)
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=output)
print(model.summary())

# # plot model architecture
# plot_model(model, show_shapes=True, to_file='vgg_block.png')

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=METRICS)

# New lines to obtain the best model in terms of validation accuracy
from tensorflow.keras.callbacks import ModelCheckpoint

filepath = "weights-improvement-{epoch:02d}-{val_accuracy:.2f}.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]

## Synthetic Inputs
train_input = tf.random.normal((100, 256, 256, 3))
train_output = tf.random.normal((100, 1))

# Test Model.fit same as Model.fit_generator in TF 2.1.0
model.fit(train_input, train_output, epochs=1)
The problem didn't show up and it works properly.
You can try this in TF 2.x instead. I hope this solves your problem.
The problem is with the Keras version. Try Keras 2.3.0 or above, where the new class-based metrics were introduced.
Keras documentation
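A minimal way to check which Keras version is actually being imported (assuming the standalone keras package is what your original code uses):
import keras
print(keras.__version__)   # the class-based metrics need >= 2.3.0
# if it is older, upgrade with: pip install --upgrade "keras>=2.3.0"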
Hi, I'm learning about k-fold cross-validation. This first snippet of code builds a simple ANN:
def buildModel():
    # Fitting classifier to the Training set
    # Create your classifier here
    model = Sequential()
    model.add(Dense(units=6, input_dim=X.shape[1], activation='relu'))
    model.add(Dense(units=6, activation='relu'))
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
I then used cross_val_score from sklearn to run the ANN.
Keras is also running on my GPU.
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
model = KerasClassifier(build_fn = buildModel, batch_size = 10, epochs =100)
accuracies = cross_val_score(estimator = model, X = X_train, y = y_train, cv = 10, n_jobs = -1)
But if I set n_jobs = -1 to try to use all cores, I get an error (P.S. I have 11 features):
Blas GEMM launch failed : a.shape=(10, 11), b.shape=(11, 6), m=10, n=6, k=11
[[node dense_1/MatMul (defined at C:\Users\Brandon Cardillo\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\ops.py:1751) ]]
[Op:__inference_keras_scratch_graph_1030]
Function call stack:
keras_scratch_graph
P.S. I am also running in a Jupyter notebook.
Any help is very much appreciated.
Thank you.
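For what it's worth, a "Blas GEMM launch failed" error typically means the GPU ran out of memory; with n_jobs = -1, cross_val_score spawns one worker process per core and each of them tries to claim the whole GPU. Two common mitigations are keeping n_jobs = 1 on a single-GPU machine, or enabling GPU memory growth; a minimal sketch of the latter, assuming TensorFlow 2.x:
import tensorflow as tf

# let each process allocate GPU memory on demand instead of reserving it all up front
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)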
I'm following the "How to train Keras model x20 times faster with TPU for free" guide (click here) to run a Keras model on Google Colab's TPU. It works perfectly. But... I like to use cosine-restart learning-rate decay when I fit my models. I've coded up my own as a Keras callback, but it won't work within this framework because the TensorFlow TFOptimizer class doesn't have a learning-rate variable that can be reset. I see that TensorFlow itself has a bunch of decay functions in tf.train, like tf.train.cosine_decay, but I can't figure out how to embed them within my model.
Here's the basic code from that blog post. Anyone have a fix?
import tensorflow as tf
import os
from tensorflow.python.keras.layers import Input, LSTM, Bidirectional, Dense, Embedding

def make_model(batch_size=None):
    source = Input(shape=(maxlen,), batch_size=batch_size,
                   dtype=tf.int32, name='Input')
    embedding = Embedding(input_dim=max_features,
                          output_dim=128, name='Embedding')(source)
    lstm = LSTM(32, name='LSTM')(embedding)
    predicted_var = Dense(1, activation='sigmoid', name='Output')(lstm)
    model = tf.keras.Model(inputs=[source], outputs=[predicted_var])
    model.compile(
        optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
        loss='binary_crossentropy',
        metrics=['acc'])
    return model

training_model = make_model(batch_size=128)

# This address identifies the TPU we'll use when configuring TensorFlow.
TPU_WORKER = 'grpc://' + os.environ['COLAB_TPU_ADDR']
tf.logging.set_verbosity(tf.logging.INFO)

tpu_model = tf.contrib.tpu.keras_to_tpu_model(
    training_model,
    strategy=tf.contrib.tpu.TPUDistributionStrategy(
        tf.contrib.cluster_resolver.TPUClusterResolver(TPU_WORKER)))

history = tpu_model.fit(x_train, y_train,
                        epochs=20,
                        batch_size=128 * 8,
                        validation_split=0.2)
One option is to set the learning rates manually from a callback - there is a Keras+TPU example with such a callback here: https://github.com/tensorflow/tpu/blob/master/models/experimental/resnet50_keras/resnet50.py#L197-L201
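In rough outline, that idea looks like the sketch below: compute the cosine-restart schedule yourself and hand it to a LearningRateScheduler callback. This is only a per-epoch sketch and it assumes an optimizer whose learning rate Keras can actually set (a plain tf.keras optimizer, not the TFOptimizer wrapper mentioned in the question):
import math
from tensorflow.keras.callbacks import LearningRateScheduler

def cosine_restarts(lr0=1e-3, first_period=10, t_mul=2.0, m_mul=1.0, alpha=0.1):
    """Return an epoch -> learning-rate function with SGDR-style warm restarts."""
    def schedule(epoch, lr=None):
        period, start, peak = first_period, 0, lr0
        # find the restart cycle this epoch falls into
        while epoch >= start + period:
            start += period
            period = max(1, int(period * t_mul))
            peak *= m_mul
        progress = (epoch - start) / period
        cosine = 0.5 * (1.0 + math.cos(math.pi * progress))
        return peak * (alpha + (1.0 - alpha) * cosine)
    return schedule

lr_callback = LearningRateScheduler(cosine_restarts(lr0=1e-3, first_period=5), verbose=1)
# history = model.fit(x_train, y_train, epochs=20, callbacks=[lr_callback], ...)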
The following seems to work, where lr is the initial learning rate you choose and n_steps is the number of initial steps over which you want the cosine decay to run.
def make_model(batch_size=None, lr=1.e-3, n_steps=2000):
    source = Input(shape=(maxlen,), batch_size=batch_size,
                   dtype=tf.int32, name='Input')
    embedding = Embedding(input_dim=max_features,
                          output_dim=128, name='Embedding')(source)
    lstm = LSTM(32, name='LSTM')(embedding)
    predicted_var = Dense(1, activation='sigmoid', name='Output')(lstm)
    model = tf.keras.Model(inputs=[source], outputs=[predicted_var])

    # implement cosine decay or other learning rate decay here
    global_step = tf.Variable(0)
    global_step = 1
    learning_rate = tf.train.cosine_decay_restarts(
        learning_rate=lr,
        global_step=global_step,
        first_decay_steps=n_steps,
        t_mul=1.5,
        m_mul=1.,
        alpha=0.1
    )

    # now feed this into the optimizer as shown below
    model.compile(
        optimizer=tf.train.RMSPropOptimizer(learning_rate=learning_rate),
        loss='binary_crossentropy',
        metrics=['acc'])
    return model
I encountered some problems when I continued training my model and visualized the progress in TensorBoard.
My question is: how do I resume training from the same step without specifying any epoch manually? If possible, simply by loading the saved model, it would somehow read the global_step from the saved optimizer and continue training from there.
I have provided some code below to reproduce a similar issue.
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import load_model

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=10, callbacks=[TensorBoard()])
model.save('./final_model.h5', include_optimizer=True)

del model

model = load_model('./final_model.h5')
model.fit(x_train, y_train, epochs=10, callbacks=[TensorBoard()])
You can run TensorBoard using the command:
tensorboard --logdir ./logs
You can set the parameter initial_epoch in model.fit() to the number of the epoch you want your training to start from. Keep in mind that the model trains until the epoch with index epochs is reached (epochs is not a number of additional iterations).
In your example, if you want to train for 10 more epochs, it should be:
model.fit(x_train, y_train, initial_epoch=9, epochs=19, callbacks=[TensorBoard()])
This will allow you to visualise your plots in TensorBoard in a correct manner.
More extensive information about these parameters can be found in the docs.
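As a general rule, fit runs epochs - initial_epoch epochs, with the logged epoch index running from initial_epoch up to epochs - 1; a minimal illustration:
# first run: trains and logs epochs 0..9
model.fit(x_train, y_train, epochs=10, callbacks=[TensorBoard()])

# resumed run: trains 10 more epochs, logged as 10..19 in TensorBoard
model.fit(x_train, y_train, initial_epoch=10, epochs=20, callbacks=[TensorBoard()])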
Here is sample code in case someone needs it. It implements the idea proposed by Abhinav Anand:
# imports assumed by this snippet
from os.path import join
from glob import glob
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.models import load_model

# dir / fold_dir: the directory holding the checkpoints and logs (defined elsewhere)
mca = ModelCheckpoint(join(dir, 'model_{epoch:03d}.h5'),
                      monitor='loss',
                      save_best_only=False)
tb = TensorBoard(log_dir=join(dir, 'logs'),
                 write_graph=True,
                 write_images=True)

files = sorted(glob(join(fold_dir, 'model_???.h5')))
if files:
    model_file = files[-1]
    initial_epoch = int(model_file[-6:-3])
    print('Resuming using saved model %s.' % model_file)
    model = load_model(model_file)
else:
    model = nn.model()
    initial_epoch = 0

model.fit(x_train,
          y_train,
          epochs=100,
          initial_epoch=initial_epoch,
          callbacks=[mca, tb])
Replace nn.model() with your own function for defining the model.
It's very simple. Create checkpoints while training the model and then use those checkpoints to resume training from where you left off.
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=10, callbacks=[TensorBoard()])
model.save('./final_model.h5', include_optimizer=True)

model = load_model('./final_model.h5')

callbacks = list()

tensorboard = TensorBoard()
callbacks.append(tensorboard)

file_path = "model-{epoch:02d}-{loss:.4f}.hdf5"

# now here you can create checkpoints and save according to your need
# here period is the number of epochs after which to save the model during training
# another option is save_weights_only; for your case it should be False
checkpoints = ModelCheckpoint(file_path, monitor='loss', verbose=1, period=1, save_weights_only=False)
callbacks.append(checkpoints)

model.fit(x_train, y_train, epochs=10, callbacks=callbacks)
After this, just load the checkpoint from which you want to resume training:
model = load_model(checkpoint_of_choice)
model.fit(x_train, y_train, epochs=10, callbacks=callbacks)
And you are done.
Let me know if you have more questions about this.