Why can't I save my model with TensorFlow Lite? (Python)

Once training is finished, I need to save and convert the model so I can export it later, but I get the following error:
converter = tf.lite.TFLiteConverter.from_keras_model_file('models/modelo.h5')
AttributeError: type object 'TFLiteConverterV2' has no attribute 'from_keras_model_file'
To be honest, I found a similar problem on the web, but it doesn't fit my case, and the answer given there is not very explicit.
Here is my code:
import tensorflow as tf
from tensorflow import keras

# dataset
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# class myCallback(tf.keras.callbacks.Callback):
#     def on_epoch_end(self, epoch, logs={}):
#         # If you are using Tensorflow 1.x, replace 'accuracy' for 'acc' in the next line
#         if(logs.get('accuracy') > 0.99):
#             print("\nReached 99.0% accuracy so cancelling training!")
#             self.model.stop_training = True

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(x_train,
          y_train,
          epochs=25,)
          # callbacks=[myCallback()])

# Evaluate the model
model.evaluate(x_test, y_test)

# Save the model
model.save('models/modelo.h5')

# Convert the model.
converter = tf.lite.TFLiteConverter.from_keras_model_file('models/modelo.h5')
tflite_model = converter.convert()
open("models/converted_mnist_model.tflite", "wb").write(tflite_model)

Related

Stop training model when accuracy reached greater than 0.99

I want to stop training my model once it reaches a certain accuracy threshold, so I have written a TensorFlow callback class. I'm training on the MNIST dataset to classify handwritten digits. But for some reason the training does not stop, and I can't find the reason. Here is my code.
import tensorflow as tf

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('accuracy') > 0.99):
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

callbacks = myCallback()

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
Try this
class StopOnPoint(tf.keras.callbacks.Callback):
    def __init__(self, point):
        super(StopOnPoint, self).__init__()
        self.point = point

    def on_epoch_end(self, epoch, logs=None):
        accuracy = logs["accuracy"]
        if accuracy >= self.point:
            self.model.stop_training = True

callbacks = [StopOnPoint(0.98)]  # <- set optimal point
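For completeness, a minimal sketch of wiring this callback list into the training call from the question (same model and data as above):

# training stops at the end of the first epoch whose accuracy reaches the point
model.fit(x_train, y_train, epochs=10, callbacks=callbacks)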

Any difference?

I have two pieces of code written using TensorFlow.
One is this:
import tensorflow as tf

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('accuracy') > 0.99):
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

callbacks = myCallback()

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
The other one is this:
import tensorflow as tf

def train_mnist():
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if(logs.get('accuracy') > 99):  # note: the threshold here is 99, not 0.99
                print("\n Se incheie antrenamentul")  # Romanian: "Training is ending"
                self.model.stop_training = True

    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    callbacks = myCallback()

    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    # model fitting
    history = model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
    # model fitting
    return history.epoch, history.history['acc'][-1]

train_mnist()
The first one gives an accuracy of 0.99 after 3 or 4 epochs. The second one gives an accuracy of 0.91 after 10 epochs. Why? They both look the same to me. Any ideas?
They are almost identical; I have checked the accuracy of both. The only reason your accuracy shows up differently is that in the 2nd code you returned
history.history['acc'][-1]
instead of
history.history['accuracy'][-1]
You also need to save the history of the 1st code for comparison, like this:
history = model.fit(x_train, y_train, epochs = 10, callbacks=[callbacks])
I also noticed that you stop model training outside of the if condition in the 1st code:
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('accuracy') > 0.99):
            print("\nReached 99% accuracy so cancelling training!")
        self.model.stop_training = True
It should be like this:
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('accuracy') > 0.99):
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True
Both pieces of code should then give around 0.99 (99%) accuracy.
Since your 2nd code is not showing the correct accuracy, I am posting the whole modified version of your 2nd code:
import tensorflow as tf
from os import path, getcwd, chdir

path = f"{getcwd()}/../tmp2/mnist.npz"

def train_mnist():
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if(logs.get('accuracy') > 99):
                print("\n Se incheie antrenamentul")
                self.model.stop_training = True

    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    callbacks = myCallback()

    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    # model fitting
    history = model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
    # model fitting
    return history.epoch, history.history['accuracy'][-1]

train_mnist()
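As a quick sanity check (not part of the original answer), you can print the keys that fit() actually recorded; depending on the TensorFlow/Keras version the accuracy key is either 'accuracy' or 'acc':

history = model.fit(x_train, y_train, epochs=1)
print(history.history.keys())  # e.g. dict_keys(['loss', 'accuracy']) on TF 2.x
acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
print(acc_key, history.history[acc_key][-1])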

Unable to load trained model in google colab [duplicate]

I have trained a Keras CNN monitoring the following metrics:
METRICS = [
    TruePositives(name='tp'),
    FalsePositives(name='fp'),
    TrueNegatives(name='tn'),
    FalseNegatives(name='fn'),
    BinaryAccuracy(name='accuracy'),
    Precision(name='precision'),
    Recall(name='recall'),
    AUC(name='auc'),
]
and then the model.compile:
model.compile(optimizer='nadam', loss='binary_crossentropy',
              metrics=METRICS)
It works perfectly, and I saved my h5 model (model.h5).
Now I have downloaded the model and I would like to use it in another script, importing the model with:
from keras.models import load_model
model = load_model('model.h5')
model.predict(....)
but when I run it, it returns:
ValueError: Unknown metric function: {'class_name': 'TruePositives', 'config': {'name': 'tp', 'dtype': 'float32', 'thresholds': None}}
How should I manage this issue? Thank you in advance.
When you have custom metrics you need to follow a slightly different approach:
1. Create the model, train it, and save it.
2. Load the model with custom_objects and compile=False.
3. Finally, compile the loaded model with the custom objects.
I am showing the approach here:
import tensorflow as tf
from tensorflow import keras

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Custom Loss1 (for example)
# @tf.function()
def customLoss1(yTrue, yPred):
    return tf.reduce_mean(yTrue - yPred)

# Custom Loss2 (for example)
# @tf.function()
def customLoss2(yTrue, yPred):
    return tf.reduce_mean(tf.square(tf.subtract(yTrue, yPred)))

def create_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', customLoss1, customLoss2])
    return model

# Create a basic model instance
model = create_model()

# Fit and evaluate model
model.fit(x_train, y_train, epochs=5)
loss, acc, loss1, loss2 = model.evaluate(x_test, y_test, verbose=1)
print("Original model, accuracy: {:5.2f}%".format(100 * acc))  # Original model, accuracy: 98.11%

# saving the model
model.save('./Mymodel', save_format='tf')

# load the model
loaded_model = tf.keras.models.load_model('./Mymodel',
                                          custom_objects={'customLoss1': customLoss1,
                                                          'customLoss2': customLoss2},
                                          compile=False)

# compile the model
loaded_model.compile(optimizer='adam',
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy', customLoss1, customLoss2])

# loaded model also has same accuracy, metrics and loss
loss, acc, loss1, loss2 = loaded_model.evaluate(x_test, y_test, verbose=1)
print("Loaded model, accuracy: {:5.2f}%".format(100 * acc))  # Loaded model, accuracy: 98.11%
custom_objects['METRICS'] = METRICS
model = load_model('model.h5', custom_objects=custom_objects)
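For that snippet to work, custom_objects has to map the serialized class names to the metric classes themselves. A minimal sketch of such a mapping, assuming the metrics come from tensorflow.keras as in the question:

from tensorflow.keras.metrics import (TruePositives, FalsePositives, TrueNegatives,
                                      FalseNegatives, BinaryAccuracy, Precision,
                                      Recall, AUC)
from tensorflow.keras.models import load_model

# map each serialized class name (as it appears in the error) to its class
custom_objects = {
    'TruePositives': TruePositives,
    'FalsePositives': FalsePositives,
    'TrueNegatives': TrueNegatives,
    'FalseNegatives': FalseNegatives,
    'BinaryAccuracy': BinaryAccuracy,
    'Precision': Precision,
    'Recall': Recall,
    'AUC': AUC,
}
model = load_model('model.h5', custom_objects=custom_objects)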
It looks like you are working through a TensorFlow tutorial. I also used these exact metrics and had the same problem. What worked for me was to load the model with compile=False and then compile it with the custom metrics. Then you should be able to use model.predict(....) as expected.
import keras

model = keras.models.load_model('model.h5', compile=False)

METRICS = [
    keras.metrics.TruePositives(name='tp'),
    keras.metrics.FalsePositives(name='fp'),
    keras.metrics.TrueNegatives(name='tn'),
    keras.metrics.FalseNegatives(name='fn'),
    keras.metrics.BinaryAccuracy(name='accuracy'),
    keras.metrics.Precision(name='precision'),
    keras.metrics.Recall(name='recall'),
    keras.metrics.AUC(name='auc'),
]

model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-4),
              loss='binary_crossentropy',
              metrics=METRICS)

Cifar100 only has 16 training images and 16 training labels

I'm using TensorFlow with Python 3.7, and I am trying to make an image classifier with CIFAR-100. I want to stay away from Keras as much as possible because it only has a limited number of datasets that I can use. This is my code:
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as Image
from tensorflow import keras

tf.compat.v1.enable_eager_execution()

shape = (224, 224)

labels = '/home/pi/tf/cifar_labels.txt'
labels = np.array(open(labels).read().splitlines())

img = '/home/pi/tf/lobster.jpeg'
img = Image.open(img).resize(shape)
img = np.array(img) / 255.0
img = np.reshape(img, (224, 224, 3))

train = tfds.load(name="cifar100", split="train")
test = tfds.load(name="cifar100", split="test")

train = train.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
test = test.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)

for features in train:
    train_images, train_labels = features["image"], features["label"]

for features in test:
    test_images, test_labels = features["image"], features["label"]

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(32, 32, 3)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(100, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=200, verbose=2)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
I'm guessing that there is something wrong with the for features in train loop. When I print the len() of the training images/labels, I get 16. Because of this, my model is getting a training accuracy of 0% and a loss of 16.1181. Can anybody help?
To use CIFAR-100 in your Keras model directly, you should call the tfds.load function with the as_supervised=True parameter. It will then load the dataset with only the 'image' and 'label' keys. You can see that the CIFAR-100 dataset contains three keys:
FeaturesDict({
    'coarse_label': ClassLabel(shape=(), dtype=tf.int64, num_classes=20),
    'image': Image(shape=(32, 32, 3), dtype=tf.uint8),
    'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=100),
})
Therefore it cannot be fed into model.fit() directly. With as_supervised set to True, the returned dataset will contain only (image, label) pairs.
To sum up,
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras

tf.compat.v1.enable_eager_execution()

train = tfds.load(name="cifar100", split="train", as_supervised=True)
test = tfds.load(name="cifar100", split="test", as_supervised=True)

train = train.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
test = test.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(32, 32, 3)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(100, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(train, epochs=200, verbose=1)
test_loss, test_acc = model.evaluate(test, verbose=1)
print('\nTest accuracy:', test_acc)
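One detail worth noting (not in the original answer): tfds returns the images as uint8 in [0, 255], while the question's own code rescaled them to [0, 1]. A small sketch of adding that scaling with dataset.map, typically placed right after tfds.load and before batching:

def scale(image, label):
    # cast to float32 and rescale to [0, 1], matching the question's preprocessing
    return tf.cast(image, tf.float32) / 255.0, label

train = train.map(scale)
test = test.map(scale)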
Note:
To use the dataset without as_supervised set to True, you can use the model.train_on_batch function, e.g.:
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras

tf.compat.v1.enable_eager_execution()

train = tfds.load(name="cifar100", split="train")
test = tfds.load(name="cifar100", split="test")

train = train.shuffle(1024).repeat(200).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
test = test.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(32, 32, 3)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(100, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

for epoch in range(200):
    for features in train:
        image_batch, label_batch = features["image"], features["label"]
        loss, acc = model.train_on_batch(image_batch, label_batch)

for features in test:
    image_batch, label_batch = features["image"], features["label"]
    loss, acc = model.test_on_batch(image_batch, label_batch)

Resume Training tf.keras Tensorboard

I encountered some problems when I continued training my model and visualized the progress on TensorBoard.
My question is: how do I resume training from the same step without specifying any epoch manually? If possible, simply by loading the saved model, it should somehow read the global_step from the saved optimizer and continue training from there.
I have provided some code below to reproduce a similar error.
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import load_model

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[TensorBoard()])
model.save('./final_model.h5', include_optimizer=True)

del model

model = load_model('./final_model.h5')
model.fit(x_train, y_train, epochs=10, callbacks=[TensorBoard()])
You can run TensorBoard using the command:
tensorboard --logdir ./logs
You can set the parameter initial_epoch in the function model.fit() to the number of the epoch you want your training to start from. Take into account that the model trains until the epoch of index epochs is reached (and not a number of iterations given by epochs).
In your example, if you want to train for 10 epochs more, it should be:
model.fit(x_train, y_train, initial_epoch=9, epochs=19, callbacks=[TensorBoard()])
This will allow you to visualise your plots on TensorBoard correctly.
More extensive information about these parameters can be found in the docs.
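Putting this together with the saved model from the question, a minimal resume sketch could look like the following (assuming the first run finished 10 epochs, i.e. indices 0-9, and that both runs write to the same log directory so the TensorBoard curves continue):

from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import load_model

model = load_model('./final_model.h5')
# reuse the same log_dir so the new epochs are appended to the existing curves
tb = TensorBoard(log_dir='./logs')
# fit() trains up to the epoch of index `epochs`, so this runs epochs 10-19
model.fit(x_train, y_train, initial_epoch=10, epochs=20, callbacks=[tb])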
Here is sample code in case someone needs it. It implements the idea proposed by Abhinav Anand:
from glob import glob
from os.path import join

from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.models import load_model

mca = ModelCheckpoint(join(dir, 'model_{epoch:03d}.h5'),
                      monitor='loss',
                      save_best_only=False)
tb = TensorBoard(log_dir=join(dir, 'logs'),
                 write_graph=True,
                 write_images=True)

files = sorted(glob(join(fold_dir, 'model_???.h5')))
if files:
    model_file = files[-1]
    initial_epoch = int(model_file[-6:-3])
    print('Resuming using saved model %s.' % model_file)
    model = load_model(model_file)
else:
    model = nn.model()
    initial_epoch = 0

model.fit(x_train,
          y_train,
          epochs=100,
          initial_epoch=initial_epoch,
          callbacks=[mca, tb])
Replace nn.model() with your own function for defining the model.
It's very simple. Create checkpoints while training the model and then use those checkpoints to resume training from where you left off.
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[TensorBoard()])
model.save('./final_model.h5', include_optimizer=True)

model = load_model('./final_model.h5')

callbacks = list()

tensorboard = TensorBoard()
callbacks.append(tensorboard)

file_path = "model-{epoch:02d}-{loss:.4f}.hdf5"

# now here you can create checkpoints and save according to your need
# here period is the number of epochs after which to save the model during training
# another option is save_weights_only; for your case it should be False
checkpoints = ModelCheckpoint(file_path, monitor='loss', verbose=1, period=1, save_weights_only=False)
callbacks.append(checkpoints)

model.fit(x_train, y_train, epochs=10, callbacks=callbacks)
After this, just load the checkpoint from which you want to resume training:
model = load_model(checkpoint_of_choice)
model.fit(x_train, y_train, epochs=10, callbacks=callbacks)
And you are done.
Let me know if you have more questions about this.
