Why does my TensorFlow model lose its accuracy after loading - python

So I am training on the Fashion-MNIST dataset and the code is below.
The issue is that, on the first run, it trains, evaluates and gives me a fair accuracy.
But on the second run (when it is supposed to load the model from the saved file) the accuracy drops considerably.
Is something wrong with my code, or is there a practice I'm not following here?
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from os import environ, sep
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
MODELFILENAME = 'TF_ZTH_02_model' + sep
labels = {
    0: 'T-shirt/Top',
    1: 'Trouser',
    2: 'Pullover',
    3: 'Dress',
    4: 'Coat',
    5: 'Sandal',
    6: 'Shirt',
    7: 'Sneaker',
    8: 'Bag',
    9: 'Ankle Boot'
}
def main():
    fashionmnist = keras.datasets.fashion_mnist
    (trainimages, trainlabels), (testimages, testlabels) = fashionmnist.load_data()
    trainimages, testimages = trainimages/255., testimages/255.

    try:
        #try to load model
        model = keras.models.load_model(MODELFILENAME)
    #file doesn't exist, train model
    except:
        #activation functions
        #relu - rectified linear unit - returns the value if it's greater than 0, else 0
        #softmax - picks the biggest number in the set
        model = keras.Sequential([
            keras.layers.Flatten(input_shape=(28, 28)), #size of image
            keras.layers.Dense(128, activation=tf.nn.relu),
            keras.layers.Dense(10, activation=tf.nn.softmax) #ten clothing classes
        ])
        model.compile(
            optimizer = 'adam',
            loss = 'sparse_categorical_crossentropy',
            metrics = ['accuracy']
        )
        model.fit(trainimages, trainlabels, epochs=5)
        #save to file
        model.save(MODELFILENAME)

    testloss, testacc = model.evaluate(testimages, testlabels)
    print('\nEvaluation, loss and accuracy : ', testloss, testacc)

    predictions = model.predict(testimages)
    # predictions = model.predict(np.asarray([testimages[0]]))

    while True:
        x = int(input('\nEnter image number (<%d) : ' % len(testimages)))
        print('\nPredictions : ',
              predictions[x],
              predictions[x].argmax(),
              labels[predictions[x].argmax()]
        )
        print('Actual : ', testlabels[x], labels[testlabels[x]])

        plt.ioff()
        plt.imshow(testimages[x])
        plt.title(labels[predictions[x].argmax()])
        plt.show()

#but this dataset has objects centered
#in the case of an unprocessed dataset, you'd need to SPOT FEATURES
#with the help of convolutional networks

try:
    main()
except Exception as e:
    print(e)
finally:
    input()
Output on First Run
Output on Second Run
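For reference, a minimal sketch (same dataset and MODELFILENAME as above, not the original poster's code) that checks for the saved model explicitly instead of relying on a bare except, so that a failed load cannot silently fall through to retraining:

import os
import tensorflow as tf
from tensorflow import keras

MODELFILENAME = 'TF_ZTH_02_model' + os.sep  # same name as in the question

(trainimages, trainlabels), (testimages, testlabels) = keras.datasets.fashion_mnist.load_data()
trainimages, testimages = trainimages / 255., testimages / 255.

if os.path.isdir(MODELFILENAME):
    # A saved model directory exists, so reuse it.
    model = keras.models.load_model(MODELFILENAME)
else:
    # No saved model yet: build, train and save it.
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation=tf.nn.relu),
        keras.layers.Dense(10, activation=tf.nn.softmax),
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(trainimages, trainlabels, epochs=5)
    model.save(MODELFILENAME)

# If loading worked, this evaluation should closely match the first run.
print(model.evaluate(testimages, testlabels))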

Related

When training my model in tensorflow I can't see progress outputs

I am training a model for speech recognition, however I can't see the training progress output while the model trains.
My code:
import numpy as np
import tensorflow as tf
import automatic_speech_recognition as asr
dataset = asr.dataset.Audio.from_csv('test.csv', batch_size=1)
dev_dataset = asr.dataset.Audio.from_csv('test.csv', batch_size=1)
alphabet = asr.text.Alphabet(lang='en')
features_extractor = asr.features.FilterBanks(
    features_num=160,
    winlen=0.02,
    winstep=0.01,
    winfunc=np.hanning
)
model = asr.model.get_deepspeech2(
    input_dim=160,
    output_dim=29,
    rnn_units=800,
    is_mixed_precision=False
)
optimizer = tf.optimizers.Adam(
    lr=1e-4,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-8
)
decoder = asr.decoder.GreedyDecoder()
pipeline = asr.pipeline.CTCPipeline(
    alphabet, features_extractor, model, optimizer, decoder
)
pipeline.fit(dataset, dev_dataset, epochs=5)
pipeline.save('/checkpoint')
Not sure, but I think you're missing the "verbose" argument, which takes values 0-2: 0 shows nothing, 1 shows the progress bar, and 2 prints one line per epoch.
Try:
pipeline.fit(dataset, dev_dataset, epochs=5, verbose=1)
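For comparison, this is how the verbose levels behave in plain tf.keras (a toy sketch with random data; whether pipeline.fit forwards the argument on to Keras is an assumption):

import numpy as np
import tensorflow as tf

# Tiny model and random data, used only to illustrate the verbose levels.
x = np.random.rand(32, 4).astype('float32')
y = np.random.randint(0, 2, size=(32,))
model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy')

model.fit(x, y, epochs=2, verbose=0)  # silent
model.fit(x, y, epochs=2, verbose=1)  # progress bar
model.fit(x, y, epochs=2, verbose=2)  # one line per epoch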

Keras autoencoder is not reconstructing 1D signals

I would like to train an autoencoder neural network.
Assume I have a 1D signal:
Then in order to create a dataset, I split this signal into several thousands of (overlapping) segments (each with 1024 samples). Each segment is saved into a separate .txt file.
My code is the following:
import tensorflow as tf
import tensorflow.keras as keras
import matplotlib.pyplot as plt
import glob
import numpy as np
def load_dataset(dataset_path, train_val_ratio=0.8):
    filenames = glob.glob(dataset_path+"\\*.txt")
    windows_amount = len(filenames)
    lines = [line.rstrip('\n') for line in open(filenames[0])]
    window_length = len(lines)
    train_windows_amount = int(np.ceil(windows_amount*train_val_ratio))
    val_windows_amount = int(np.ceil(windows_amount*(1-train_val_ratio)))
    train_windows = np.zeros((train_windows_amount, window_length), dtype=np.float)
    val_windows = np.zeros((val_windows_amount, window_length), dtype=np.float)
    for j in range(len(filenames)):
        if j%100 == 0:
            print(str(j/windows_amount*100)+"%")
        lines = [line.rstrip('\n') for line in open(filenames[j])]
        if j < train_windows_amount:
            for i in range(len(lines)):
                train_windows[j, i] = float(lines[i])
        else:
            for i in range(len(lines)):
                val_windows[j-train_windows_amount, i] = float(lines[i])
    return train_windows, val_windows

train_path = ".\\dataset\\dataset_1\\train"
train_windows, val_windows = load_dataset(train_path)
train_windows = train_windows/1000000
val_windows = val_windows/1000000

treshold_path = ".\\dataset\\dataset_1\\treshold"
treshold_windows, _ = load_dataset(treshold_path)
treshold_windows = treshold_windows/1000000

model = keras.Sequential([
    keras.layers.Dense(1024, activation='relu', name="encoder"),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dense(256, activation='relu', name="bottleneck"),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dense(1024, activation='sigmoid', name="decoder")
])

model.compile(optimizer = "adam",
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_windows, train_windows,
                    batch_size=128,
                    epochs=200,
                    verbose=1,
                    shuffle=True,
                    validation_data=(val_windows, val_windows))
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='val')
plt.grid()
plt.legend()
plt.show()
test_loss, test_acc = model.evaluate(treshold_windows, treshold_windows, verbose=2)
print('Test loss:', test_loss)
model.save_weights('model.h5')
reconstructed = model.predict(treshold_windows)
plt.plot(treshold_windows[10])
plt.plot(reconstructed[0])
plt.plot(reconstructed[10])
plt.plot(reconstructed[20])
plt.plot(reconstructed[30])
plt.plot(reconstructed[40])
plt.grid()
plt.legend()
plt.show()
After the autoencoder training I get the following history plot:
And when I try to use the trained autoencoder on a segment of the previously unseen signal, I get the following plot:
where the upper signal is the input segment,
the yellow signal is the output signal of the neural network,
the other color signals are neural network outputs for other input segments.
I would expect the autoencoder to produce output similar to its input, but that is not the case here. What am I doing wrong?
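One commonly suggested variant for real-valued reconstruction (a minimal sketch with toy data, assuming the normalised segments are not strictly confined to [0, 1]) is a linear output layer trained with a mean-squared-error loss instead of sigmoid plus binary cross-entropy:

import numpy as np
from tensorflow import keras

# Toy stand-in for the 1024-sample windows described in the question.
train_windows = np.random.randn(256, 1024).astype('float32')

autoencoder = keras.Sequential([
    keras.layers.Dense(512, activation='relu', input_shape=(1024,), name='encoder'),
    keras.layers.Dense(256, activation='relu', name='bottleneck'),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dense(1024, activation='linear', name='decoder'),  # linear output for real-valued signals
])
autoencoder.compile(optimizer='adam', loss='mse')  # MSE reconstruction loss
autoencoder.fit(train_windows, train_windows, batch_size=128, epochs=2, verbose=0)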

skopt's gp_minimize() function raises ValueError: array must not contain infs or NaNs

I am currently using the skopt (scikit-optimize) package for hyperparameter tuning of a neural network (I am trying to minimize -1 * accuracy). It seems to run fine (and successfully prints to the console) for several iterations before it raises ValueError: array must not contain infs or NaNs.
What are some possible causes of this? My data does not contain infs or NaNs and neither do my search parameter ranges. The neural network code is quite long, so for brevity, I will paste the relevant sections:
Imports:
import pandas as pd
import numpy as np
from skopt import gp_minimize
from skopt.utils import use_named_args
from skopt.space import Real, Categorical, Integer
from tensorflow.python.framework import ops
from sklearn.model_selection import train_test_split
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, Dropout, MaxPooling1D, Flatten
from keras import backend as K
Creation of search parameters:
dim_num_filters_L1 = Integer(low=1, high=50, name='num_filters_L1')
#dim_kernel_size_L1 = Integer(low=1, high=70, name='kernel_size_L1')
dim_activation_L1 = Categorical(categories=['relu', 'linear', 'softmax'], name='activation_L1')
dim_num_filters_L2 = Integer(low=1, high=50, name='num_filters_L2')
#dim_kernel_size_L2 = Integer(low=1, high=70, name='kernel_size_L2')
dim_activation_L2 = Categorical(categories=['relu', 'linear', 'softmax'], name='activation_L2')
dim_num_dense_nodes = Integer(low=1, high=28, name='num_dense_nodes')
dim_activation_L3 = Categorical(categories=['relu', 'linear', 'softmax'], name='activation_L3')
dim_dropout_rate = Real(low = 0, high = 0.5, name = 'dropout_rate')
dim_learning_rate = Real(low=1e-4, high=1e-2, name='learning_rate')
dimensions = [dim_num_filters_L1,
              #dim_kernel_size_L1,
              dim_activation_L1,
              dim_num_filters_L2,
              #dim_kernel_size_L2,
              dim_activation_L2,
              dim_num_dense_nodes,
              dim_activation_L3,
              dim_dropout_rate,
              dim_learning_rate,
              ]
Function that creates all models that will be tested:
def create_model(num_filters_L1, #kernel_size_L1,
                 activation_L1,
                 num_filters_L2, #kernel_size_L2,
                 activation_L2,
                 num_dense_nodes, activation_L3,
                 dropout_rate,
                 learning_rate):
    input_shape = (X_train.shape[1], 1)
    model = Sequential()
    model.add(Conv1D(num_filters_L1, kernel_size=40, activation=activation_L1, input_shape=input_shape))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(num_filters_L2, kernel_size=20, activation=activation_L2))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(num_dense_nodes, activation=activation_L3))
    model.add(Dropout(dropout_rate))
    model.add(Dense(y_train.shape[1], activation='linear'))
    adam = tensorflow.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=adam, loss='mean_squared_error', metrics=['accuracy'])
    return model
Define fitness function:
@use_named_args(dimensions=dimensions)
def fitness(num_filters_L1, #kernel_size_L1,
            activation_L1,
            num_filters_L2, #kernel_size_L2,
            activation_L2,
            num_dense_nodes, activation_L3,
            dropout_rate,
            learning_rate):
    model = create_model(num_filters_L1, #kernel_size_L1,
                         activation_L1,
                         num_filters_L2, #kernel_size_L2,
                         activation_L2,
                         num_dense_nodes, activation_L3,
                         dropout_rate,
                         learning_rate)
    history_opt = model.fit(x=X_train,
                            y=y_train,
                            validation_data=(X_val, y_val),
                            shuffle=True,
                            verbose=2,
                            epochs=10
                            )
    #return the validation accuracy for the last epoch.
    accuracy_opt = model.evaluate(X_test, y_test)[1]
    # Print the classification accuracy:
    print("Experimental Model Accuracy: {0:.2%}".format(accuracy_opt))
    # Delete the Keras model with these hyper-parameters from memory:
    del model
    # Clear the Keras session, otherwise it will keep adding new models to the same TensorFlow graph each time we create a model with a different set of hyper-parameters.
    K.clear_session()
    ops.reset_default_graph()
    # the optimizer aims for the lowest score, so return negative accuracy:
    return -accuracy_opt  # or sum(RMSE)?
Run hyperparameter search:
gp_result = gp_minimize(func=fitness,
                        dimensions=dimensions)
print("best accuracy was " + str(round(gp_result.fun *-100,2))+"%.")
Your activation function is not converging in a random acquisition function call. I encountered this problem and removed the 'relu' function from the search space.
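In terms of the question's code, that suggestion amounts to dropping 'relu' from the Categorical dimensions, e.g. (a sketch of only the changed lines):

from skopt.space import Categorical

# Same activation dimensions as in the question, but with 'relu' removed from the search space.
dim_activation_L1 = Categorical(categories=['linear', 'softmax'], name='activation_L1')
dim_activation_L2 = Categorical(categories=['linear', 'softmax'], name='activation_L2')
dim_activation_L3 = Categorical(categories=['linear', 'softmax'], name='activation_L3')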

tf.keras.Sequential binary classification model predicting [0.5, 0.5] or close to

I am currently trying to build a model to classify whether or not the outcome of a given football match will be above or below 2.5 goals, based on the Home team, Away team & game league, using a tf.keras.Sequential model in TensorFlow 2.0RC.
The problem I am encountering is that my softmax results converge on [0.5,0.5] when using the model.predict method. What makes this odd is that my validation & test accuracy and losses are about 0.94 & 0.12 respectively after 1000 epochs of training, otherwise I would have put this down to an overfitting problem. I am aware that 1000 epochs is extremely likely to overfit, however, I want to understand why my accuracy increases until about 800 epochs in. My loss flattens at about 300 epochs.
I have tried to alter the number of layers, number of units in each layer, the activation functions, optimizers and loss functions, number of epochs and learning rates, but can only seem to increase the losses.
The results still seem to converge toward [0.5,0.5] regardless.
The full code can be viewed at https://github.com/AhmUgEk/tensorflow_football_predictions, but below is an extract showing model composition.
# Create Keras Sequential model:
model = keras.Sequential()
model.add(feature_layer) # Input processing layer.
model.add(Dense(units=32, activation='relu')) # Hidden Layer 1.
model.add(Dropout(rate=0.4))
model.add(BatchNormalization())
model.add(Dense(units=32, activation='relu')) # Hidden Layer 2.
model.add(Dropout(rate=0.4))
model.add(BatchNormalization())
model.add(Dense(units=2, activation='softmax')) # Output layer.
# Compile the model:
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss=keras.losses.MeanSquaredLogarithmicError(),
    metrics=['accuracy']
)
# Fit the model to the training dataset and validate against the
# validation dataset between epochs:
model.fit(
    train_dataset,
    validation_data=val_dataset,
    epochs=1000,
    callbacks=[tensorboard_callback]
)
I would expect to receive a result of [0.282, 0.718] for example for an input of:
model.predict_classes([np.array(['E0'], dtype='object'),
                       np.array(['Liverpool'], dtype='object'),
                       np.array(['Newcastle'], dtype='object')])[0]
but as per the above, receive a result of say [0.5, 0.5].
Am I missing something obvious here?
I made some minor changes to the model. Now I am no longer getting exactly [0.5, 0.5].
Result:
[[0.61482537 0.3851746 ]
[0.5121426 0.48785746]
[0.48058605 0.51941395]
[0.48913187 0.51086813]
[0.45480043 0.5451996 ]
[0.48933673 0.5106633 ]
[0.43431875 0.5656812 ]
[0.55314165 0.4468583 ]
[0.5365097 0.4634903 ]
[0.54371756 0.45628244]]
Implementation:
import datetime
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from gpu_limiter import limit_gpu
from pipe_functions import csv_to_df, dataframe_to_dataset
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import BatchNormalization, Dense, DenseFeatures, Dropout, Input
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import tensorflow.keras.backend as K
from tensorflow.data import Dataset
# Test GPU availability and instantiate memory growth limitation if True:
if tf.test.is_gpu_available():
    print('GPU Available\n')
    limit_gpu()
else:
    print('Running on CPU')
df = csv_to_df("./csv_files")
# Format & organise imported data, making the "Date" column the new index:
df['Date'] = pd.to_datetime(df['Date'])
df = df[['Date', 'Div', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG']].dropna().set_index('Date').sort_index()
df['Over_2.5'] = (df['FTHG'] + df['FTAG'] > 2.5).astype(int)
df = df.drop(['FTHG', 'FTAG'], axis=1)
# Split data into training, validation and testing data:
# Note: random_state variable set to ensure reproducibility.
train, test = train_test_split(df, test_size=0.05, random_state=42)
train, val = train_test_split(train, test_size=0.05, random_state=42)
# print(df['Over_2.5'].value_counts()) # Check that data is balanced.
# Create datasets from train, val & test dataframes:
target_col = 'Over_2.5'
batch_size = 32
def df_to_dataset(features: np.ndarray, labels: np.ndarray, shuffle=True, batch_size=8) -> Dataset:
    ds = Dataset.from_tensor_slices(({"feature": features}, {"target": labels}))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(features))
    ds = ds.batch(batch_size)
    return ds
def get_feature_transform() -> DenseFeatures:
    # Format features into feature columns to ensure data is in the correct format for feeding into the model:
    feature_cols = []
    for column in filter(lambda x: x != target_col, df.columns):
        feature_cols.append(tf.feature_column.embedding_column(tf.feature_column.categorical_column_with_vocabulary_list(
            key=column, vocabulary_list=df[column].unique()), dimension=5))
    return DenseFeatures(feature_cols)
# Transforms all features into dense tensors.
feature_transform = get_feature_transform()
train_features = feature_transform(dict(train)).numpy()
val_features = feature_transform(dict(val)).numpy()
test_features = feature_transform(dict(test)).numpy()
train_dataset = df_to_dataset(train_features, train[target_col].values, shuffle=True, batch_size=batch_size)
val_dataset = df_to_dataset(val_features, val[target_col].values, shuffle=True, batch_size=batch_size) # Shuffle not required to validation data.
test_dataset = df_to_dataset(test_features, test[target_col].values, shuffle=True, batch_size=batch_size) # Shuffle not required to test data.
# Create Keras Functional API:
# Create a feature layer from the feature columns, to be placed at the input layer of the model:
def build_model(input_shape: tuple) -> keras.Model:
    input_layer = keras.Input(shape=input_shape, name='feature')
    model = Dense(units=1028, activation='relu', kernel_initializer='normal', name='dense0')(input_layer) # Hidden Layer 1.
    model = BatchNormalization(name='bc0')(model)
    model = Dense(units=1028, activation='relu', kernel_initializer='normal', name='dense1')(model) # Hidden Layer 2.
    model = Dropout(rate=0.1)(model)
    model = BatchNormalization(name='bc1')(model)
    model = Dense(units=100, activation='relu', kernel_initializer='normal', name='dense2')(model) # Hidden Layer 3.
    model = Dropout(rate=0.25)(model)
    model = BatchNormalization(name='bc2')(model)
    model = Dense(units=50, activation='relu', kernel_initializer='normal', name='dense3')(model) # Hidden Layer 4.
    model = Dropout(rate=0.4)(model)
    model = BatchNormalization(name='bc3')(model)
    output_layer = Dense(units=2, activation='softmax', kernel_initializer='normal', name='target')(model) # Output layer.

    model = keras.Model(inputs=input_layer, outputs=output_layer, name='better-than-chance')

    # Compile the model:
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.001),
        loss='mse',
        metrics=['accuracy']
    )
    return model
# # Create a TensorBoard log file (time appended) directory for every run of the model:
# directory = ".\\logs\\" + str(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
# os.mkdir(directory)
# # Create a TensorBoard callback to log a record of model performance for every 1 epoch:
# tensorboard_callback = TensorBoard(log_dir=directory, histogram_freq=1, write_graph=True, write_images=True)
# Run "tensorboard --logdir .\logs" in anaconda prompt to review & compare logged results.
# Note: Make sure that the correct environment is activated before running.
model = build_model((train_features.shape[1],))
model.summary()
# checkpoint = ModelCheckpoint('model-{epoch:03d}.h5', verbose=1, monitor='val_loss',save_best_only=True, mode='auto')
# Fit the model to the training dataset and validate against the validation dataset between epochs:
model.fit(
    train_dataset,
    validation_data=val_dataset,
    epochs=10)
# callbacks=[checkpoint]
# Saves and reloads model.
# model.save("./model.h5")
# model_from_saved = keras.models.load_model("./model.h5")
# Evaluate model accuracy against test dataset:
# scores, accuracy = model.evaluate(train_dataset)
# print('Accuracy:', accuracy)
##############
## OPTIONAL ##
##############
# DEBUGGING
# inp = model.input # input placeholder
# outputs = [layer.output for layer in model.layers] # all layer outputs
# functors = [K.function([inp], [out]) for out in outputs] # evaluation functions
# # Testing
# layer_outs = [func([test_features]) for func in functors]
# print(layer_outs)
# # # Form a prediction based on inputs:
prediction = model.predict({"feature": test_features[:10]})
print(prediction)
One thing you can do is to try some ensemble learning methods like RandomForest and XGBoost and compare the results.
You could also try adding other key performance indicators (KPIs) to your data and then fit the model again.
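As a rough illustration of the ensemble suggestion, a minimal scikit-learn sketch (synthetic features stand in for the dense feature matrix from the question):

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

# Synthetic stand-in; in practice this would be train_features and the Over_2.5 labels.
X = np.random.rand(1000, 15)
y = np.random.randint(0, 2, size=1000)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)

clf = RandomForestClassifier(n_estimators=200, random_state=42)
clf.fit(X_tr, y_tr)
print("RandomForest accuracy:", accuracy_score(y_te, clf.predict(X_te)))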

Make predictions using a tensorflow graph from a keras model

I have a model trained using Keras with TensorFlow as my backend, but now I need to turn my model into a TensorFlow graph for a certain application. I attempted to do this and make predictions to ensure that it is working correctly, but when comparing to the results gathered from model.predict() I get very different values. For instance:
from keras.models import load_model
import tensorflow as tf
model = load_model('model_file.h5')
x_placeholder = tf.placeholder(tf.float32, shape=(None,7214,1))
y = model(x_placeholder)
x = np.ones((1,7214,1))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("Predictions from:\ntf graph: "+str(sess.run(y, feed_dict={x_placeholder:x})))
print("keras predict: "+str(model.predict(x)))
returns:
Predictions from:
tf graph: [[-0.1015993 0.07432419 0.0592984 ]]
keras predict: [[ 0.39339241 0.57949686 -3.67846966]]
The values from keras predict are correct, but the tf graph results are not.
If it helps to know the final intended application, I am creating a jacobian matrix with the tf.gradients() function, but currently it does not return the correct results when comparing to theano's jacobian function, which gives the correct jacobian. Here is my tensorflow jacobian code:
x = tf.placeholder(tf.float32, shape=(None,7214,1))
y = tf.reshape(model(x)[0],[-1])
y_list = tf.unstack(y)
jacobian_list = [tf.gradients(y_, x)[0] for y_ in y_list]
jacobian = tf.stack(jacobian_list)
EDIT: Model code
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, InputLayer, Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
# activation function used following every layer except for the output layers
activation = 'relu'
# model weight initializer
initializer = 'he_normal'
# shape of input data that is fed into the input layer
input_shape = (None,7214,1)
# number of filters used in the convolutional layers
num_filters = [4,16]
# length of the filters in the convolutional layers
filter_length = 8
# length of the maxpooling window
pool_length = 4
# number of nodes in each of the hidden fully connected layers
num_hidden_nodes = [256,128]
# number of samples fed into model at once during training
batch_size = 64
# maximum number of interations for model training
max_epochs = 30
# initial learning rate for optimization algorithm
lr = 0.0007
# exponential decay rate for the 1st moment estimates for optimization algorithm
beta_1 = 0.9
# exponential decay rate for the 2nd moment estimates for optimization algorithm
beta_2 = 0.999
# a small constant for numerical stability for optimization algorithm
optimizer_epsilon = 1e-08
model = Sequential([
    InputLayer(batch_input_shape=input_shape),
    Conv1D(kernel_initializer=initializer, activation=activation, padding="same", filters=num_filters[0], kernel_size=filter_length),
    Conv1D(kernel_initializer=initializer, activation=activation, padding="same", filters=num_filters[1], kernel_size=filter_length),
    MaxPooling1D(pool_size=pool_length),
    Flatten(),
    Dense(units=num_hidden_nodes[0], kernel_initializer=initializer, activation=activation),
    Dense(units=num_hidden_nodes[1], kernel_initializer=initializer, activation=activation),
    Dense(units=3, activation="linear", input_dim=num_hidden_nodes[1]),
])
# compile model
loss_function = 'mean_squared_error'
early_stopping_min_delta = 0.0001
early_stopping_patience = 4
reduce_lr_factor = 0.5
reuce_lr_epsilon = 0.0009
reduce_lr_patience = 2
reduce_lr_min = 0.00008
optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=optimizer_epsilon, decay=0.0)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=early_stopping_min_delta,
                               patience=early_stopping_patience, verbose=2, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, epsilon=reuce_lr_epsilon,
                              patience=reduce_lr_patience, min_lr=reduce_lr_min, mode='min', verbose=2)
model.compile(optimizer=optimizer, loss=loss_function)
model.fit(train_x, train_y, validation_data=(cv_x, cv_y),
          epochs=max_epochs, batch_size=batch_size, verbose=2,
          callbacks=[reduce_lr, early_stopping])
model.save('model_file.h5')
@frankyjuang linked me to https://github.com/amir-abdi/keras_to_tensorflow. Combining this with code from https://github.com/metaflow-ai/blog/blob/master/tf-freeze/load.py and https://github.com/tensorflow/tensorflow/issues/675, I have found a solution for both predicting using a tf graph and creating the jacobian function:
import tensorflow as tf
import numpy as np
# Create function to convert saved keras model to tensorflow graph
def convert_to_pb(weight_file, input_fld='', output_fld=''):
    import os
    import os.path as osp
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io
    from keras.models import load_model
    from keras import backend as K

    # weight_file is a .h5 keras model file
    output_node_names_of_input_network = ["pred0"]
    output_node_names_of_final_network = 'output_node'

    # change filename to a .pb tensorflow file
    output_graph_name = weight_file[:-2]+'pb'
    weight_file_path = osp.join(input_fld, weight_file)

    net_model = load_model(weight_file_path)

    num_output = len(output_node_names_of_input_network)
    pred = [None]*num_output
    pred_node_names = [None]*num_output

    for i in range(num_output):
        pred_node_names[i] = output_node_names_of_final_network+str(i)
        pred[i] = tf.identity(net_model.output[i], name=pred_node_names[i])

    sess = K.get_session()

    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
    graph_io.write_graph(constant_graph, output_fld, output_graph_name, as_text=False)
    print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))

    return output_fld+output_graph_name
Call:
tf_model_path = convert_to_pb('model_file.h5','/model_dir/','/model_dir/')
Create function to load the tf model as a graph:
def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )

    input_name = graph.get_operations()[0].name+':0'
    output_name = graph.get_operations()[-1].name+':0'

    return graph, input_name, output_name
Create a function to make model predictions using the tf graph
def predict(model_path, input_data):
    # load tf graph
    tf_model, tf_input, tf_output = load_graph(model_path)

    # Create tensors for model input and output
    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)

    # Number of model outputs
    num_outputs = y.shape.as_list()[0]
    predictions = np.zeros((input_data.shape[0], num_outputs))
    for i in range(input_data.shape[0]):
        with tf.Session(graph=tf_model) as sess:
            y_out = sess.run(y, feed_dict={x: input_data[i:i+1]})
            predictions[i] = y_out

    return predictions
Make predictions:
tf_predictions = predict(tf_model_path,test_data)
Jacobian function:
def compute_jacobian(model_path, input_data):
    tf_model, tf_input, tf_output = load_graph(model_path)

    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)
    y_list = tf.unstack(y)
    num_outputs = y.shape.as_list()[0]
    jacobian = np.zeros((num_outputs, input_data.shape[0], input_data.shape[1]))
    for i in range(input_data.shape[0]):
        with tf.Session(graph=tf_model) as sess:
            y_out = sess.run([tf.gradients(y_, x)[0] for y_ in y_list], feed_dict={x: input_data[i:i+1]})
            jac_temp = np.asarray(y_out)
        jacobian[:, i:i+1, :] = jac_temp[:, :, :, 0]

    return jacobian
Compute Jacobian Matrix:
jacobians = compute_jacobian(tf_model_path,test_data)
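For reference, in TensorFlow 2.x the same kind of Jacobian can be computed directly with tf.GradientTape (a minimal sketch with a toy Keras model rather than the original model_file.h5):

import numpy as np
import tensorflow as tf
from tensorflow import keras

# Toy stand-in for the saved model (3 outputs, small 1D input).
model = keras.Sequential([
    keras.layers.Input(shape=(16, 1)),
    keras.layers.Flatten(),
    keras.layers.Dense(3, activation='linear'),
])

x = tf.constant(np.ones((1, 16, 1), dtype=np.float32))
with tf.GradientTape() as tape:
    tape.watch(x)           # x is a constant, so it must be watched explicitly
    y = model(x)
jac = tape.jacobian(y, x)   # d(outputs)/d(inputs)
print(jac.shape)            # (1, 3, 1, 16, 1)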
