Extract features into a dataset from keras model - python

I use the following code (courtesy of here), which trains a CNN on MNIST images:
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
print(model.save_weights('file.txt')) # <<<<<----
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
My goal is to use the CNN model to extract MNIST features into a dataset that I can use as input for another classifier. In this example, I don't care about the classification operation, since all I need are the features of the trained images. The only method I found is save_weights:
print(model.save_weights('file.txt'))
How can I extract features into a dataset from a Keras model?

After training or loading the existing trained model, you can create another model:
from keras.models import Model

extract = Model(model.inputs, model.layers[-3].output)  # the Dense(128, ...) layer
features = extract.predict(data)
and use its .predict method to return the vectors from a specific layer; in this case every image becomes a vector of shape (128,), the output of the Dense(128, ...) layer.
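To build the dataset the question asks for, you can run this extraction model over both splits and save the results. A minimal sketch (the file names and the scikit-learn classifier are my own choices, not from the original answer):
import numpy as np
# each row is a 128-dimensional feature vector
train_features = extract.predict(x_train)  # shape (60000, 128)
test_features = extract.predict(x_test)    # shape (10000, 128)
# save as a plain numpy dataset for another classifier
np.save('mnist_train_features.npy', train_features)
np.save('mnist_test_features.npy', test_features)
# e.g. feed the features to a scikit-learn classifier (assuming sklearn is installed)
from sklearn.svm import SVC
clf = SVC().fit(train_features, y_train.argmax(axis=1))  # argmax undoes the one-hot labels
print(clf.score(test_features, y_test.argmax(axis=1)))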
You can also train these networks jointly with two outputs using the functional API. Follow the guide and you'll see that you can chain models together and have multiple outputs, each possibly with a separate loss. This lets your model learn shared features that are useful both for classifying the MNIST image and for your other task at the same time.
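For example, a sketch of such a two-output model built with the functional API; the second head here (other_task, with 5 classes) is purely hypothetical and stands in for whatever your other task is:
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
inputs = Input(shape=input_shape)
x = Conv2D(32, (3, 3), activation='relu')(inputs)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
features = Dense(128, activation='relu', name='features')(x)  # shared feature layer
digits = Dense(num_classes, activation='softmax', name='digits')(features)
other = Dense(5, activation='softmax', name='other_task')(features)  # hypothetical second head
joint = Model(inputs, [digits, other])
joint.compile(optimizer='adadelta',
              loss={'digits': 'categorical_crossentropy',
                    'other_task': 'categorical_crossentropy'},
              loss_weights={'digits': 1.0, 'other_task': 0.5})
# joint.fit(x_train, {'digits': y_train, 'other_task': y_other}, ...) with your own labels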

Related

Keras-viz throwing InvalidArgumentError

I am running a simple CNN model in Keras. Code:
from __future__ import print_function
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import tensorflow as tf
batch_size = 128
num_classes = 10
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', name='preds'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
I want to visualize the dense layers. For this, I am using keras-vis.
While running the following code:
from vis.visualization import visualize_activation
from vis.utils import utils
from keras import activations
from matplotlib import pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'preds')
# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
# This is the output node we want to maximize.
filter_idx = 0
img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
plt.imshow(img[..., 0])
I am getting the following error:
InvalidArgumentError: conv2d_2_input_2:0 is both fed and fetched.
Solutions that I have tried:
Installing keras-vis from source for the latest build
Applying changes from the PR mentioned in the issues
Versions:
Keras: 2.7
Keras-Vis: 0.5
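One thing worth ruling out first (an observation about the posted code, not a confirmed fix for this error): the model mixes keras and tf.keras objects (keras.losses together with tf.keras.optimizers.Adam() and tf.keras.utils.to_categorical), while keras-vis itself imports plain keras. Keeping everything in one namespace removes a common source of graph problems; a sketch:
# assumption: stay in the plain keras namespace throughout, since keras-vis imports it
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])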

Why are the predictions going wrong with MNIST CNN?

I trained the CNN on the MNIST dataset, reaching training and validation accuracy of ~0.99.
I followed the exact steps from the example in the Keras documentation for implementing a CNN on the MNIST dataset:
from __future__ import print_function
import cv2
import numpy as np
import tensorflow.keras as keras
import math
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
When I tested the following image:
using the following test code:
img = cv2.imread("m9.png", 0)
img = cv2.resize(img, (28,28))
img = img / 255.
prob = model.predict_proba(img.reshape((1,28, 28, 1)))
print(prob)
model.predict_classes(img.reshape((1,28, 28, 1)))
The class it prints out is array([1]), denoting the number 1. I could not understand the reason for this. Did I predict in an incorrect way?
Exactly the same class, array([1]), was predicted for the number 8 shown below:
Have I made an error during prediction? I tried to understand what could be happening but could not work it out.
There is no error; it's just that your images don't look at all like the ones in the MNIST dataset. This dataset is not meant to train a general digit recognition algorithm; the model will only work with similar images.
In your case the digits end up very small in a 28x28 image, so the predictions are more or less random.
You are resizing the input image to 28 x 28. Instead, you should first crop the image around the digit so that it looks like the data in MNIST; otherwise the digit will occupy a very small portion of the resized image and the results will be arbitrary. A rough sketch follows below.
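A rough sketch of that preprocessing (my own recipe, not from the original answer; it assumes OpenCV and a dark digit on a light background): threshold, crop to the digit's bounding box, pad square, then center the digit on a 28x28 canvas the way MNIST frames it:
import cv2
import numpy as np
img = cv2.imread("m9.png", 0)
# MNIST digits are white on black, so invert while thresholding
_, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# crop to the digit's bounding box
ys, xs = np.nonzero(thresh)
digit = thresh[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
# pad to a square so the aspect ratio survives resizing
side = max(digit.shape)
pad_y, pad_x = (side - digit.shape[0]) // 2, (side - digit.shape[1]) // 2
square = np.pad(digit, ((pad_y, side - digit.shape[0] - pad_y),
                        (pad_x, side - digit.shape[1] - pad_x)), mode='constant')
# MNIST roughly centers a 20x20 digit inside a 28x28 frame
canvas = np.zeros((28, 28), dtype='float32')
canvas[4:24, 4:24] = cv2.resize(square, (20, 20))
print(model.predict(canvas.reshape(1, 28, 28, 1) / 255.).argmax())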

Issue with Keras and graph disconnected when using Tensorflow tensors

I'm trying to create a Keras model that uses some TensorFlow functions in it.
The model is based on the Keras MNIST CNN example and consists of a simple stack of convolutional layers followed by the estimation of a Gaussian. To evaluate the Gaussian function, I estimate its mean and standard deviation using Dense(2). The two parameters are then combined using tf.range and tf.exp to form a Gaussian function. This Gaussian tensor is then multiplied with the network's flattened features to produce the final output.
If I run the model without the Gaussian multiplication, everything works fine. When I introduce the Gaussian bit, I get the error:
RuntimeError: Graph disconnected: cannot obtain value for tensor
Tensor("Exp_2:0", shape=(?, 9216), dtype=float32) at layer "tf". The
following previous layers were accessed without issue: ['input_2',
'conv2d_3', 'conv2d_4', 'max_pooling2d_2', 'dropout_3', 'flatten_2']
Here is a fully reproducible script:
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Model, Input
from keras.layers import Dense, Dropout, Flatten, Multiply
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
inputs = Input(shape=input_shape)
net = Conv2D(32, kernel_size=(3, 3),
             activation='relu')(inputs)
net = Conv2D(64, (3, 3), activation='relu')(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Dropout(0.25)(net)
net = Flatten()(net)
gauss_par = Dense(2, activation='relu')(net)
# Define a vector from 1 to n_frames
x1d = K.tf.range(0, 9216, dtype='float32')
# Transform the vector into a tensor of shape [batch_size, n_frames]
x = K.tf.tile(K.tf.reshape(x1d, [1, -1]), [K.tf.shape(inputs)[0], 1])
# Gaussian function
gaussian = K.tf.exp(-K.tf.pow(x - gauss_par[:, 0], 2) / 2 / K.tf.pow(gauss_par[:, 1], 2))
# Transform the tensorflow tensor into a keras tensor
gaussian_layer = Input(tensor=gaussian, name='tf')
net = Multiply()([net, gaussian_layer])
net = Dense(128, activation='relu')(net)
net = Dropout(0.5)(net)
outputs = Dense(num_classes, activation='softmax')(net)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Do you have any idea what might be causing this error?
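One common restructuring (a sketch, not a verified fix): wrap the raw TensorFlow ops in a Lambda layer so the Gaussian stays inside the Keras graph and the extra Input(tensor=...) becomes unnecessary:
import tensorflow as tf
from keras.layers import Lambda
def make_gaussian(args):
    # feats: (batch, 9216) flattened features; params: (batch, 2) mean and std
    feats, params = args
    n = K.int_shape(feats)[1]
    x = tf.range(0, n, dtype='float32')       # shape (n,)
    mu = tf.expand_dims(params[:, 0], -1)     # shape (batch, 1)
    sigma = tf.expand_dims(params[:, 1], -1)  # shape (batch, 1)
    return tf.exp(-tf.pow(x - mu, 2) / (2.0 * tf.pow(sigma, 2)))  # broadcasts to (batch, n)
gaussian = Lambda(make_gaussian)([net, gauss_par])  # replaces the Input(tensor=...) trick
net = Multiply()([net, gaussian])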

How to improve model for digit recognition?

I am making a simple application to implement digit recognition. The problem is that it works wonderfully on the MNIST dataset but predicts horribly on random images downloaded from Google. What should I do to improve the accuracy of my model?
Another question: someone suggested adding more layers to the model. If so, how do I add more layers to my model?
File in which model is trained:
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import cv2
import matplotlib.pyplot as plt
from keras.models import load_model
import pickle
import h5py
import numpy as np
from keras.callbacks import ModelCheckpoint
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
model=load_model('my_model.h5')
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('initial shape')
print('x_test ',x_test.shape)
print('y_test ',y_test.shape)
# print(x_test);
# print(y_test.shape)
if K.image_data_format() == 'channels_first':
    print('reshape1')
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    print('reshape2')
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print('x_test shape: ' , x_test.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('x_test final : ')
print(x_test)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.10))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.20))
model.add(Dense(256,activation='relu'))
model.add(Dropout(0.40))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
model.save('my_model.h5')
score = model.evaluate(x_test, y_test, verbose=0)
# print(x_test.shape)
# print('\n')
# print(y_test.shape)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
The code below is used for testing my model on my own image:
image = cv2.imread("2.jpg")
img_rows, img_cols = 28, 28
x_test1 = cv2.resize(image, (28, 28))
x_test1 = cv2.cvtColor(x_test1,cv2.COLOR_RGB2GRAY)
print(x_test1.shape)
if K.image_data_format() == 'channels_first':
    print('reshape1')
    x_test1 = x_test1.reshape(1, 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    print('reshape2')
    x_test1 = x_test1.reshape(1, img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_test1 = x_test1.astype('float32')
x_test1 /= 255
y_test1 = np.array([[ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]])
score = model.evaluate(x_test1, y_test1, verbose=0)
print('done')
print('score of image = ')
print(score[1])
print(score[0])
score=model.predict_classes(x_test1)
print(score)
The code below is used to load a previously trained model and continue training it from the last checkpoint. If there are any mistakes, please point them out.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
filepath="my_model.h5"
checkpoint = ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
score=model.fit(x_test1, y_test1,epochs=12, batch_size=128, callbacks=callbacks_list, verbose=0)
new_model = load_model("my_model.h5")
np.testing.assert_allclose(model.predict(x_test1),
                           new_model.predict(x_test1),
                           1e-5)
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
new_model.fit(x_train, y_train, epochs=12, batch_size=128, callbacks=callbacks_list)
Does the above code cause over-fitting of the model? If yes, how do I improve it so that it can handle any kind of data? Help required!
You should not be surprised by these results, because you train your model on one data domain but test it on a different one. Yes, you may get slightly better performance with a better-designed network, but the gap will still exist.
To close this gap, you can improve the following things:
improve your training data: (a) train your model with more real-looking digit data, for example the Street View House Numbers (SVHN) dataset or the digits in the Chars74K dataset, and (b) train your model with better data augmentation techniques, for example blending a digit sample with a random background image.
adapt your testing data: preprocess testing samples so that they look similar to those in your training data; for example, binarize a testing sample so that it resembles an MNIST digit before you feed it to your network for prediction (a rough sketch follows this list).
improve your model by explicitly considering the variations in real data: for example, if you think rotation in the testing data degrades your model's performance, you can account for this during training by adding a side task that predicts digit rotation; or, if you believe a good model should predict only the digit and not the dataset a sample belongs to, you can add an adversarial task to force the network to forget this information.
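A rough sketch of the second point (my own recipe, with a hypothetical file name; it assumes OpenCV is available and the photo shows a dark digit on a light background): binarize with Otsu's threshold and invert, so the test image resembles a white-on-black MNIST digit before predicting:
import cv2
import numpy as np
img = cv2.imread("test_digit.png", 0)  # hypothetical file name
img = cv2.resize(img, (28, 28))
# Otsu threshold + inversion: white digit on black background, like MNIST
_, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
x = img.astype('float32') / 255.
print(model.predict(x.reshape(1, 28, 28, 1)).argmax(axis=1))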

Keras mnist tutorial giving me very low accuracy

I am trying out the mnist tutorial using keras at https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py but am getting really low accuracy. I got a test accuracy of 83% on my 1st trial after 12 epochs and 75% on my 2nd trial. The page claims that it is able to get 99.25% test accuracy after 12 epochs. I am not sure why this is happening, because I literally copy-pasted the code into PyCharm.
I am using Keras version 2.0.0 with TensorFlow version 0.12.1 as its backend. Training is done on the CPU.
Could the reason be the versions? Should I upgrade to the latest versions of Keras and TensorFlow? I hope someone can run the exact code and let me know their results after 12 epochs with the same Keras and TensorFlow versions.
Or could the reason be the inherent variability of deep learning, meaning I might eventually get ~99% test accuracy after 12 epochs if I run enough trials? Here is the code, in case anyone doesn't want to follow the link:
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
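On the version question: Keras 2.0 targets the TensorFlow 1.x API, and TensorFlow 0.12.1 predates it, so checking and upgrading the backend is a cheap first step (a sketch, not a verified diagnosis of the accuracy gap):
import keras
import tensorflow as tf
print('Keras:', keras.__version__)
print('TensorFlow:', tf.__version__)
# if TensorFlow is older than 1.0, upgrade both packages, e.g.:
#   pip install --upgrade tensorflow keras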
