I am learning image classification with TensorFlow. Below is my program. When I pass the same test image repeatedly, it sometimes returns different labels, and the predictions are not correct.
import tensorflow as tf
import numpy as np
import os
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten,Activation
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.constraints import maxnorm
from keras.utils import np_utils
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64,64,3 ),activation="relu"))
classifier.add(MaxPooling2D(pool_size = (2,2)))
classifier.add(Flatten())
classifier.add(Dense(128 , kernel_initializer ='uniform' , activation = 'relu'))
classifier.add(Dense(10 , kernel_initializer ='uniform' , activation = 'softmax'))
classifier.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy' , metrics = ['accuracy'])
from keras_preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'/code/train',
shuffle=True,
target_size=(64,64),
batch_size=5,
class_mode='categorical',
classes=["shiv", "kart", "nall","surendra","harshi","nag","saura","rajan","manoj","abhimanyu"])
test_set = test_datagen.flow_from_directory(
'/code/validation',
shuffle=True,
target_size=(64,64),
batch_size=5,
class_mode='categorical',
classes=["shiv", "kart", "nall","surendra","harshi","nag","saura","rajan","manoj","abhimanyu"])
from IPython.display import display
from PIL import Image
classifier.fit(
training_set,
steps_per_epoch=80,
epochs=12,
validation_data=test_set,
validation_steps=100)
from keras_preprocessing import image
files_dir = '/code/test_image_clasification1'
files = os.listdir(files_dir)
np.set_printoptions(precision=3)
for f in files:
    image_path = files_dir + '/' + f
    test_image = image.load_img(image_path, target_size=(64, 64))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    #classes = classifier.predict_classes(test_image)
    #print(classes)
    labels = ["shivaji","kartik","nallayan","surendar","harshita","nagendar","saurabh","rajan","manoj","abhimanyu"]
    indx = np.argmax(result)
    #score = np.argmax(np.round(result*100,2))
    #print(np.round(result,2))
    match_percentage = np.max(result)
    match_class = labels[indx]
    print("the image " + f + " is matching with " + match_class + " having matching percentage " + "{:.2%}".format(match_percentage))
    #print(list(zip(labels,result)))
    #print(f, labels[indx])
Can anyone tell me if anything is wrong with the model training? For information, I have 122 images in the training set and 48 in the validation set.
You need to apply the same rescaling factor to your test images; this is currently missing and leads to wrong probability predictions. Rescale test_image to [0, 1] by multiplying it by 1./255, just as the generators do during training.
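For example, a minimal sketch of the prediction step with the rescaling added (reusing image_path, classifier and labels from the code above):
from keras_preprocessing import image
import numpy as np

test_image = image.load_img(image_path, target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
test_image = test_image / 255.0  # same [0, 1] rescaling the training generator applies
result = classifier.predict(test_image)
print(labels[np.argmax(result)])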
You can also try a different optimizer and loss. Note that sparse_categorical_crossentropy expects integer labels, so if you switch to it, the generators should use class_mode='sparse' instead of 'categorical':
classifier.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
I'm using transfer learning for a classification problem, and both training and validation run without any problem. I am using TensorFlow 1.13.2 and I am forced to use it, since it is the only TensorFlow version supported by my hardware. The problem is that when I call model.save('saved_model_1'), it does not save a .pb file as expected. What should I do to save the weight file properly?
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNet
from PIL import Image
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.python.tools import freeze_graph
def prepare_df(data_type):
    X = []
    y = []
    path = '../tf_files/hand_orientations/' + data_type + '/'
    for i in os.listdir(path):
        # Image
        X.append(i)
        # Label
        y.append(i.split('_')[0])
    X = np.array(X)
    y = np.array(y)
    df = pd.DataFrame()
    df['filename'] = X
    df['label'] = y
    return df
df_train = prepare_df('training')
df_val = prepare_df('validation')
df_test = prepare_df('evaluation')
# Create the ImageDataGenerator object
train_datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
)
val_datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
)
# Generate batches and augment the images
train_generator = train_datagen.flow_from_dataframe(
df_train,
directory='../tf_files/hand_orientations/training/',
x_col='filename',
y_col='label',
class_mode='binary',
target_size=(224, 224),
)
val_generator = train_datagen.flow_from_dataframe(
df_val,
directory='../tf_files/hand_orientations/validation/',
x_col='filename',
y_col='label',
class_mode='binary',
target_size=(224, 224),
)
# Initialize the Pretrained Model
base_model = MobileNet(weights='imagenet',
input_shape=(224, 224, 3),
include_top=False)
print(base_model.summary())
# Set this parameter to make sure it's not being trained
base_model.trainable = False
# Set the input layer
input_ = tf.keras.Input(shape=(224, 224, 3))
# Set the feature extractor layer
x = base_model(input_, training=False)
# Set the pooling layer
x = tf.keras.layers.GlobalAveragePooling2D()(x)
# Set the final layer with sigmoid activation function
output_ = tf.keras.layers.Dense(1, activation='sigmoid')(x)
# Create the new model object
model = tf.keras.Model(input_, output_)
# Compile it
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Print The Summary of The Model
model.summary()
model.fit(train_generator, epochs=20, validation_data=val_generator)
y_true = []
y_pred = []
for i in os.listdir('../tf_files/hand_orientations/evaluation'):
    img = Image.open('../tf_files/hand_orientations/evaluation/' + i)
    img = img.resize((224, 224))
    img = np.array(img)
    img = np.expand_dims(img, 0)
    y_true.append(i.split('_')[0])
    y_pred.append('right' if model.predict(img) > 0.5 else 'left')
print(classification_report(y_true, y_pred))
print()
print(confusion_matrix(y_true, y_pred))
print(y_true)
print(y_pred)
model.save('saved_model_1')
As it is a TensorFlow 1.x model, please save it using:
tf.saved_model.save(model, model_save_path) # model and path to save this model
To check saved_model format
ls {model_save_path}
Output:
assets/ saved_model.pb variables/
To reload the model, you can use the method below:
tf.saved_model.load(model_save_path)
You can check this reference for more details.
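If the SavedModel API behaves differently on your TensorFlow version, saving the Keras model to a single HDF5 file is another option (a sketch, not part of the original answer; the file name is only an example):
model.save('saved_model_1.h5')  # weights and architecture in one HDF5 file
reloaded = tf.keras.models.load_model('saved_model_1.h5')  # rebuild the model later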
We are currently working on an image classification task for detecting tuberculosis in chest X-ray images; our code is below. We used 0.7 of the data for the training set, 0.2 for the validation set, and 0.1 for the test set. Our training and validation loss curves are shown here [plot attached in the original post], but when we try the model on our test data set, this is what we get [results attached in the original post]. Is there something wrong with our code? Thank you in advance.
from tensorflow import keras
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.mobilenet_v2 import preprocess_input
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from datetime import datetime, date
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
#Loading a pre-trained model
image_size = 224
base_model = MobileNetV2(input_shape=(image_size,image_size,3), weights='imagenet', include_top=False)
for layer in base_model.layers:
layer.trainable = False
model = Sequential()
model.add(base_model)
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(2, activation="sigmoid"))
loss_func = BinaryCrossentropy()
opt = Adam(learning_rate=0.001)
model.compile(loss=loss_func,
optimizer=opt,
metrics=['accuracy'])
#Training
test_path = '...'
val_path = '...'
datagen = ImageDataGenerator(rescale = 1./255,horizontal_flip = True, shear_range = 0.2, zoom_range=0.2)
batch_size=32
validation_size=8
train_set = datagen.flow_from_directory(test_path,
target_size = (image_size, image_size),
batch_size=batch_size,
class_mode = 'categorical')
validation_set = datagen.flow_from_directory(val_path,
target_size = (image_size, image_size),
batch_size=validation_size,
class_mode = 'categorical')
#Fitting the data to the model
model_name = 'MobileNetV2'
date_today= date.today().strftime('%m_%d_%Y')
checkpoint = ModelCheckpoint(filepath=f'Models/{model_name}_{date_today}.h5',
monitor='val_loss',
mode='min',
verbose=1,
save_best_only=True)
model_history = model.fit(train_set,
validation_data=validation_set,
epochs=100,
steps_per_epoch=len(train_set)//batch_size,
validation_steps=len(validation_set)//validation_size,
callbacks=[checkpoint],
verbose=1)
#Testing the model on the test set
test_path = '...'
test_datagen = ImageDataGenerator()
test_set = test_datagen.flow_from_directory(test_path,
target_size = (image_size, image_size),
class_mode = 'categorical')
predictions = model.predict(test_set, verbose=1)
y_pred = np.argmax(predictions, axis=1)
class_labels = list(test_set.class_indices.keys())
print('Classification Report')
clsf = classification_report(test_set.classes, y_pred, target_names=class_labels)
print(clsf)
print('\n')
print('Confusion Matrix')
cfm = confusion_matrix(test_set.classes, y_pred)
print(cfm)
The code is mostly correct, but there is one small mistake: you have assigned 2 units to the sigmoid output layer. That's not right; there should be a single unit, because it is a binary classification problem. Like this:
model.add(Dense(1, activation="sigmoid"))
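With a single sigmoid unit the labels also have to be binary, so the generators and the evaluation code would change roughly like this (a sketch under that assumption, reusing the names from the question; it is not part of the original code):
train_set = datagen.flow_from_directory(test_path,
                                        target_size=(image_size, image_size),
                                        batch_size=batch_size,
                                        class_mode='binary')  # integer 0/1 labels instead of one-hot
# (likewise for validation_set and test_set)

# a sigmoid output is a single probability, so threshold it instead of taking argmax
predictions = model.predict(test_set, verbose=1)
y_pred = (predictions.ravel() > 0.5).astype(int)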
Tuberculosis is a complex target with subtle features, so the test set may produce unexpected results. To address this, you may need to modify your network and add more training images. You can experiment with transfer learning, but if the network you transfer the parameters from was trained on objects entirely unrelated to tuberculosis, it might not be appropriate.
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import MaxPooling2D
classifier = Sequential()
classifier.add(Convolution2D(32,(3,3),input_shape = (64,64,3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Convolution2D(32,(3,3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=32,activation = 'relu'))
classifier.add(Dense(units=64,activation = 'relu'))
classifier.add(Dense(units=128,activation = 'relu'))
classifier.add(Dense(units=256,activation = 'relu'))
classifier.add(Dense(units=256,activation = 'relu'))
classifier.add(Dense(units=6,activation = 'softmax'))
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,       # rescale the images to the range [0,1]
                                   shear_range = 0.2,       # randomly shear the images
                                   zoom_range = 0.2,        # randomly zoom the images
                                   horizontal_flip = True)  # randomly flip half of the images horizontally
test_datagen = ImageDataGenerator(rescale = 1./255)
print("\nTraining the data...\n")
training_set = train_datagen.flow_from_directory('train',
target_size=(64,64),
batch_size=12, #Total no. of batches
class_mode='categorical')
test_set = test_datagen.flow_from_directory('test',
target_size=(64,64),
batch_size=12,
class_mode='categorical')
classifier.fit_generator(training_set,
steps_per_epoch=len(training_set), # Total training images
epochs = 20, # Total no. of epochs
validation_data = test_set,
validation_steps = len(test_set)) # Total testing images
classifier.save("model.h5")
#Prediction
classes = ['Fresh Apple','Fresh Banana','Fresh Orange','Rotten Apple','Rotten Banana','Rotten Orange']
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
new_model = load_model('model.h5')
filename = 'a1.jpeg'
new_model.summary()
test_image = image.load_img('images\\a1.jpg',target_size=(64,64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = new_model(test_image)
result1 = result[0]
for i in range(6):
    if result1[i] == 1.:
        break
prediction = classes[i]
print(prediction)
My model gives the same output for any input. The errors and warnings have been removed, but the output is still the same. Before removing the warnings the model always returned one value ('A', for example), and after removing them it always returns another value ('B'). I don't know whether the problem is in the model or in the #Prediction section of my code.
A couple of things. In your generators you set a batch size of 12, and in model.fit you have steps_per_epoch=len(training_set). If that really is the total number of training images (as your comment says), you will go through your training set 12 times per epoch. I usually leave steps_per_epoch and validation_steps as None; model.fit will then determine the values internally. If you do want to set them yourself, use:
steps_per_epoch = int(len(train_set) / batch_size) + 1
validation_steps = int(len(test_set) / batch_size) + 1
Now for predictions: you scaled your training and test images by 1/255, so you need to do the same for the images you want to predict. Right after the code that expands the dimensions, add:
test_image = test_image / 255
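In context, the prediction block from the question would then look roughly like this (a sketch reusing the names and path from the question):
test_image = image.load_img('images\\a1.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
test_image = test_image / 255  # match the 1./255 rescaling used during training
result = new_model(test_image)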
My training and validation curves have spikes in both loss and accuracy when training VGG16. I am using transfer learning and have replaced the classifier head for a binary gender-classification problem. Can someone suggest why I am getting such spikes and how I can reduce them?
The code is as follows:
from keras.layers import Input, Lambda, Dense, Flatten, Dropout
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# re-size all the images to this
IMAGE_SIZE = [224, 224]
train_path = 'E:/decompressed_images/data_set/train'
valid_path = 'E:/decompressed_images/data_set/validation'
# add preprocessing layer to the front of VGG
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
# don't train existing weights
for layer in vgg.layers:
layer.trainable = False
# useful for getting number of classes
folders = glob('E:/decompressed_images/data_set/train*')
x = Flatten()(vgg.output)
# x = Dense(1000, activation='relu')(x)
prediction = Dense(len(folders), activation='sigmoid')(x)
# create a model object
model = Model(inputs=vgg.input, outputs=prediction)
# view the structure of the model
model.summary()
# tell the model what cost and optimization method to use
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
horizontal_flip = True,
vertical_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1,
zoom_range = 0.1,
rotation_range = 10)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('E:/Ullu/new_trial__/balanced_dataset/train',
target_size = (224, 224),
batch_size = 64,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('E:/Ullu/new_trial__/balanced_dataset/test',
target_size = (224, 224),
batch_size = 64,
class_mode = 'binary')
r = model.fit_generator(
training_set,
validation_data=test_set,
epochs=100,
steps_per_epoch=len(training_set),
validation_steps=len(test_set)
)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()
plt.savefig('E:/Model_128_30/LossVal_loss.png')
# accuracies
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()
plt.savefig('E:/Model_128_30/AccVal_acc.png')
import tensorflow as tf
from keras.models import load_model
model.save('E:/Model_128_30/128_30_wt.h5')
[Plot: training and validation accuracy, high and fluctuating]
[Plot: training and validation loss, high and fluctuating]
I tried adding a dropout layer (0.5) before the final layer, but the training and validation accuracy and loss stay the same. Could anyone please suggest where I am going wrong? Thanks.
I have built a prediction model in Keras using a CNN to classify car damage images as severe or not severe. The predicted class and accuracy change every time I run the code on the same dataset with no other parameters changed. I have tried restarting the kernel and setting a seed for the model in the hope of getting consistent results. I am new to Python, so kindly help me get the same results every time.
import random
random.seed(801)
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(64, (2, 2), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(64, (2, 2), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Adding dropout
classifier.add(Dropout(0.2))
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
# Adding dropout
classifier.add(Dropout(0.2))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
# shear_range = 0.2,
# zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
#train_labels = keras.utils.to_categorical(train_labels,num_classes)
#test_labels = keras.utils.to_categorical(test_labels,num_classes)
training_set = train_datagen.flow_from_directory('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2 category/training',
target_size = (64, 64),
batch_size = 16,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2 category/validation',
target_size = (64, 64),
batch_size = 16,
class_mode = 'binary')
batch_size=16
classifier.fit_generator(training_set,
steps_per_epoch = 605//batch_size,
epochs = 9,
validation_data = test_set,
validation_steps = 5//batch_size
)
#classifier.save('first_model.h5')
classifier.save('first.h5')
# finding the number associated classes
#classes=training_set.class_indices
#print(classes)
# extracting file names of images
import os
from PIL import Image
import numpy as np
path='C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/data3a_full/validation/01-minor'
img_names = [f for f in os.listdir(path) if os.path.splitext(f)[-1] == '.JPEG']
#print(img_names[1])
img_names=np.asarray(img_names) #converting list to array
# predicting classes for multiple images
import numpy as np
from keras.models import load_model
from keras.preprocessing import image
#os.chdir('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2nd check/pred')
os.chdir('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/data3a_full/validation/01-minor')
a=load_model('first.h5')
classes=[]
result=[]
for i in range(len(img_names)):
    img = image.load_img(img_names[i], target_size=(64, 64))
    test_image = image.img_to_array(img)
    test_image = np.expand_dims(test_image, axis=0)
    result = a.predict(test_image)
    #print(result)
    if result[0] >= 0.5:
        prediction = 'severe'
    else:
        prediction = 'not severe'
    classes.append(prediction)
#print(classes)
#prediction2=print(classes)
import pandas as pd
dfn=pd.DataFrame({'image':img_names,
'prediction':classes
})
len(dfn.loc[dfn['prediction']=='not severe'])
len(dfn.loc[dfn['prediction']=='severe'])
It looks like you're training the model every time you classify, and this is what is causing the inconsistency. The reason this yields different results despite setting the seed is explained in "Why can't I get reproducible results in Keras even though I set the random seeds?".
I suggest you split this into two files so that you train in one script and load and test in another. That way you will get more consistent results.
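A rough sketch of that split, reusing the names from the question (the file names are only examples):
# train.py -- build the classifier and the generators, train once, save to disk
classifier.fit_generator(training_set,
                         steps_per_epoch=605 // batch_size,
                         epochs=9,
                         validation_data=test_set)
classifier.save('first.h5')

# predict.py -- no training code at all, just load and predict
from keras.models import load_model
model = load_model('first.h5')
result = model.predict(test_image)  # test_image prepared exactly as in the question
prediction = 'severe' if result[0] >= 0.5 else 'not severe'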
I had similar problems with loading weights. The problem is that when you load the weights, Keras randomly assigns them because of the model declaration. I switched to using checkpoints for storing my weights and model.load_weights(checkpoints_directory) to load them. You will have to use a callback for this. Here is a short code snippet for this task (Google has a nice video on this topic).
from keras.callbacks import ModelCheckpoint
callbacks = [ModelCheckpoint(checkpoints_directory, monitor='val_loss', save_weights_only=True, save_best_only=True, period=period)]
model.fit(..., callbacks=callbacks, ...)
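To restore the weights later, declare the same architecture again and load the checkpoint into it (a sketch under the assumptions above; build_model is a hypothetical helper that re-creates the model, and checkpoints_directory is the path used by the callback):
model = build_model()                      # hypothetical helper that re-declares the same architecture
model.load_weights(checkpoints_directory)  # load the weights saved by ModelCheckpoint
predictions = model.predict(test_image)    # predictions are now consistent across runs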