I'm using transfer learning to tackle a classification problem, and both the training and validation steps run without any problem. I am using TensorFlow 1.13.2 and I am forced to stay on it, since it is the only TensorFlow version supported by my hardware. The problem is that when I call model.save('saved_model_1'), it does not save a .pb file as expected. What should I do to save the model weights properly?
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNet
from PIL import Image
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.python.tools import freeze_graph
def prepare_df(data_type):
    X = []
    y = []
    path = '../tf_files/hand_orientations/' + data_type + '/'
    for i in os.listdir(path):
        # Image
        X.append(i)
        # Label
        y.append(i.split('_')[0])
    X = np.array(X)
    y = np.array(y)
    df = pd.DataFrame()
    df['filename'] = X
    df['label'] = y
    return df
df_train = prepare_df('training')
df_val = prepare_df('validation')
df_test = prepare_df('evaluation')
# Create the ImageDataGenerator object
train_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
)
val_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
)
# Generate batches and augment the images
train_generator = train_datagen.flow_from_dataframe(
    df_train,
    directory='../tf_files/hand_orientations/training/',
    x_col='filename',
    y_col='label',
    class_mode='binary',
    target_size=(224, 224),
)
val_generator = train_datagen.flow_from_dataframe(
    df_val,
    directory='../tf_files/hand_orientations/validation/',
    x_col='filename',
    y_col='label',
    class_mode='binary',
    target_size=(224, 224),
)
# Initialize the Pretrained Model
base_model = MobileNet(weights='imagenet',
                       input_shape=(224, 224, 3),
                       include_top=False)
print(base_model.summary())
# Set this parameter to make sure it's not being trained
base_model.trainable = False
# Set the input layer
input_ = tf.keras.Input(shape=(224, 224, 3))
# Set the feature extractor layer
x = base_model(input_, training=False)
# Set the pooling layer
x = tf.keras.layers.GlobalAveragePooling2D()(x)
# Set the final layer with sigmoid activation function
output_ = tf.keras.layers.Dense(1, activation='sigmoid')(x)
# Create the new model object
model = tf.keras.Model(input_, output_)
# Compile it
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Print The Summary of The Model
model.summary()
model.fit(train_generator, epochs=20, validation_data=val_generator)
y_true = []
y_pred = []
for i in os.listdir('../tf_files/hand_orientations/evaluation'):
    img = Image.open('../tf_files/hand_orientations/evaluation/' + i)
    img = img.resize((224, 224))
    img = np.array(img)
    img = np.expand_dims(img, 0)
    y_true.append((i.split('_')[0]))
    y_pred.append('right' if model.predict(img) > 0.5 else 'left')
print(classification_report(y_true, y_pred))
print()
print(confusion_matrix(y_true, y_pred))
print(y_true)
print(y_pred)
model.save('saved_model_1')
As it is a TensorFlow 1.x model, please save your model using:
tf.saved_model.save(model, model_save_path) # model and path to save this model
To check the SavedModel format:
ls {model_save_path}
Output:
assets/ saved_model.pb variables/
To reload the model, you can use the method below:
tf.saved_model.load(model_save_path)
You can check this reference for more details.
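Putting the answer's pieces together, a minimal sketch of saving, checking, and reloading (the directory name saved_model_1 is just an example, the os.listdir check simply mirrors the ls step above, and this assumes tf.saved_model.save is available in your TF build as the answer suggests):

import os
import tensorflow as tf

# assuming `model` is the compiled Keras model from the question
model_save_path = 'saved_model_1'

# export in the SavedModel format; this writes saved_model.pb
# together with the assets/ and variables/ directories
tf.saved_model.save(model, model_save_path)

# confirm the expected files are there (same idea as `ls {model_save_path}`)
print(os.listdir(model_save_path))

# restore the saved object later
restored = tf.saved_model.load(model_save_path)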
Related
I implemented code from pyimagesearch for object detection with R-CNN.
I would like to train my data with the R-CNN approach using MobileNetV2.
I have 30,000 images. The script works for me with fewer images, but it gives me an error with a large number of images.
I don't understand at all why my labels are not being collected into the list.
from pyimagesearch import config
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import pickle
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--plot", type=str, default="plot.png",
                help="path to output loss/accuracy plot")
args = vars(ap.parse_args())
# initialize the initial learning rate, number of epochs to train for,
# and batch size
INIT_LR = 1e-4
EPOCHS = 5
BS = 32
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class labels
print("[INFO] loading images...")
imagePaths = list(paths.list_images(config.BASE_PATH))
data = []
labels = []
print(labels)
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the filename
    label = imagePath.split(os.path.sep)[-2]
    # load the input image (224x224) and preprocess it
    image = load_img(imagePath, target_size=config.INPUT_DIMS)
    image = img_to_array(image)
    image = preprocess_input(image)
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)
# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)
# construct the training image generator for data augmentation
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
# load the MobileNetV2 network, ensuring the head FC layer sets are
# left off
baseModel = MobileNetV2(weights="imagenet", include_top=False,
                        input_tensor=Input(shape=(224, 224, 3)))
# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
# compile our model
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR)
model.compile(loss="binary_crossentropy", optimizer=opt,
              metrics=["accuracy"])
# train the head of the network
print("[INFO] training head...")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs,
                            target_names=lb.classes_))
# serialize the model to disk
print("[INFO] saving mask detector model...")
model.save(config.MODEL_PATH, save_format="h5")
# serialize the label encoder to disk
print("[INFO] saving label encoder...")
f = open(config.ENCODER_PATH, "wb")
f.write(pickle.dumps(lb))
f.close()
Traceback (most recent call last):
  File "/content/gdrive/MyDrive/misa/rcnn/fine_tune_rcnn.py", line 69, in <module>
    labels = lb.fit_transform(labels)
  File "/usr/local/lib/python3.7/dist-packages/sklearn/preprocessing/_label.py", line 324, in fit_transform
    return self.fit(y).transform(y)
  File "/usr/local/lib/python3.7/dist-packages/sklearn/preprocessing/_label.py", line 298, in fit
    raise ValueError("y has 0 samples: %r" % y)
ValueError: y has 0 samples: array([], dtype=float64)
Can you help me with this?
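Judging from the traceback, labels is empty by the time it reaches LabelBinarizer.fit_transform, which means paths.list_images(config.BASE_PATH) returned no files. A small sanity check before the loop (purely a hypothetical debugging aid, reusing the same config module and imutils call as the script above) can confirm whether the dataset directory is actually being found:

from imutils import paths
from pyimagesearch import config

# check that images are actually found before building data/labels
imagePaths = list(paths.list_images(config.BASE_PATH))
print("BASE_PATH:", config.BASE_PATH)
print("number of images found:", len(imagePaths))
assert len(imagePaths) > 0, "no images found under config.BASE_PATH"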
I'm having trouble here.
About the code: I create a model here. The first step is to initialize the model with Sequential(). After that, we flatten the data and add 3 (or more) additional hidden layers.
import pandas as pd
import numpy as np
import itertools
import keras
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras import optimizers
from keras.preprocessing import image
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
import math
import datetime
import time
import tensorflow as tf
img_width, img_height = 224, 224
#Create a bottleneck file
top_model_weights_path = 'model_.h5'
train_data_dir = 'data/train'
#validation_data_dir = 'data/validation'
test_data_dir = 'data/test'
epochs = 7 #this has been changed after multiple model run
# batch size used by flow_from_directory and predict_generator
batch_size = 50
VGG16 = tf.keras.applications.VGG16(include_top=False, weights='imagenet')
datagen = ImageDataGenerator(rescale=1. / 255)
start = datetime.datetime.now()
generator = datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
nb_train_samples = len(generator.filenames)
num_classes = len(generator.class_indices)
predict_size_train = int(math.ceil(nb_train_samples / batch_size))
bottleneck_features_train = VGG16.predict_generator(generator, predict_size_train)
np.save('bottleneck_features_train.npy', bottleneck_features_train)
end= datetime.datetime.now()
elapsed= end-start
print ('Time: ', elapsed)
generator_top = datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)
nb_train_samples = len(generator_top.filenames)
num_classes = len(generator_top.class_indices)
# load the bottleneck features saved earlier
train_data = np.load('bottleneck_features_train.npy')
# get the class labels for the training data, in the original order
train_labels = generator_top.classes
# convert the training labels to categorical vectors
train_labels = to_categorical(train_labels, num_classes=num_classes)
start = datetime.datetime.now()
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(100, activation=keras.layers.LeakyReLU(alpha=0.3)))
model.add(Dropout(0.5))
model.add(Dense(50, activation=keras.layers.LeakyReLU(alpha=0.3)))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])
history = model.fit(train_data, train_labels,
                    epochs=7,
                    batch_size=batch_size,
                    validation_data=(validation_data, validation_labels))
model.save_weights(top_model_weights_path)
(eval_loss, eval_accuracy) = model.evaluate(
    validation_data, validation_labels, batch_size=batch_size, verbose=1)
print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
print("[INFO] Loss: {}".format(eval_loss))
end= datetime.datetime.now()
elapsed= end-start
print ('Time: ', elapsed)
My error is:
ValueError: Failed to find data adapter that can handle input: <class 'NoneType'>, <class 'NoneType'>
How can I fix this error? I have tried but still could not fix it.
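The NoneType in the error lines up with the fact that validation_data and validation_labels are never built in the code shown (the validation_data_dir line is commented out). A hedged sketch of how they could be produced, mirroring the training bottleneck steps above and assuming a data/validation directory laid out like data/train:

# hypothetical: build validation bottleneck features the same way as the
# training ones, assuming a data/validation directory structured like data/train
validation_data_dir = 'data/validation'

generator_val = datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
nb_validation_samples = len(generator_val.filenames)
predict_size_validation = int(math.ceil(nb_validation_samples / batch_size))

# VGG16 bottleneck features for the validation images
validation_data = VGG16.predict_generator(generator_val, predict_size_validation)

# class labels in the same (unshuffled) order, one-hot encoded
generator_val_top = datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)
validation_labels = to_categorical(generator_val_top.classes, num_classes=num_classes)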
I did it like this and it works properly. Please try this:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import itertools
import os, glob
from tqdm import tqdm
from efficientnet.tfkeras import EfficientNetB4
img_path = '.\\Data\\test\\PNEUMONIA\\PNEUMONIA.jpg'
model_path = '.\\model.h5'
# labels used while training
labels = {0: 'NORMAL', 1: 'PNEUMONIA', 2: 'COVID'}
# loading the model
model = tf.keras.models.load_model(model_path, compile=False)
# model allowable image dimensions
img_width, img_height = 224, 224
# loading the image
image = cv2.imread(img_path)
# resizing the image
image = cv2.resize(image, (img_width, img_height))
# converting BGR to RGB format
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# adding a new axis [batch axis]
image = image[np.newaxis, ...]
# rescale image from [0,255] to [0,1]
image = image / 255.
# predicting the output
prediction = model.predict(image)
# squeeze/remove an extra axis
prediction = np.squeeze(prediction)
for i in labels:
    print(labels[i], ':', prediction[i])
# picking the class with max. score
prediction = np.argmax(prediction)
# selecting the class with max. score
output = labels[prediction]
#print('Prediction:',output)
print()
print('The image which you entered, prediction is:',output)
We are currently working on an image classification task for detecting tuberculosis from chest X-ray images; you can see our code below. We used 0.7 of the data for the training set, 0.2 for the validation set, and 0.1 for the test set. Our training and validation loss curves are shown here.
But when we try the model on our test data set, this is what we get:
Is there something wrong with our code? Thank you in advance.
from tensorflow import keras
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.mobilenet_v2 import preprocess_input
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from datetime import datetime, date
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
#Loading a pre-trained model
image_size = 224
base_model = MobileNetV2(input_shape=(image_size,image_size,3), weights='imagenet', include_top=False)
for layer in base_model.layers:
    layer.trainable = False
model = Sequential()
model.add(base_model)
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(2, activation="sigmoid"))
loss_func = BinaryCrossentropy()
opt = Adam(learning_rate=0.001)
model.compile(loss=loss_func,
              optimizer=opt,
              metrics=['accuracy'])
#Training
test_path = '...'
val_path = '...'
datagen = ImageDataGenerator(rescale = 1./255,horizontal_flip = True, shear_range = 0.2, zoom_range=0.2)
batch_size=32
validation_size=8
train_set = datagen.flow_from_directory(test_path,
                                        target_size=(image_size, image_size),
                                        batch_size=batch_size,
                                        class_mode='categorical')
validation_set = datagen.flow_from_directory(val_path,
                                             target_size=(image_size, image_size),
                                             batch_size=validation_size,
                                             class_mode='categorical')
#Fitting the data to the model
model_name = 'MobileNetV2'
date_today= date.today().strftime('%m_%d_%Y')
checkpoint = ModelCheckpoint(filepath=f'Models/{model_name}_{date_today}.h5',
                             monitor='val_loss',
                             mode='min',
                             verbose=1,
                             save_best_only=True)
model_history = model.fit(train_set,
                          validation_data=validation_set,
                          epochs=100,
                          steps_per_epoch=len(train_set)//batch_size,
                          validation_steps=len(validation_set)//validation_size,
                          callbacks=[checkpoint],
                          verbose=1)
#Testing the model on the test set
test_path = '...'
test_datagen = ImageDataGenerator()
test_set = test_datagen.flow_from_directory(test_path,
                                            target_size=(image_size, image_size),
                                            class_mode='categorical')
predictions = model.predict(test_set, verbose=1)
y_pred = np.argmax(predictions, axis=1)
class_labels = list(test_set.class_indices.keys())
print('Classification Report')
clsf = classification_report(test_set.classes, y_pred, target_names=class_labels)
print(clsf)
print('\n')
print('Confusion Matrix')
cfm = confusion_matrix(test_set.classes, y_pred)
print(cfm)
The code is mostly correct, but there is one small mistake: you have assigned 2 units to the sigmoid output layer. That is not correct; there should be 1 unit, because it's a binary classification problem. Like this:
model.add(Dense(1, activation="sigmoid"))
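A hedged sketch of what the head could look like with a single sigmoid unit; note that the accompanying switch of the generators to class_mode='binary' and thresholding predictions at 0.5 are my assumptions, not something the answer above states:

from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam

image_size = 224
base_model = MobileNetV2(input_shape=(image_size, image_size, 3),
                         weights='imagenet', include_top=False)
base_model.trainable = False

# same head as in the question, but with a single sigmoid unit
model = Sequential([
    base_model,
    Flatten(),
    Dense(1000, activation='relu'),
    Dense(1, activation='sigmoid'),   # 1 unit for a binary problem
])
model.compile(loss=BinaryCrossentropy(),
              optimizer=Adam(learning_rate=0.001),
              metrics=['accuracy'])

# with a single sigmoid output the generators would use class_mode='binary'
# instead of 'categorical', and evaluation would threshold predictions at 0.5
# rather than using argmax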
Tuberculosis is a complex object with sophisticated features. Therefore, the testing set may produce unexpected results. To circumvent this, you must modify your network and incorporate additional training images. You can experiment with transfer learning, but if the network from which you want to transfer the parameters was trained on objects entirely unrelated to tuberculosis, it might not be appropriate.
I am trying to run a deep learning code that I found in a tutorial in order to familiarise myself with resnet50, keras and tensorflow with python 3.7. When I run my code, I get the following error:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
I tried to use the following fix, as mentioned on Stack Overflow:
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
This did not help. My full code can be seen below:
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from keras.preprocessing import image
from sklearn.linear_model import LogisticRegression
from tensorflow.python.framework.ops import disable_eager_execution
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Download the architecture of ResNet50 with ImageNet weights
base_model = ResNet50(include_top=False, weights='imagenet')
# Taking the output of the last convolution block in ResNet50
x = base_model.output
# Adding a Global Average Pooling layer
x = GlobalAveragePooling2D()(x)
# Adding a fully connected layer having 1024 neurons
x = Dense(1024, activation='relu')(x)
# Adding a fully connected layer having 2 neurons which will
# give probability of image having either dog or cat
predictions = Dense(2, activation='softmax')(x)
# Model to be trained
model = Model(inputs=base_model.input, outputs=predictions)
# Training only top layers i.e. the layers which we have added in the end
for layer in base_model.layers:
    layer.trainable = False
# Compiling the model
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'],
              experimental_run_tf_function=False)
# Creating objects for image augmentations
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Proving the path of training and test dataset
# Setting the image input size as (224, 224)
# We are using class mode as binary because there are only two classes in our data
training_set = train_datagen.flow_from_directory('training_set',
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')
test_set = test_datagen.flow_from_directory('test_set',
                                            target_size=(224, 224),
                                            batch_size=32,
                                            class_mode='categorical')
# Training the model for 5 epochs
model.fit_generator(training_set,
                    steps_per_epoch=8000,
                    epochs=5,
                    validation_data=test_set,
                    validation_steps=2000)
# We will try to train the last stage of ResNet50
for layer in base_model.layers[0:143]:
    layer.trainable = False
for layer in base_model.layers[143:]:
    layer.trainable = True
# Training the model for 10 epochs
model.fit_generator(training_set,
                    steps_per_epoch=8000,
                    epochs=10,
                    validation_data=test_set,
                    validation_steps=2000)
# Saving the weights in the current directory
model.save_weights("resnet50_weights.h5")
# Predicting the final result of image
test_image = image.load_img('cat_or_dog_test.jpg', target_size = (224, 224))
test_image = image.img_to_array(test_image)
# Expanding the 3-d image to 4-d image.
# The dimensions will be Batch, Height, Width, Channel
test_image = np.expand_dims(test_image, axis = 0)
# Predicting the final class
classifier = LogisticRegression()
result = classifier.predict(test_image)
# Fetching the class labels
labels = training_set.class_indices
labels = list(labels.items())
# Printing the final label
for label, i in labels:
    if i == result:
        print("The test image has: ", label)
        break
I had the same problem when using: from keras import Input
But when I changed it to: from tensorflow.keras import Input, it worked!
I assume that the following line is where the error occurs:
test_image = np.expand_dims(test_image, axis = 0)
The reason is probably that you are applying a NumPy function to a tensor. Don't do that. Either convert your tensor to NumPy or use a function that works on tensors. Normally, I'd prefer the second option over the first (it avoids unnecessary conversions and makes your code more efficient). In your case, you will need to convert your tensor to NumPy anyway, because you are using sklearn afterwards:
test_image = np.expand_dims(test_image.numpy(), axis=0)
I am new to DL and I received a similar error, and the following helped me.
Try:
del base_model
Before:
base_model = ResNet50(include_top=False, weights='imagenet')
and also simultaneously:
Try:
del model
Before:
model = Model(inputs=base_model.input, outputs=predictions)
Please let me know if this has helped you or hasn't :) .
Try using tensorflow.keras.something instead of keras.something.
It worked for me.
Of course, you also have to import tensorflow.
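For example, the keras.* imports at the top of the question could be swapped for their tensorflow.keras equivalents so everything comes from the same package (a sketch of the idea, not a guarantee that this alone resolves the error):

# use the tensorflow.keras namespace consistently instead of standalone keras
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image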
I am learning image classification with TensorFlow. Below is my program. For the same test image, if I pass it again and again, it sometimes gives different labels. It is not predicting correctly.
import tensorflow as tf
import numpy as np
import os
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten,Activation
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.constraints import maxnorm
from keras.utils import np_utils
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64,64,3 ),activation="relu"))
classifier.add(MaxPooling2D(pool_size = (2,2)))
classifier.add(Flatten())
classifier.add(Dense(128 , kernel_initializer ='uniform' , activation = 'relu'))
classifier.add(Dense(10 , kernel_initializer ='uniform' , activation = 'softmax'))
classifier.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy' , metrics = ['accuracy'])
from keras_preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
    '/code/train',
    shuffle=True,
    target_size=(64, 64),
    batch_size=5,
    class_mode='categorical',
    classes=["shiv", "kart", "nall", "surendra", "harshi", "nag", "saura", "rajan", "manoj", "abhimanyu"])
test_set = test_datagen.flow_from_directory(
    '/code/validation',
    shuffle=True,
    target_size=(64, 64),
    batch_size=5,
    class_mode='categorical',
    classes=["shiv", "kart", "nall", "surendra", "harshi", "nag", "saura", "rajan", "manoj", "abhimanyu"])
from IPython.display import display
from PIL import Image
classifier.fit(
    training_set,
    steps_per_epoch=80,
    epochs=12,
    validation_data=test_set,
    validation_steps=100)
from keras_preprocessing import image
files_dir = '/code/test_image_clasification1'
files = os.listdir(files_dir)
np.set_printoptions(precision=3)
for f in files:
    image_path = files_dir + '/' + f
    test_image = image.load_img(image_path, target_size=(64, 64))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    #classes = classifier.predict_classes(test_image)
    #print (classes)
    labels = ["shivaji", "kartik", "nallayan", "surendar", "harshita", "nagendar", "saurabh", "rajan", "manoj", "abhimanyu"]
    indx = np.argmax(result)
    #score = np.argmax(np.round(result*100,2))
    #print(np.round(result,2))
    match_percentage = np.max(result)
    match_class = labels[indx]
    print("the image " + f + " is matching with " + match_class + " having matching percentage " + "{:.2%}".format(match_percentage))
    #print(list(zip(labels,result)))
    #print(f,labelsindx])
Can anyone tell me if anything is wrong in the model training? For information, I have 122 images in the train set and 48 in the validation set.
You need to apply the same rescaling factor to your test images; this is currently missing and therefore leads to wrong probability predictions. So you also need to rescale your test_image to [0, 1] by applying 1./255.
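Concretely, the prediction loop could apply the same 1./255 scaling that the training generators use (a short sketch; only the division line is new relative to the question's code):

test_image = image.load_img(image_path, target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = test_image / 255.0              # match the generators' rescale=1./255
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)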
You can try changing the loss and the optimizer.
classifier.compile(optimizer = 'sgd', loss = 'sparse_categorical_crossentropy' , metrics = ['accuracy'])