My image classification model written in TensorFlow doesn't learn - python

I'm trying to build an image classification model that predicts whether you are wearing a mask. This is the first time I've built my own model, and when I train it the accuracy jumps around 50%, and when I predict it always says "no mask". I tried changing the number of epochs, the batch size, the amount of training data, and the model code itself, and nothing works. This is my code:
import os
import cv2
import random
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.optimizers import SGD

def preproccesImage(img):
    img = cv2.resize(img, dsize=(150, 150), interpolation=cv2.INTER_CUBIC)
    return img

def getData():
    training = []
    for image in os.listdir("src/data/with_mask"):
        img = cv2.imread(f"src/data/with_mask/{image}", cv2.IMREAD_GRAYSCALE)
        proccesed = preproccesImage(img)
        training.append([proccesed.tolist(), 1])
    for image in os.listdir("src/data/without_mask"):
        img = cv2.imread(f"src/data/without_mask/{image}", cv2.IMREAD_GRAYSCALE)
        proccesed = preproccesImage(img)
        training.append([proccesed.tolist(), 0])
    random.shuffle(training)
    train_x = np.array([x[0] for x in training], dtype=np.float32)
    train_y = np.array([x[1] for x in training], dtype=np.float32)
    print(train_x)
    print(train_y)
    return (train_x, train_y)
train_x, train_y = getData()

model = Sequential()
model.add(Dense(32, input_shape=(len(train_x[0]), 150), activation="relu"))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(128, activation="relu"))
model.add(Flatten())
model.add(Dense(1, activation="softmax"))
model.compile(loss="categorical_crossentropy",
              optimizer="adam", metrics=["accuracy"])
hist = model.fit(
    train_x, train_y,
    epochs=200, batch_size=2, verbose=1)
model.save("model.h5", hist)

img = cv2.imread("src/me.png", cv2.IMREAD_GRAYSCALE)
resized = cv2.resize(img, dsize=(150, 150), interpolation=cv2.INTER_CUBIC)

def predict():
    res = model.predict([resized.tolist()])[0]
    resoult = [[i, r] for i, r in enumerate(res)]
    predicted = []
    for r in resoult:
        predicted.append({"intent": 1 if r[0] == 1 else 0, "probability": r[1]})
    if predicted[0]["intent"] == 1:
        print("mask on")
    else:
        print('no mask')

predict()
I would be glad if someone could help.

A softmax activation on a single-unit last layer, combined with categorical_crossentropy, is not the correct combination for binary classification; you should change your loss to binary_crossentropy and the activation of your last layer to sigmoid, i.e.:
model.add(Dense(1, activation="sigmoid"))  # last layer
model.compile(loss="binary_crossentropy",
              optimizer="adam", metrics=["accuracy"])
More generally, you could really benefit from some convolutional layers in the early stages of your model; notice also that placing Flatten layers after Dense ones does not make any sense.
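For illustration, a minimal convolutional baseline for this task could look like the sketch below. It assumes grayscale 150x150 inputs scaled to [0, 1]; the layer sizes are reasonable defaults for a first attempt, not the only valid choice:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

model = Sequential([
    # two small conv/pool stages to learn local image features
    Conv2D(32, (3, 3), activation="relu", input_shape=(150, 150, 1)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation="relu"),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation="relu"),
    Dropout(0.2),
    # single sigmoid unit for the binary mask / no-mask decision
    Dense(1, activation="sigmoid"),
])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# train_x would then need shape (num_samples, 150, 150, 1) with values in [0, 1]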

Related

CNN model predicting the same output for any inputs

from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import MaxPooling2D

classifier = Sequential()
classifier.add(Convolution2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Convolution2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=32, activation='relu'))
classifier.add(Dense(units=64, activation='relu'))
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=256, activation='relu'))
classifier.add(Dense(units=256, activation='relu'))
classifier.add(Dense(units=6, activation='softmax'))
classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255,       # rescale the images to the range [0, 1]
                                   shear_range=0.2,      # randomly shear the images
                                   zoom_range=0.2,       # randomly zoom the images
                                   horizontal_flip=True) # randomly flip half of the images horizontally
test_datagen = ImageDataGenerator(rescale=1./255)

print("\nTraining the data...\n")
training_set = train_datagen.flow_from_directory('train',
                                                 target_size=(64, 64),
                                                 batch_size=12,
                                                 class_mode='categorical')
test_set = test_datagen.flow_from_directory('test',
                                            target_size=(64, 64),
                                            batch_size=12,
                                            class_mode='categorical')
classifier.fit_generator(training_set,
                         steps_per_epoch=len(training_set),
                         epochs=20,
                         validation_data=test_set,
                         validation_steps=len(test_set))
classifier.save("model.h5")

# Prediction
classes = ['Fresh Apple', 'Fresh Banana', 'Fresh Orange', 'Rotten Apple', 'Rotten Banana', 'Rotten Orange']
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
new_model = load_model('model.h5')
filename = 'a1.jpeg'
new_model.summary()
test_image = image.load_img('images\\a1.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = new_model(test_image)
result1 = result[0]
for i in range(6):
    if result1[i] == 1.:
        break
prediction = classes[i]
print(prediction)
My model gives the same output for any input. The errors and warnings have been removed, but the output still stays the same. Earlier the model was always giving the same value 'A' (for example) before removing the warnings; after removing the warnings, the model always gives the same value 'B'. I don't know where the problem in my code is, whether it is in the model or in the prediction step.
A couple of things. In your generators you set a batch size of 12, and then in model.fit you have steps_per_epoch=len(training_set); this means you will go through your training set 12 times per epoch. I usually leave steps_per_epoch and validation_steps as None, and model.fit will determine the values internally. But if you want to set them yourself, use

steps_per_epoch = int(len(train_set)/batch_size) + 1
validation_steps = int(len(test_set)/batch_size) + 1

Now for the predictions. You scaled your train and test images by 1/255, so you need to do the same for images you wish to predict. Right after the code that expands the dimension, add

test_image = test_image/255
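Putting both fixes together, the prediction block could look like this sketch (path and class names taken from the question; np.argmax replaces the loop that looks for a probability exactly equal to 1.0, which a softmax output almost never produces):

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

new_model = load_model('model.h5')
test_image = image.load_img('images\\a1.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
test_image = test_image / 255.0           # same rescaling as the training generators
result = new_model.predict(test_image)[0]
classes = ['Fresh Apple', 'Fresh Banana', 'Fresh Orange',
           'Rotten Apple', 'Rotten Banana', 'Rotten Orange']
print(classes[int(np.argmax(result))])    # report the highest-probability class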

Cannot convert a symbolic input/output to a numpy array

I am trying to run a deep learning code that I found in a tutorial, in order to familiarise myself with ResNet50, Keras and TensorFlow under Python 3.7. When I run my code, I get the following error:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
I tried to use the following fix, as suggested on Stack Overflow:
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
That didn't help. My full code can be seen below:
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from keras.preprocessing import image
from sklearn.linear_model import LogisticRegression
from tensorflow.python.framework.ops import disable_eager_execution
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Download the architecture of ResNet50 with ImageNet weights
base_model = ResNet50(include_top=False, weights='imagenet')
# Take the output of the last convolution block in ResNet50
x = base_model.output
# Add a Global Average Pooling layer
x = GlobalAveragePooling2D()(x)
# Add a fully connected layer with 1024 neurons
x = Dense(1024, activation='relu')(x)
# Add a fully connected layer with 2 neurons which will
# give the probability of the image being either dog or cat
predictions = Dense(2, activation='softmax')(x)
# Model to be trained
model = Model(inputs=base_model.input, outputs=predictions)
# Train only the top layers, i.e. the layers we added at the end
for layer in base_model.layers:
    layer.trainable = False
# Compile the model
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'],
              experimental_run_tf_function=False)
# Create objects for image augmentation
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
# Provide the paths of the training and test datasets
# Set the image input size to (224, 224)
training_set = train_datagen.flow_from_directory('training_set',
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')
test_set = test_datagen.flow_from_directory('test_set',
                                            target_size=(224, 224),
                                            batch_size=32,
                                            class_mode='categorical')
# Train the model for 5 epochs
model.fit_generator(training_set,
                    steps_per_epoch=8000,
                    epochs=5,
                    validation_data=test_set,
                    validation_steps=2000)
# Also train the last stage of ResNet50
for layer in base_model.layers[0:143]:
    layer.trainable = False
for layer in base_model.layers[143:]:
    layer.trainable = True
# Train the model for 10 epochs
model.fit_generator(training_set,
                    steps_per_epoch=8000,
                    epochs=10,
                    validation_data=test_set,
                    validation_steps=2000)
# Save the weights in the current directory
model.save_weights("resnet50_weights.h5")
# Predict the final result for an image
test_image = image.load_img('cat_or_dog_test.jpg', target_size=(224, 224))
test_image = image.img_to_array(test_image)
# Expand the 3-d image to a 4-d batch.
# The dimensions will be Batch, Height, Width, Channel
test_image = np.expand_dims(test_image, axis=0)
# Predict the final class
classifier = LogisticRegression()
result = classifier.predict(test_image)
# Fetch the class labels
labels = training_set.class_indices
labels = list(labels.items())
# Print the final label
for label, i in labels:
    if i == result:
        print("The test image has: ", label)
        break
I had the same problem when using from keras import Input; but when I changed it to from tensorflow.keras import Input, it worked!
I assume that the following line is where the error occurs:

test_image = np.expand_dims(test_image, axis=0)

The reason is probably that you are trying to apply a numpy function to a tensor. Don't do that. Either convert your tensor to numpy or use a function that works on tensors. Normally I'd prefer the second option over the first (it avoids unnecessary conversions and makes your code more efficient). In your case, however, you will need to convert your tensor to numpy, because you are using sklearn afterwards:

test_image = np.expand_dims(test_image.numpy(), axis=0)
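As a sketch of the two options (assuming test_image is an eager TensorFlow tensor; the tf.random.uniform call is only a stand-in for the real image):

import numpy as np
import tensorflow as tf

test_image = tf.random.uniform((224, 224, 3))   # stand-in for the loaded image tensor
# Option 1: stay in TensorFlow and use a tensor-aware op
batched = tf.expand_dims(test_image, axis=0)    # shape (1, 224, 224, 3)
# Option 2: convert to numpy first (required before handing data to sklearn)
batched_np = np.expand_dims(test_image.numpy(), axis=0)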
I am new to DL and I received a similar error, and the following helped me.
Try:

del base_model

before:

base_model = ResNet50(include_top=False, weights='imagenet')

and also, simultaneously, try:

del model

before:

model = Model(inputs=base_model.input, outputs=predictions)

Please let me know whether or not this has helped you :).
Try using tensorflow.keras.something instead of keras.something. It worked for me. Of course, you also have to import tensorflow.
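For example, the imports at the top of the question could consistently come from tensorflow.keras, along the lines of this sketch (module paths as in TF 2.x):

import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image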

Keras: unsupervised pre-training kills performance

I'm trying to train a deep classifier in Keras, both with and without pretraining of the hidden layers via stacked autoencoders. My problem is that the pretraining seems to drastically degrade performance (i.e. if pretrain is set to False in the code below, the training error of the final classification layer converges much faster). This seems completely outrageous to me, given that pretraining should only initialize the weights of the hidden layers, and I don't see how that could completely kill the model's performance even if the initialization does not work very well. I cannot include the specific dataset I used, but the effect should occur for any appropriate dataset (e.g. MNIST). What is going on here and how can I fix it?
EDIT: the code is now reproducible with the MNIST data; the final lines show the change in the loss function, which is significantly lower with pre-training.
I have also slightly modified the code and added sample learning curves below:
from functools import partial
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.utils import to_categorical

(inputs_train, targets_train), _ = mnist.load_data()
inputs_train = inputs_train[:1000].reshape(1000, 784)
targets_train = to_categorical(targets_train[:1000])

hidden_nodes = [256] * 4
learning_rate = 0.01
regularization = 1e-6
epochs = 30

def train_model(pretrain):
    model = Sequential()
    layer = partial(Dense,
                    activation='sigmoid',
                    kernel_initializer='random_normal',
                    kernel_regularizer=l2(regularization))
    for i, hn in enumerate(hidden_nodes):
        kwargs = dict(units=hn, name='hidden_{}'.format(i + 1))
        if i == 0:
            kwargs['input_dim'] = inputs_train.shape[1]
        model.add(layer(**kwargs))
    if pretrain:
        # train autoencoders
        inputs_train_ = inputs_train.copy()
        for i, hn in enumerate(hidden_nodes):
            autoencoder = Sequential()
            autoencoder.add(layer(units=hn,
                                  input_dim=inputs_train_.shape[1],
                                  name='hidden'))
            autoencoder.add(layer(units=inputs_train_.shape[1],
                                  name='decode'))
            autoencoder.compile(optimizer=SGD(lr=learning_rate, momentum=0.9),
                                loss='binary_crossentropy')
            autoencoder.fit(
                inputs_train_,
                inputs_train_,
                batch_size=32,
                epochs=epochs,
                verbose=0)
            autoencoder.pop()
            model.layers[i].set_weights(autoencoder.layers[0].get_weights())
            inputs_train_ = autoencoder.predict(inputs_train_)
    num_classes = targets_train.shape[1]
    model.add(Dense(units=num_classes,
                    activation='softmax',
                    name='classify'))
    model.compile(optimizer=SGD(lr=learning_rate, momentum=0.9),
                  loss='categorical_crossentropy')
    h = model.fit(
        inputs_train,
        targets_train,
        batch_size=32,
        epochs=epochs,
        verbose=0)
    return h.history['loss']

plt.plot(train_model(pretrain=False), label="Without Pre-Training")
plt.plot(train_model(pretrain=True), label="With Pre-Training")
plt.xlabel("Epoch")
plt.ylabel("Cross-Entropy")
plt.legend()
plt.show()

Inconsistent results in CNN using keras

I have built a prediction model in Keras using a CNN to classify car damage images as severe or not severe. The predicted class and accuracy change every time I run the code on the same dataset, with no other parameters changed. I have tried restarting the kernel and also setting a seed for the model, hoping to get consistent results. I am new to Python, so kindly help me get the same results every time.
import random
random.seed(801)

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout

# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(64, (2, 2), input_shape=(64, 64, 3), activation='relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(64, (2, 2), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Adding dropout
classifier.add(Dropout(0.2))
# Step 4 - Full connection
classifier.add(Dense(units=128, activation='relu'))
# Adding dropout
classifier.add(Dropout(0.2))
classifier.add(Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255,
                                   # shear_range = 0.2,
                                   # zoom_range = 0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
#train_labels = keras.utils.to_categorical(train_labels, num_classes)
#test_labels = keras.utils.to_categorical(test_labels, num_classes)
training_set = train_datagen.flow_from_directory('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2 category/training',
                                                 target_size=(64, 64),
                                                 batch_size=16,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2 category/validation',
                                            target_size=(64, 64),
                                            batch_size=16,
                                            class_mode='binary')
batch_size = 16
classifier.fit_generator(training_set,
                         steps_per_epoch=605//batch_size,
                         epochs=9,
                         validation_data=test_set,
                         validation_steps=5//batch_size)
#classifier.save('first_model.h5')
classifier.save('first.h5')

# finding the number of associated classes
#classes = training_set.class_indices
#print(classes)

# extracting file names of images
import os
from PIL import Image
import numpy as np
path = 'C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/data3a_full/validation/01-minor'
img_names = [f for f in os.listdir(path) if os.path.splitext(f)[-1] == '.JPEG']
#print(img_names[1])
img_names = np.asarray(img_names)  # converting list to array

# predicting classes for multiple images
import numpy as np
from keras.models import load_model
from keras.preprocessing import image
#os.chdir('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2nd check/pred')
os.chdir('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/data3a_full/validation/01-minor')
a = load_model('first.h5')
classes = []
result = []
for i in range(len(img_names)):
    img = image.load_img(img_names[i],
                         target_size=(64, 64))
    test_image = image.img_to_array(img)
    test_image = np.expand_dims(test_image, axis=0)
    result = a.predict(test_image)
    #print(result)
    if result[0] >= 0.5:
        prediction = 'severe'
    else:
        prediction = 'not severe'
    classes.append(prediction)
#print(classes)
#prediction2 = print(classes)

import pandas as pd
dfn = pd.DataFrame({'image': img_names,
                    'prediction': classes})
len(dfn.loc[dfn['prediction'] == 'not severe'])
len(dfn.loc[dfn['prediction'] == 'severe'])
It looks like you're training the model every time you classify! This is what's causing the inconsistency. The reason why this yields different results, despite you setting the seed, is explained in the Stack Overflow question "Why can't I get reproducible results in Keras even though I set the random seeds?".
I suggest you separate the code into two files, so that you train in one script and load and test in another. This way you will get more consistent results.
I had similar problems with loading weights. The problem is that when you load the weights, Keras randomly assigns the weights because of the model declaration. I switched to using checkpoints for storing my weights and model.load_weights(checkpoints_directory) to load them. You will have to use a callback for this. Here is a short code snippet for this task (Google has a nice video on this topic).

from keras.callbacks import ModelCheckpoint

callbacks = [ModelCheckpoint(checkpoints_directory, monitor='val_loss', save_weights_only=True, save_best_only=True, period=period)]
model.fit(..., callbacks=callbacks, ...)
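For completeness: seeding only Python's random module, as the question does, is usually not enough, because NumPy and TensorFlow keep their own random state. A common seeding recipe is sketched below (exact reproducibility on a GPU may still require additional settings, such as disabling non-deterministic ops):

import os
import random
import numpy as np
import tensorflow as tf

os.environ['PYTHONHASHSEED'] = '801'  # fix Python's hash randomization
random.seed(801)                      # Python's built-in RNG
np.random.seed(801)                   # NumPy's RNG (weight init, shuffling)
tf.random.set_seed(801)               # TensorFlow's RNG (tf.set_random_seed in TF 1.x)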

ConvNet which has 98% test accuracy is always wrong at predictions

I'm currently building a convolutional neural network to distinguish clear ECG images from ECG images with noise.
With Noise: [image]
Without Noise: [image]
My Problem
I built a convnet using Keras on top of TensorFlow and trained it several times, and every time it reaches about 99% training accuracy, 99% validation accuracy and 98% testing accuracy. But when I predict an image, it always gives me [0].
Most of the time, my model early-stops at epoch 3 or 4 with 99% accuracy in both training and validation; it almost always reaches 98% or 99% accuracy in the first or second epoch.
My Model
from keras.models import Sequential
from keras.datasets import mnist
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
from keras.layers import ZeroPadding2D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping

tensorboard = TensorBoard(log_dir="./logs", histogram_freq=0, write_graph=True, write_images=True)
earlystop = EarlyStopping(monitor='val_loss', patience=2, verbose=1)

# Variables
batchSize = 15
num_of_samples = 15000
num_of_testing_samples = 3750
num_of_val_samples = 2000

training_imGenProp = ImageDataGenerator(rescale=1./255,
                                        width_shift_range=0.02,
                                        height_shift_range=0.02,
                                        horizontal_flip=False,
                                        fill_mode='nearest')
testing_imGenProp = ImageDataGenerator(rotation_range=5,
                                       horizontal_flip=False,
                                       fill_mode='nearest')
val_imGenProp = ImageDataGenerator(rescale=1./255,
                                   rotation_range=5,
                                   zoom_range=0.2,
                                   horizontal_flip=False,
                                   fill_mode='nearest')

# Create the model
classifier = Sequential()
classifier.add(ZeroPadding2D(padding=(374, 0), input_shape=(74, 448, 3)))
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dropout(0.8))
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.summary()

adam = Adam(lr=0.00005)
classifier.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

training_imGen = training_imGenProp.flow_from_directory(
    'Directory\Training',
    target_size=(74, 448),
    batch_size=batchSize,
    class_mode='binary',
)
testing_imGen = testing_imGenProp.flow_from_directory(
    'Directory\Testing',
    target_size=(74, 448),
    batch_size=batchSize,
    class_mode='binary',
)
val_imGen = testing_imGenProp.flow_from_directory(
    'Directory\Validation',
    target_size=(74, 448),
    batch_size=batchSize,
    class_mode='binary',
)
classifier.fit_generator(
    training_imGen,
    callbacks=[tensorboard, earlystop],
    steps_per_epoch=num_of_samples // batchSize,
    epochs=30,
    validation_data=val_imGen,
    validation_steps=num_of_val_samples // batchSize
)
score, acc = classifier.evaluate_generator(
    testing_imGen,
    num_of_testing_samples // batchSize,
    verbose=0
)
print('Test score:', score)
print('Test accuracy:', acc)
classifier.save('Directory\Config_10_Model.h5')
Notes
I used a 0.00005 learning rate to stop this model from early-stopping at the 2nd or 3rd epoch. I've separated the images for training, testing and validation under three folders, and have 1020, 375 and 200 images respectively for training, testing and validation (which means the training folder alone has 2040 images, since I have two classes with the same number of images each). So no images are reused under any circumstances.
Also, before I rescaled the images by 1./255 in ImageDataGenerator, my model had 50% accuracy in training and validation and 54% in testing. But after adding the rescaling, this early stopping happened frequently and the accuracy was 99% almost all the time.
I purposely didn't use rescaling for the test images, but I still get 98% accuracy and yet the model fails desperately at predicting. Since I have "with noise" and "without noise" folders under the training folder, my output class should be with noise or without noise. Since "with noise" comes first in alphabetical order, I believe class [0] means With Noise and [1] should be Without Noise. But if I feed a without-noise image to the model, it still gives me [0].
Below is the code I use to predict with the trained model.
from keras.models import load_model
import numpy as np
from keras.preprocessing import image

model = load_model('Directory\Config_10_Model.h5')
test_image = image.load_img('Path_to_Without_Noise_Image\image3452.png', target_size=(74, 448))
test_image = image.img_to_array(test_image)
test_image = test_image/255
test_image = np.expand_dims(test_image, axis=0)
result = model.predict(test_image)
y_classes = result.argmax(axis=-1)
print(y_classes)
I don't know why this happens even though I never used the same images for testing, validation or training. Can someone help me with this? I have tried everything and trained the model with different hyperparameters, but every time the model outputs [0].
You are doing binary classification, so result has shape [batch_size, 1]. If you take argmax() over that single-element axis you will always get 0; threshold the probability instead:
>>> import numpy as np
>>> result = np.random.rand(5,1)
>>> result
array([[ 0.54719484],
       [ 0.31675804],
       [ 0.55151251],
       [ 0.25014937],
       [ 0.00724972]])
>>> result.argmax(axis=-1)
array([0, 0, 0, 0, 0])
>>> (result > 0.5).astype(int)
array([[1],
       [0],
       [1],
       [0],
       [0]])
>>>
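Applied to the prediction code in the question, that means replacing the argmax line with a simple threshold (a sketch; 0.5 is the conventional cutoff for a sigmoid output, and the 0/1 mapping follows the question's alphabetical folder ordering):

result = model.predict(test_image)   # shape (1, 1): a single sigmoid probability
y_class = int(result[0][0] > 0.5)    # 0 = With Noise, 1 = Without Noise
print(y_class)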
