from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import MaxPooling2D
classifier = Sequential()
classifier.add(Convolution2D(32,(3,3),input_shape = (64,64,3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Convolution2D(32,(3,3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=32,activation = 'relu'))
classifier.add(Dense(units=64,activation = 'relu'))
classifier.add(Dense(units=128,activation = 'relu'))
classifier.add(Dense(units=256,activation = 'relu'))
classifier.add(Dense(units=256,activation = 'relu'))
classifier.add(Dense(units=6,activation = 'softmax'))
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,        # rescale pixel values into the range [0, 1]
                                   shear_range = 0.2,       # randomly shear the images
                                   zoom_range = 0.2,        # randomly zoom the images
                                   horizontal_flip = True)  # randomly flip half of the images horizontally
test_datagen = ImageDataGenerator(rescale = 1./255)
print("\nTraining the data...\n")
training_set = train_datagen.flow_from_directory('train',
                                                 target_size=(64,64),
                                                 batch_size=12, # images per batch
                                                 class_mode='categorical')
test_set = test_datagen.flow_from_directory('test',
target_size=(64,64),
batch_size=12,
class_mode='categorical')
classifier.fit_generator(training_set,
                         steps_per_epoch=len(training_set), # number of training batches per epoch
                         epochs = 20, # total no. of epochs
                         validation_data = test_set,
                         validation_steps = len(test_set)) # number of validation batches
classifier.save("model.h5")
#Prediction
classes = ['Fresh Apple','Fresh Banana','Fresh Orange','Rotten Apple','Rotten Banana','Rotten Orange']
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
new_model = load_model('model.h5')
filename = 'a1.jpeg'
new_model.summary()
test_image = image.load_img('images\\a1.jpg',target_size=(64,64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = new_model(test_image)
result1 = result[0]
for i in range(6):
    if result1[i] == 1.:
        break
prediction = classes[i]
print(prediction)
My model gives the same output for any input. The errors and warnings have been removed, but the output still stays the same. Before removing the warnings the model always returned the same value 'A' (for example), and after removing them it always returns the same value 'B'. I don't know where the problem in my code is, whether it is in the model or in the #Prediction part.
A couple of things. In your generators you set a batch size of 12, and in model.fit you have steps_per_epoch=len(training_set). Be careful here: steps_per_epoch is the number of batches per epoch, not the number of training images. I usually leave steps_per_epoch and validation_steps as None; model.fit will then determine the values internally. But if you want to set them yourself, use
steps_per_epoch = training_set.samples // batch_size + 1
validation_steps = test_set.samples // batch_size + 1
Now for predictions: you scaled your train and test images by 1/255, so you need to do the same for any image you wish to predict. Right after the code that expands the dimensions, add:
test_image = test_image/255
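For clarity, here is a minimal sketch of how the whole prediction block could look with that rescaling applied. It assumes the same model.h5 and classes list from the question, and it picks the class with np.argmax instead of looking for an output exactly equal to 1.0, since softmax scores rarely hit 1.0 exactly:
import numpy as np
from keras.models import load_model
from keras.preprocessing import image

new_model = load_model('model.h5')
test_image = image.load_img('images\\a1.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
test_image = test_image / 255   # same rescaling as the training generator
result = new_model.predict(test_image)
prediction = classes[np.argmax(result[0])]   # index of the highest softmax score
print(prediction)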
My accuracy is zero for all 15 epochs in spite of using multiple Conv2D and MaxPooling layers. I am using ImageDataGenerator for data augmentation.
Complete code is given below:
# importing all the required libraries
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# Loading the Data from the in built library
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Normalize the Pixel Data
train_images = train_images/255.0
test_images = test_images/255.0
# Instantiate the Image Data Generator Class with the Data Augmentation
datagen = ImageDataGenerator(width_shift_range = 0.2, height_shift_range = 0.2,
rotation_range = 20, horizontal_flip = True,
vertical_flip = True, validation_split = 0.2)
# Apply the Data Augmentation to the Training Images
datagen.fit(train_images)
# Create the Generator for the Training Images
train_gen = datagen.flow(train_images, train_labels, batch_size = 32,
subset = 'training')
# Create the Generator for the Validation Images
val_gen = datagen.flow(train_images, train_labels, batch_size = 8,
subset = 'validation')
num_classes = 10
# One Hot Encoding of Labels using to_categorical
train_labels = to_categorical(train_labels, num_classes)
test_labels = to_categorical(test_labels, num_classes)
img_height = 32
img_width = 32
# Building the Keras Model
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
#model.add(Dropout(rate = 0.2))
model.add(Dense(units = num_classes, activation = 'softmax'))
model.summary()
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam',
metrics = ['accuracy'])
steps_per_epoch = len(train_images) * 0.8//32
history = model.fit(train_gen, validation_data = val_gen,
steps_per_epoch = steps_per_epoch, epochs = 15)
Your problem is that you ran this code
train_gen = datagen.flow(train_images, train_labels, batch_size = 32,
subset = 'training')
# Create the Generator for the Validation Images
val_gen = datagen.flow(train_images, train_labels, batch_size = 8,
subset = 'validation')
but only after this did you convert the labels to categorical. So take the code
num_classes = 10
# One Hot Encoding of Labels using to_categorical
train_labels = to_categorical(train_labels, num_classes)
test_labels = to_categorical(test_labels, num_classes)
and place it PRIOR to the train_gen and val_gen code. On a finer point you have the code
datagen.fit(train_images)
You only need to fit the generator if any of the parameters featurewise_center, samplewise_center, featurewise_std_normalization, or samplewise_std_normalization is set to True.
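For illustration, a minimal sketch (with hypothetical parameter choices) of a case where datagen.fit() is actually needed, because feature-wise statistics must be computed from the training images before flow() is called:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# featurewise_center / featurewise_std_normalization need the dataset mean and std,
# so fit() has to run over the training images before flow()
datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True,
                             horizontal_flip=True)
datagen.fit(train_images)   # computes mean and std over train_images
train_gen = datagen.flow(train_images, train_labels, batch_size=32)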
Transform your labels to one-hot right before the .flow.
...
# One Hot Encoding of Labels using to_categorical
train_labels = to_categorical(train_labels, num_classes)
test_labels = to_categorical(test_labels, num_classes)
# Create the Generator for the Training Images
train_gen = datagen.flow(train_images, train_labels, batch_size = 32,
subset = 'training')
# Create the Generator for the Validation Images
val_gen = datagen.flow(train_images, train_labels, batch_size = 8,
subset = 'validation')
...
I am learning image classification with TensorFlow. Below is my program. If I pass the same test image again and again, it sometimes gives different labels; it is not predicting correctly.
import tensorflow as tf
import numpy as np
import os
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten,Activation
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.constraints import maxnorm
from keras.utils import np_utils
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64,64,3 ),activation="relu"))
classifier.add(MaxPooling2D(pool_size = (2,2)))
classifier.add(Flatten())
classifier.add(Dense(128 , kernel_initializer ='uniform' , activation = 'relu'))
classifier.add(Dense(10 , kernel_initializer ='uniform' , activation = 'softmax'))
classifier.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy' , metrics = ['accuracy'])
from keras_preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'/code/train',
shuffle=True,
target_size=(64,64),
batch_size=5,
class_mode='categorical',
classes=["shiv", "kart", "nall","surendra","harshi","nag","saura","rajan","manoj","abhimanyu"])
test_set = test_datagen.flow_from_directory(
'/code/validation',
shuffle=True,
target_size=(64,64),
batch_size=5,
class_mode='categorical',
classes=["shiv", "kart", "nall","surendra","harshi","nag","saura","rajan","manoj","abhimanyu"])
from IPython.display import display
from PIL import Image
classifier.fit(
training_set,
steps_per_epoch=80,
epochs=12,
validation_data=test_set,
validation_steps=100)
from keras_preprocessing import image
files_dir = '/code/test_image_clasification1'
files = os.listdir(files_dir)
np.set_printoptions(precision=3)
for f in files:
    image_path = files_dir + '/' + f
    test_image = image.load_img(image_path, target_size = (64, 64))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis = 0)
    result = classifier.predict(test_image)
    #classes = classifier.predict_classes(test_image)
    #print (classes)
    labels = ["shivaji","kartik","nallayan","surendar","harshita","nagendar","saurabh","rajan","manoj","abhimanyu"]
    indx = np.argmax(result)
    #score = np.argmax(np.round(result*100,2))
    #print(np.round(result,2))
    match_percentage = np.max(result)
    match_class = labels[indx]
    print("the image " + f + " is matching with " + match_class + " having matching percentage " + "{:.2%}".format(match_percentage))
    #print(list(zip(labels,result)))
    #print(f, labels[indx])
Can anyone help me find what is wrong in the model training? For information, I have 122 images in the train set and 48 in the validation set.
You need to apply the same rescaling factor to the images you predict on; this is currently missing and therefore leads to wrong probability predictions. So you also need to rescale test_image to [0, 1] by multiplying it by 1./255.
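A minimal sketch of that change inside the prediction loop from the question (image, np, and classifier are the same objects as in the question's code):
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
test_image = test_image / 255   # same 1./255 rescaling as the training and test generators
result = classifier.predict(test_image)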
You can try changing the loss and the optimizer.
classifier.compile(optimizer = 'sgd', loss = 'sparse_categorical_crossentropy' , metrics = ['accuracy'])
I have created a CNN model that can be used to differentiate DOGS and CATS. During the training process my model was showing a training accuracy of 99% and a testing accuracy of 81% by the end of epoch 4 of 25.
Is this normal, or is there any problem that might occur after completion of all the epochs?
I need to use this CNN model on new inputs that do not belong to my training or test set. How do I use my model to predict some new photos?
I have not used classifier.save(), so after training can I just run that command so that the model gets saved, or do I have to recompile everything with classifier.save() at the end?
# Part 1 - Building the CNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
classifier.fit_generator(training_set,
steps_per_epoch = 8000,
epochs = 25,
validation_data = test_set,
validation_steps = 2000)
The model has a save method that exports the architecture, the weights, and the training configuration of the model to a file which can later be loaded and used. The documentation for it can be found here.
After loading the model, you can use it on any data set you want. About the accuracy: those numbers are achievable, but there is still a large gap between the train and test accuracy, so at the moment the model is over-fitting the data. Also, try shuffling the data and retraining on it to make sure this is not an exceptional case.
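As a minimal sketch (the file name and image path here are placeholders), saving the trained model and later predicting on a new photo could look like this, using the same 1./255 preprocessing as the training generators:
# after classifier.fit_generator(...) has finished
classifier.save('cats_dogs.h5')   # stores architecture, weights and optimizer state

# later, possibly in a new session
import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model('cats_dogs.h5')
img = image.load_img('some_new_photo.jpg', target_size=(64, 64))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0) / 255.0   # same rescaling as in training
prob = model.predict(x)[0][0]           # sigmoid output, probability of class 1
print('dog' if prob >= 0.5 else 'cat')  # check training_set.class_indices to confirm which class is 1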
I have built a CNN in Keras that predicts from images whether car damage is severe or not. The predicted class and accuracy change every time I run the code on the same dataset, with no other parameters changed. I have tried restarting the kernel and also setting a seed for the model in the hope of getting consistent results. I am new to Python, so kindly help me get the same results every time.
import random
random.seed(801)
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(64, (2, 2), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(64, (2, 2), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Adding dropout
classifier.add(Dropout(0.2))
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
# Adding dropout
classifier.add(Dropout(0.2))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
# shear_range = 0.2,
# zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
#train_labels = keras.utils.to_categorical(train_labels,num_classes)
#test_labels = keras.utils.to_categorical(test_labels,num_classes)
training_set = train_datagen.flow_from_directory('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2 category/training',
target_size = (64, 64),
batch_size = 16,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2 category/validation',
target_size = (64, 64),
batch_size = 16,
class_mode = 'binary')
batch_size=16
classifier.fit_generator(training_set,
steps_per_epoch = 605//batch_size,
epochs = 9,
validation_data = test_set,
validation_steps = 5//batch_size
)
#classifier.save('first_model.h5')
classifier.save('first.h5')
# finding the number associated classes
#classes=training_set.class_indices
#print(classes)
# extracting file names of images
import os
from PIL import Image
import numpy as np
path='C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/data3a_full/validation/01-minor'
img_names = [f for f in os.listdir(path) if os.path.splitext(f)[-1] == '.JPEG']
#print(img_names[1])
img_names=np.asarray(img_names) #converting list to array
# predicting classes for multiple images
import numpy as np
from keras.models import load_model
from keras.preprocessing import image
#os.chdir('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/2nd check/pred')
os.chdir('C:/Users/Allianz/Desktop/Image Processing/car-damage-detective-neokt/app/data3a_full/validation/01-minor')
a=load_model('first.h5')
classes=[]
result=[]
for i in range(len(img_names)):
    img = image.load_img(img_names[i], target_size=(64,64))
    test_image = image.img_to_array(img)
    test_image = np.expand_dims(test_image, axis = 0)
    result = a.predict(test_image)
    #print(result)
    if result[0] >= 0.5:
        prediction = 'severe'
    else:
        prediction = 'not severe'
    classes.append(prediction)
#print(classes)
#prediction2=print(classes)
import pandas as pd
dfn=pd.DataFrame({'image':img_names,
'prediction':classes
})
len(dfn.loc[dfn['prediction']=='not severe'])
len(dfn.loc[dfn['prediction']=='severe'])
It looks like you're training the model every time you classify! This is what's causing the inconsistency. The reason why this yields different results, despite you setting the seed, can be found in "Why can't I get reproducible results in Keras even though I set the random seeds?".
I suggest you split this into two files, so that you train in one script and load and test in another. This way you will get more consistent results.
I had similar problems with loading weights. The problem is that when you load the weights, Keras randomly assigns the weights because of the model declaration. I switched to using checkpoints for storing my weights and model.load_weights(checkpoints_directory) to load them. You will have to use a callback for this. Here is a short code snippet for this task (Google has a nice video on this topic).
from keras.callbacks import ModelCheckpoint
callbacks = [ModelCheckpoint(checkpoints_directory, monitor='val_loss', save_weights_only=True, save_best_only=True, period=period)]
model.fit(..., callbacks=callbacks, ...)
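For completeness, a minimal sketch of the loading side (checkpoints_directory is the same placeholder as above, and build_model() is a hypothetical helper that recreates the exact architecture used for training):
model = build_model()                       # must rebuild the same architecture as during training
model.load_weights(checkpoints_directory)   # restore the weights saved by ModelCheckpoint
result = model.predict(test_image)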
I am very new to neural networks and I tried a typical first example with the help of some Internet blogs: image classification of cats vs. dogs. After training the neural network below, I tried to identify some random pictures of cats/dogs which I found on Google and which are neither in my training_set nor in my test_set… I found that sometimes the network gives a correct prediction (recognizing a dog when shown a dog) and unfortunately sometimes a false one, i.e. I showed a picture of a cat and the network predicted 'dog'. How do I handle such mistakes?
Should I add all wrongly classified pictures to the training_set or test_set and do the whole training process again? Or is there any other way to tell the network that it has made a false prediction and should adapt its weights?
#Part 1 - Import
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
#Part 2 – Build Network
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
#Part 3 - Training
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('C:/…/KNNDaten/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')
test_set = test_datagen.flow_from_directory('C:/…/KNNDaten/test_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')
classifier.fit_generator(training_set, steps_per_epoch = 8000, epochs = 25, validation_data = test_set, validation_steps = 2000)
#Part 4 – Saving Model and weights
model_json = classifier.to_json()
with open("model1.json", "w") as json_file:
    json_file.write(model_json)
classifier.save_weights("model1.h5")
# Part 5 - Making new predictions
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('C:/… /KNNDaten/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'
print("Image contains: " + prediction)
At the moment my training process looks like this: [screenshot of the training output showing the accuracy results]
Thank you very much for your help!
The usual process is to add the incorrectly predicted images to the training data set and retrain the network, either from random weights or starting from the weights obtained previously, on the new images together with the old ones.
When training a network you don't need to start from random weights; you can use the previous weights, which is sometimes called transfer learning. If you do this, it is important to also include the original images used to train the model, or at least part of them, if you don't want to overfit the model.
As Dascienz comments, using data augmentation techniques can also be very useful to get better generalization, for example adding the new images and variations of them: rotations, translations, symmetries, and rescaling.
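A minimal sketch of continuing from the previous weights, assuming the model1.json / model1.h5 files saved in the question and a training_set directory that now also contains the misclassified images (the path keeps the question's placeholder, and the number of extra epochs is arbitrary):
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator

# rebuild the architecture and load the previously trained weights
with open("model1.json") as json_file:
    classifier = model_from_json(json_file.read())
classifier.load_weights("model1.h5")
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

# retrain on the enlarged training set (old images plus the misclassified ones)
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
training_set = train_datagen.flow_from_directory('C:/…/KNNDaten/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')
classifier.fit_generator(training_set, epochs = 5)   # a few extra epochs, starting from the old weights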