I am trying to perform image segmentation on the Cityscapes dataset. The dataset contains two directories (train and val), and the files in both directories are side-by-side image/mask pairs, so I do some preprocessing.
First, I split each pair into the original image and its mask.
Then I normalize the images.
Then I apply to_categorical to one-hot encode the masks into the number of classes that I have.
Below you can see my full code. I just don't understand why I am not getting any prediction image.
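(For reference, a minimal sketch of what to_categorical does to an integer mask; the tiny 2x2 mask below is made up for illustration:)
import numpy as np
from tensorflow.keras.utils import to_categorical

mask = np.array([[0, 2],
                 [1, 2]])                       # made-up integer mask, labels 0..2
one_hot = to_categorical(mask, num_classes=3)
print(one_hot.shape)                            # (2, 2, 3): one channel per class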
Training directory
path = "/content/drive/MyDrive/Image Segmentation/cityscapes_data/train/"
train_images_list = []
number = 1000
# creating a list of the images inside train folder
train_images = os.listdir(path)
for images in train_images[0:number]:
    # read each image as grayscale; skimage's imread takes an as_gray flag,
    # not a cv2 colour-conversion code like cv2.COLOR_RGB2GRAY
    image = io.imread(path + images, as_gray=True)
    train_images_list.append(image)
# convert list to array
train_arr = np.array(train_images_list)
# split the image pair, obtaining all images
train_images = train_arr[:,:,0:256]
# obtaining all the masks
train_masks = train_arr[:,:,256:512]
train_images = np.expand_dims(train_images, axis=3)
train_masks = np.expand_dims(train_masks, axis = 3)
Validation directory
path1 = "/content/drive/MyDrive/Image Segmentation/cityscapes_data/val/"
val_images_list = []
# creating a list of the images inside validation directory
val_images = os.listdir(path1)
for images1 in val_images:
    image1 = io.imread(path1 + images1, as_gray=True)
    val_images_list.append(image1)
# convert list to array
val_arr = np.array(val_images_list)
val_images = val_arr[:,:,0:256]
val_masks = val_arr[:,:,256:512]
val_images = np.expand_dims(val_images, axis=3)
val_masks = np.expand_dims(val_masks, axis=3)
Model
classes = 14
from tensorflow.keras.utils import normalize, to_categorical
train_masks_cat = to_categorical(train_masks, num_classes=classes)
y_train_cat = train_masks_cat.reshape((train_masks.shape[0],
train_masks.shape[1],
train_masks.shape[2],
classes))
test_masks_cat = to_categorical(val_masks, num_classes=classes)
y_test_cat = test_masks_cat.reshape((val_masks.shape[0],
val_masks.shape[1],
val_masks.shape[2],
classes))
The resulting shapes are:
train_images.shape -> (1000, 256, 256, 1)
y_train_cat.shape -> (1000, 256, 256, 14)
y_test_cat.shape -> (500, 256, 256, 14)
# Building Unet using encoder and decoder blocks
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.layers import Activation, MaxPool2D, Concatenate
num_filters =16
def conv_block(input, num_filters):
# first conv layer
x = Conv2D(num_filters, kernel_size = (3,3), padding='same')(input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# second conv layer
x = Conv2D(num_filters, kernel_size= (3,3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def encoder_block(input, num_filters):
# conv block
x = conv_block(input,num_filters)
# maxpooling
p = MaxPool2D(strides = (2,2))(x)
#p = Dropout(0.4)(p)
return x,p
def decoder_block(input, skip_features, num_filters):
x = Conv2DTranspose(num_filters, (2,2), strides=2, padding='same')(input)
x = Concatenate()([x, skip_features])
x = conv_block(x, num_filters)
return x
def unet_architect(input_shape, classes):
""" Input Layer """
inputs = Input(input_shape)
""" Encoder """
s1,p1 = encoder_block(inputs, num_filters)
s2,p2 = encoder_block(p1,num_filters *2)
s3,p3 = encoder_block(p2, num_filters *4)
s4,p4 = encoder_block(p3, num_filters * 8)
""" Bridge """
b1 = conv_block(p4,num_filters*16)
""" Decoder """
d1 = decoder_block(b1, s4, num_filters * 8)
d2 = decoder_block(d1, s3, num_filters *4)
d3 = decoder_block(d2, s2, num_filters *2)
d4 = decoder_block(d3, s1, num_filters)
""" Output Layer """
outputs = Conv2D(classes, (1,1), padding='same', activation = 'softmax')(d4)
model = Model(inputs, outputs, name='U-Net')
return model
# train_images.shape is (1000, 256, 256, 1)
img_height = train_images.shape[1]
img_width = train_images.shape[2]
img_channels = train_images.shape[3]
input_shape = (img_height, img_width, img_channels)
model = unet_architect(input_shape, classes)
model.compile(optimizer = 'adam' ,
loss = 'categorical_crossentropy',
metrics=['categorical_accuracy'])
model.summary()
history = model.fit(train_images, y_train_cat,
batch_size = 16,
verbose = 1,
epochs = 5,
validation_data = (val_images,y_test_cat))
pred = model.predict(val_images)
import random
test_img_number = random.randint(0, len(val_images) - 1)  # randint is inclusive on both ends
test_img = val_images[test_img_number]
ground_truth = val_masks[test_img_number]
predicted_img = pred[test_img_number]
At this point test_img.shape is (256, 256, 1) and ground_truth.shape is (256, 256, 1).
Previously predicted_img had shape (256, 256, 14). After applying argmax over the class axis:
predicted_img = np.argmax(predicted_img, -1)
its shape becomes (256, 256).
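For intuition, a tiny made-up example of how argmax collapses the class axis back into a label map:
probs = np.array([[[0.1, 0.9],
                   [0.8, 0.2]]])    # shape (1, 2, 2): two class scores per pixel
print(np.argmax(probs, -1))         # [[1 0]]: the most likely class per pixel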
plt.figure(figsize=(15,15))
plt.subplot(131)
plt.title('Original Image')
plt.imshow(test_img[:,:,0])
plt.subplot(132)
plt.title('Masked Image')
plt.imshow(ground_truth[:,:,0])
plt.subplot(133)
plt.title('Predicted Image')
plt.imshow(predicted_img)
plt.show()
Why is the prediction image showing nothing in the image below?
I am still new to Stack Overflow; that's why I can't display the image inline. Please view the image from the link below.
Thanks.
[2]: https://i.stack.imgur.com/armjk.png
Related
I am trying to build a DeepLabV3+ model from an article tutorial, but I'm getting a ValueError related to the TensorFlow model's inputs and outputs. Please look through it and help me understand what I'm getting wrong.
Thank you.
Here is the code:
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
!gdown https://drive.google.com/uc?id=1B9A9UCJYMwTL4oBEo4RZfbMZMaZhKJaz
!unzip -q instance-level-human-parsing.zip
#Organizing the dataset
IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = "./instance-level_human_parsing/instance-level_human_parsing/Training"
NUM_TRAIN_IMAGES = 1000
NUM_VAL_IMAGES = 50
train_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[:NUM_TRAIN_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[:NUM_TRAIN_IMAGES]
val_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[
NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
val_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[
NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
def read_image(image_path, mask=False):
image = tf.io.read_file(image_path)
if mask:
image = tf.image.decode_png(image, channels=1)
image.set_shape([None, None, 1])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
else:
image = tf.image.decode_png(image, channels=3)
image.set_shape([None, None, 3])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
image = image / 127.5 - 1
return image
def load_data(image_list, mask_list):
image = read_image(image_list)
mask = read_image(mask_list, mask=True)
return image, mask
def data_generator(image_list, mask_list):
dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_dataset = data_generator(train_images, train_masks)
val_dataset = data_generator(val_images, val_masks)
print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)
#Building the model
def convolution_block(
block_input,
num_filters=256,
kernel_size=3,
dilation_rate=1,
padding="same",
use_bias=False,
):
x = layers.Conv2D(
num_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding="same",
use_bias=use_bias,
kernel_initializer=keras.initializers.HeNormal(),
)(block_input)
x = layers.BatchNormalization()(x)
return tf.nn.relu(x)
def DilatedSpatialPyramidPooling(dspp_input):
dims = dspp_input.shape
x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)
x = convolution_block(x, kernel_size=1, use_bias=True)
out_pool = layers.UpSampling2D(
size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]), interpolation="bilinear",
)(x)
out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1)
out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6)
out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12)
out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18)
x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18])
output = convolution_block(x, kernel_size=1)
return output
def DeeplabV3Plus(image_size, num_classes):
model_input = keras.Input(shape=(image_size, image_size, 3))
resnet50 = keras.applications.ResNet50(
weights="imagenet", include_top=False, input_tensor=model_input
)
x = resnet50.get_layer("conv4_block6_2_relu").output
x = DilatedSpatialPyramidPooling(x)
input_a = layers.UpSampling2D(
size=(image_size // 4 // x.shape[1], image_size // 4 // x.shape[2]),
interpolation="bilinear",
)(x)
input_b = resnet50.get_layer("conv2_block3_2_relu").output
input_b = convolution_block(input_b, num_filters=48, kernel_size=1)
x = layers.Concatenate(axis=-1)([input_a, input_b])
x = convolution_block(x)
x = convolution_block(x)
x = layers.UpSampling2D(
size=(image_size // x.shape[1], image_size // x.shape[2]),
interpolation="bilinear",
)(x)
model_output = layers.Conv2D(num_classes, kernel_size=(1, 1), padding="same")(x)
return keras.Model(inputs=model_input, outputs=model_output)
model = DeeplabV3Plus(image_size=IMAGE_SIZE, num_classes=NUM_CLASSES)
model.summary()
Running the code produces the error below:
ValueError                                Traceback (most recent call last)
<ipython-input-...> in <module>()
     56
     57
---> 58 model = DeeplabV3Plus(image_size = IMAGE_SIZE, num_classes = NUM_CLASSES)
     59 model.summary()
     60

5 frames
/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py in _validate_graph_inputs_and_outputs(self)
    738       if not hasattr(x, '_keras_history'):
    739         cls_name = self.__class__.__name__
--> 740         raise ValueError(f'Output tensors of a {cls_name} model must be '
    741                          'the output of a TensorFlow Layer '
    742                          f'(thus holding past layer metadata). Found: {x}')

ValueError: Output tensors of a Functional model must be the output of a TensorFlow Layer (thus holding past layer metadata). Found: <tensorflow.python.keras.layers.convolutional.Conv2D object at 0x7f5c1c3b6310>
I'm trying to build a CNN model for an autoencoder. My code contains 3 functions:
Autoencoder contains the layers of my CNN.
make_autoencoder_model initializes and compiles the model.
fit_model_on_cifar10 fits the model on CIFAR-10.
I encounter the error:
ValueError: No gradients provided for any variable: ['conv2d_38/kernel:0', 'conv2d_38/bias:0', 'batch_normalization_34/gamma:0', 'batch_normalization_34/beta:0', 'conv2d_39/kernel:0', 'conv2d_39/bias:0', 'batch_normalization_35/gamma:0', 'batch_normalization_35/beta:0', 'dense_16/kernel:0', 'dense_16/bias:0', 'conv2d_40/kernel:0', 'conv2d_40/bias:0', 'batch_normalization_36/gamma:0', 'batch_normalization_36/beta:0', 'conv2d_41/kernel:0', 'conv2d_41/bias:0', 'batch_normalization_37/gamma:0', 'batch_normalization_37/beta:0', 'dense_17/kernel:0', 'dense_17/bias:0'].
Could you please elaborate on this error and how to fix it?
from keras.datasets import mnist, fashion_mnist, cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Conv2D, Dense, Dropout, MaxPooling2D, Flatten, Input, Reshape, BatchNormalization, UpSampling2D
import numpy as np
from keras.optimizers import SGD, Adam
##################################### Lenet-like
filters = (64, 32)
image_shape = (32, 32, 3)
class Autoencoder:
"""
Autoencoder architecture.
"""
def __init__(self):
"""
Architecture settings.
"""
# nothing to do in the init.
def __call__(self, X):
"""
Call autoencoder layers on the inputs.
"""
# encode
for f in filters:
X = Conv2D(filters = f, kernel_size = (3, 3), activation = 'relu')(X)
X = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'valid')(X)
X = BatchNormalization(axis = -1)(X)
X = Flatten()(X)
X = Dense(units = 8 * 8 * 3, activation = 'relu')(X)
X = Reshape(target_shape = (8, 8, 3))(X)
# decode
for f in filters[::-1]:
X = Conv2D(filters = f, kernel_size = (3, 3), activation = 'relu')(X)
X = UpSampling2D(size = (2, 2))(X)
X = BatchNormalization(axis = -1)(X)
X = Flatten()(X)
X = Dense(units = np.prod(image_shape), activation = 'sigmoid')(X)
Y = Reshape(target_shape = image_shape)(X)
return Y
###################################
def make_autoencoder_model():
"""
Create and compile autoencoder keras model.
"""
X = Input(shape = image_shape)
Y = Autoencoder()(X)
model = Model(inputs = X, outputs = Y)
model.compile(optimizer = 'adam',
metrics = ['accuracy'],
loss = 'mean_squared_error')
return model
######################################
def fit_model_on_cifar10(n_epochs = 3,
batch_size = 128,
visualization_size = 5,
verbose = 1):
# create your model and call it on your dataset
model = make_autoencoder_model()
# create a Keras ImageDataGenerator to handle your dataset
datagen = ImageDataGenerator(horizontal_flip = True)
if verbose > 0:
print(model.summary())
(x_train, _), (x_test, _) = cifar10.load_data()
# Be sure that your training/test data is 'float32' and between 0 and 1 (pixel image value)
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
try:
history = model.fit(datagen.flow(x_train, batch_size = batch_size),
epochs = n_epochs)
except KeyboardInterrupt:
print("Training interrupted!")
###################################
fit_model_on_cifar10()
In the following code, I save the label to tfrecord and read it again.
(In reality, I save both images and labels to the tfrecord; here is a simple example for illustration purposes.)
I got the error ValueError: Shapes (None, 3, 2) and (None, 2) are incompatible. How should I fix this? I am using TensorFlow 2.3. The key part should be in the return statement of parse_examples.
import contextlib2
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout
def process_image():
dic={
"image/label": tf.train.Feature(int64_list=tf.train.Int64List(value=[0,1]))
}
return tf.train.Example(features=tf.train.Features(feature=dic))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = [tf_record_close_stack.enter_context(tf.io.TFRecordWriter(file_name)) for file_name in
[f"data_train.tfrecord"]]
output_tfrecords[0].write(process_image().SerializeToString())
def parse_examples(examples):
parsed_examples = tf.io.parse_example(examples, features={
"image/label": tf.io.FixedLenFeature(shape=[2], dtype=tf.int64),
})
res = np.random.randint(2, size=3072).reshape(32, 32, 3)
return (res, [parsed_examples["image/label"],parsed_examples["image/label"],parsed_examples["image/label"]])
def process_dataset(dataset):
dataset = dataset.map(parse_examples, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(1)
return dataset
train_data = tf.data.TFRecordDataset(filenames="data_train.tfrecord")
train_data = process_dataset(train_data)
base_model = tf.keras.applications.EfficientNetB7(input_shape=(32,32, 3), weights='imagenet',
include_top=False) # or weights='noisy-student'
for layer in base_model.layers[:]:
layer.trainable = False
x = GlobalAveragePooling2D()(base_model.output)
dropout_rate = 0.3
x = Dense(256, activation='relu')(x)
x = Dropout(dropout_rate)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(dropout_rate)(x)
all_target = []
loss_list = []
test_metrics = {}
for name, node in [("task1", 2), ("task2", 2), ("task3", 2)]:
y1 = Dense(128, activation='relu')(x)
y1 = Dropout(dropout_rate)(y1)
y1 = Dense(64, activation='relu')(y1)
y1 = Dropout(dropout_rate)(y1)
y1 = Dense(node, activation='softmax', name=name)(y1)
all_target.append(y1)
loss_list.append('categorical_crossentropy')
test_metrics[name] = "accuracy"
# model = Model(inputs=model_input, outputs=[y1, y2, y3])
model = Model(inputs=base_model.input, outputs=all_target)
model.compile(loss=loss_list, optimizer='adam', metrics=test_metrics)
history = model.fit(train_data, epochs=1, verbose=1)
It turns out that just changing the return statement of parse_examples works:
return (res, {"task1":parsed_examples["image/label"],"task2":parsed_examples["image/label"],"task3":parsed_examples["image/label"]})
where task1, task2, and task3 are the names I gave to the softmax output layers.
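As far as I understand, tf.data converts the Python list in the original return statement into a single stacked tensor of shape (3, 2), which is why Keras complained that Shapes (None, 3, 2) and (None, 2) are incompatible; a dict keyed by the output layer names lets Keras match each target to its head separately. A minimal sketch of the corrected function:
def parse_examples(examples):
    parsed = tf.io.parse_example(examples, features={
        "image/label": tf.io.FixedLenFeature(shape=[2], dtype=tf.int64),
    })
    res = np.random.randint(2, size=3072).reshape(32, 32, 3)  # dummy image, as above
    # one entry per output layer name, so each head receives a (None, 2) target
    return res, {"task1": parsed["image/label"],
                 "task2": parsed["image/label"],
                 "task3": parsed["image/label"]}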
I have the TF 2.3.0 code below. The model is similar to an image super-resolution model. I have as input two datasets for training and two for validation, and I want to use model.fit with these datasets.
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Add, Activation
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
import pathlib
def build_model():
input_img = Input(shape=(48, 48, 1))
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation='relu')(input_img)
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation='relu')(model)
model = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation='relu')(model)
model = Conv2D(1, (3, 3), padding='same', kernel_initializer='he_normal')(model)
res_img = model
output_img = Add()([res_img, input_img])
model = Model(inputs=input_img, outputs=output_img)
return model
def load_image(image_path):
image = tf.io.read_file(image_path)
image = tf.io.decode_png(image, channels=1)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image -= 0.5
image /= 0.5
return image
def configure_for_performance(ds, batch_size):
ds = ds.cache()
ds = ds.shuffle(buffer_size=1000)
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
if __name__ == '__main__':
config = tf.compat.v1.ConfigProto(gpu_options= tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8))
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(session)
print('start training...')
BATCH_SIZE = 64
model = build_model()
adam = optimizers.Adam(lr=1e-2)
model.compile(adam, loss='mse')
model.summary()
print('start training....')
data_orig = tf.data.Dataset.list_files(str('C:\\SRColor2\\data\\div2k\\train\\orig\\*.png'), shuffle=False)
data_pred = tf.data.Dataset.list_files(str('C:\\SRColor2\\data\\div2k\\train\\pred\\*.png'), shuffle=False)
valid_orig = tf.data.Dataset.list_files(str('C:\\SRColor2\\data\\div2k\\valid\\orig\\*.png'), shuffle=False)
valid_pred = tf.data.Dataset.list_files(str('C:\\SRColor2\\data\\div2k\\valid\\pred\\*.png'), shuffle=False)
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
data_orig = data_orig.map(load_image, num_parallel_calls=AUTOTUNE)
data_pred = data_pred.map(load_image, num_parallel_calls=AUTOTUNE)
valid_orig = valid_orig.map(load_image, num_parallel_calls=AUTOTUNE)
valid_pred = valid_pred.map(load_image, num_parallel_calls=AUTOTUNE)
data_orig = configure_for_performance(data_orig, 64)
data_pred = configure_for_performance(data_pred, 64)
valid_orig = configure_for_performance(valid_orig, 64)
valid_pred = configure_for_performance(valid_pred, 64)
model.fit((data_pred, data_orig),
epochs=40,
batch_size=64,
validation_data=(valid_pred, valid_orig))
print('end training')
print('training ended')
When I run the code I get the following error: ValueError: y argument is not supported when using dataset as input.
What can I do to make the code run?
In my case I generated 48x48-pixel patches for each image; the images have high resolution. If, for an image with resolution (X, Y), I want to generate (X//48) x (Y//48) patches of size 48x48 pixels, can the dataset size be increased dynamically?
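(For what it's worth, a minimal sketch of how non-overlapping 48x48 patches could be generated inside the tf.data pipeline; tf.image.extract_patches and the flat_map step are my own suggestion, not part of the code above:)
PATCH = 48

def to_patches(image):
    # extract_patches expects a batch dimension: (H, W, 1) -> (1, H, W, 1)
    patches = tf.image.extract_patches(
        images=tf.expand_dims(image, 0),
        sizes=[1, PATCH, PATCH, 1],
        strides=[1, PATCH, PATCH, 1],
        rates=[1, 1, 1, 1],
        padding='VALID')
    # (1, H//48, W//48, 48*48*1) -> ((H//48)*(W//48), 48, 48, 1)
    return tf.data.Dataset.from_tensor_slices(
        tf.reshape(patches, [-1, PATCH, PATCH, 1]))

# assuming the dataset has been mapped with load_image but not yet batched,
# each image expands into (H//48)*(W//48) patches, growing the dataset size
patched = data_pred.flat_map(to_patches)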
The error message says it all. What you can try is zipping the datasets:
data_orig = data_orig.map(load_image, num_parallel_calls=AUTOTUNE)
data_pred = data_pred.map(load_image, num_parallel_calls=AUTOTUNE)
data_ds = tf.data.Dataset.zip((data_pred, data_orig))  # (input, target), matching the original fit call
data_ds = configure_for_performance(data_ds, 64)
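The zipped dataset already yields (input, target) pairs, so it is passed to fit on its own. A sketch, assuming data_pred is the network input as in the original fit call, with the validation pair zipped the same way:
valid_ds = tf.data.Dataset.zip((valid_pred, valid_orig))
valid_ds = configure_for_performance(valid_ds, 64)

# no separate y and no batch_size argument: the dataset supplies both
model.fit(data_ds, epochs=40, validation_data=valid_ds)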
The experiment is carried out on Windows 10 Pro with an Intel(R) Core(TM) i5-4590 CPU @ 3.3 GHz, on the Anaconda platform with Spyder and Python 3.7.150; it is programmed in the Python language with Python library functions.
I get the error message:
File "C:/Users/HSIPL/Desktop/Face Recognition With TensorFlow.py", line 102, in
x = layers.Droupout(0.5)(x)**
AttributeError: module 'tensorflow_core.keras.layers' has no attribute 'Droupout'
# Importing Libraries
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
# Preparing Dataset
# Setting names of the directories for both sets
base_dir = 'data'
seta ='Man_One'
setb ='Man_Two'
# Each of the sets has three sub directories train, validation and test
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
def prepare_data(base_dir, seta, setb):
# Take the directory names for the base directory and both the sets
# Returns the paths for train, validation for each of the sets
seta_train_dir = os.path.join(train_dir, seta)
setb_train_dir = os.path.join(train_dir, setb)
seta_valid_dir = os.path.join(validation_dir, seta)
setb_valid_dir = os.path.join(validation_dir, setb)
seta_train_fnames = os.listdir(seta_train_dir)
setb_train_fnames = os.listdir(setb_train_dir)
return seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames
seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames = prepare_data(base_dir, seta, setb)
seta_test_dir = os.path.join(test_dir, seta)
setb_test_dir = os.path.join(test_dir, setb)
test_fnames_seta = os.listdir(seta_test_dir)
test_fnames_setb = os.listdir(setb_test_dir)
datagen = ImageDataGenerator(
height_shift_range = 0.2,
width_shift_range = 0.2,
rotation_range = 40,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = 'nearest')
img_path = os.path.join(seta_train_dir, seta_train_fnames[3])
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size = 1):
plt.figure(i)
imgplot = plt.imshow(array_to_img(batch[0]))
i += 1
if i % 5 == 0:
break
# Convolutional Neural Network Model
# Import TensorFlow Libraries
from tensorflow.keras import layers
from tensorflow.keras import Model
img_input = layers.Input(shape = (150, 150, 3))
# 2D Convolution Layer with 8 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(8, 3, activation = 'relu')(img_input)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution Layer with 16 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(16, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution Layer with 32 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(32, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution Layer with 64 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(64, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution Layer with 64 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(64, 3, activation = 'relu')(x)
# Flatten Layer
x = layers.Flatten()(x)
# Fully Connected Layers and ReLU activation algorithm
x = layers.Dense(512, activation = 'relu')(x)
x = layers.Dense(512, activation = 'relu')(x)
x = layers.Dense(16, activation = 'relu')(x)
# Dropout Layers for optimisation
x = layers.Droupout(0.5)(x)
# Fully Connected Layers and sigmoid activation algorithm
output = layers.Dense(1, activation = 'sigmoid')(x)
model = Model(img_input, output)
model.summary()
import tensorflow as tf
# Using binary_crossentropy as the loss function and
# Adam Optimizer as the optimizing function when training
model.compile(loss = 'binary_crossentropy',
optimizer = tf.train.AdamOptimizer(learning_rate = 0.0005),
metrics = ['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size = (150, 150),
batch_size = 20,
class_mode = 'binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size = (150, 150),
batch_size = 20,
class_mode = 'binary')
import matplotlib.image as mpimg
# 5x5 grid
ncols = 5
nrows = 5
pic_index = 0
# Set up matplotlib fig and size it to fit 5x5 pics
fig = plt.gcf()
fig.set_size_inches(ncols * 4, nrows * 4)  # set_size_inches takes width and height in inches
pic_index += 10
next_seta_pix = [os.path.join(seta_train_dir, fname)
for fname in seta_train_fnames[pic_index-10:pic_index]]
next_setb_pix = [os.path.join(setb_train_dir, fname)
for fname in setb_train_fnames[pic_index-10:pic_index]]
for i, img_path in enumerate(next_seta_pix+next_setb_pix):
# Set up subplot; subplot indices start at 1
sp = plt.subplot(nrows, ncols, i + 1)
sp.axis('Off')
img =mpimg.imread(img_path)
plt.imshow(img)
plt.show()
# Train the model
mymodel = model.fit_generator(
train_generator,
steps_per_epoch = 10,
epochs = 80,
validation_data = validation_generator,
validation_steps = 7,
verbose = 2)
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)
a_img_files = [os.path.join(seta_train_dir, f) for f in seta_train_fnames]
b_img_files = [os.path.join(setb_train_dir, f) for f in setb_train_fnames]
img_path = random.choice(a_img_files + b_img_files)
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
x /= 255
successive_feature_maps = visualization_model.predict(x)
layer_names = [layer.name for layer in model.layers]
# Accuracy results for each training and validation epoch
acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']
# Loss Results for each training and validation epoch
loss = mymodel.history['loss']
val_loss = mymodel.history['val_loss']
epochs = range(len(acc))
# Plot accuracy for each training and validation epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
# Plot loss for each training and validation epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
# Testing model on a random train image from set a
train_img = random.choice(seta_train_fnames)
train_image_path = os.path.join(seta_train_dir, train_img)
train_img = load_img(train_image_path, target_size =(150, 150))
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
print(train_img.shape)
model.predict(train_img)
# Testing model on a random train image from set b
train_img = random.choice(setb_train_fnames)
train_image_path = os.path.join(setb_train_dir, train_img)
train_img = load_img(train_image_path, target_size =(150, 150))
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
print(train_img.shape)
model.predict(train_img)
# Testing a random image from the test set a
cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_seta:
if fname.startswith('.'):
continue
file_path = os.path.join(seta_test_dir, fname)
load_file = load_img(file_path, target_size = (150, 150))
load_file = (np.expand_dims(load_file, 0))
pred_img = model.predict(load_file)
if(pred_img[0]<0.5):
cal_mo+=1
elif(pred_img[0]>0.5):
cal_mt+=1
else:
print(pred_img[0], "\n")
cal_unconclusive+=1
alist.append(file_path)
print(alist)
print("Identified as: \n")
print("Man One:", cal_mo)
print("Man Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100)
a = (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100
# Testing a random image from the test set b
cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_setb:
if fname.startswith('.'):
continue
file_path = os.path.join(setb_test_dir, fname)
load_file = load_img(file_path, target_size = (150, 150))
load_file = (np.expand_dims(load_file, 0))
pred_img = model.predict(load_file)
if(pred_img[0]<0.5):
cal_mo+=1
elif(pred_img[0]>0.5):
cal_mt+=1
else:
print(pred_img[0], "\n")
cal_unconclusive+=1
alist.append(file_path)
print(alist)
print("Identified as: \n")
print("Man One:", cal_mo)
print("Man Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100)
b = (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100
avg = (a+b)/2
print("Average Percentage:", avg)
You have a typo: it's layers.Dropout, not layers.Droupout.
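For clarity, the offending line should read:
x = layers.Dropout(0.5)(x)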