Hi everyone, I'm new to Keras and I'm in trouble. I've found out how to combine two CNN models, but I can't feed the datasets into the combined model. Is there anyone who can help me?
Here is my code:
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 08:20:24 2019
#author: TECHFEA
"""
from keras import applications
from keras.layers import GlobalAveragePooling2D, Dense,Flatten,Conv2D,MaxPooling2D,Add,Input
from keras.layers import Concatenate
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import log_loss
from keras.models import Model
from keras.optimizers import SGD
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
import matplotlib.pyplot as plt
from keras.models import load_model
from scipy import interp
from itertools import cycle
from glob import glob
from keras.optimizers import Adam
train_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/train/"
validation_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/validation/"
train_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/train_lbp/"
validation_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/validation_lbp/"
className = glob(train_path + "*/")
numberOfClass = len(className)
batch_size=32
train_datagen = ImageDataGenerator(rescale= 1./255,
vertical_flip=False,
horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale = 1./255)
train_generator = train_datagen.flow_from_directory(train_path, target_size =(72,72),
batch_size = batch_size,
color_mode = "rgb",
class_mode = "categorical")
validation_generator = validation_datagen.flow_from_directory(validation_path, target_size =(72,72),
batch_size = 10,
color_mode = "rgb",
class_mode = "categorical")
train_generator2 = train_datagen.flow_from_directory(train_path2, target_size =(72,72),
batch_size = batch_size,
color_mode = "rgb",
class_mode = "categorical")
validation_generator2 = validation_datagen.flow_from_directory(validation_path2, target_size =(72,72),
batch_size = 10,
color_mode = "rgb",
class_mode = "categorical")
base_model1 = applications.VGG16(weights='imagenet', include_top=False, input_shape=(72,72,3))
base_model1.summary()
x1=base_model1.output
x1=Flatten()(x1)
x1=Dense(100,activation='relu')(x1)
model1 = Model(inputs=base_model1.input, outputs=x1)
model1.summary()
input_shallow = Input(shape = (72,72,3))
conv1 = Conv2D(16, (3,3), activation = 'relu', padding = "same")(input_shallow)
pool1 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv1)
conv2 = Conv2D(32, (3,3), activation = 'relu', padding = "same")(pool1)
pool2 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv2)
flat1=Flatten()(pool2)
dense_1=Dense(100,activation='relu')(flat1)
model2=Model(inputs=input_shallow,outputs=dense_1)
model2.summary()
mergedOut = Add()([model1.output,model2.output])
out=Dense(2048, activation='relu')(mergedOut)
out = Dense(7, activation='softmax', name='predictions')(out)
model = Model(inputs=[model1.input,model2.input], outputs=out)
model.summary()
opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
hist = model.fit_generator(
generator=(train_generator,train_generator2),
steps_per_epoch = 10,
epochs=16,
validation_data =(validation_generator,validation_generator2),
validation_steps = 2,
shuffle=True)
Here is what I want to do with the images: [diagram not shown]
And here is the error I get:
'DirectoryIterator' object has no attribute 'ndim'
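For context on that error: fit_generator expects a single generator that yields ([input1_batch, input2_batch], label_batch) for a two-input model; a tuple of DirectoryIterators is not a generator, which is why Keras complains about a missing ndim attribute. A minimal sketch of a wrapper, assuming both directories contain the same images in the same order (safest with shuffle=False, or an identical seed, on all four flow_from_directory calls):
def combine_generators(gen1, gen2):
    # Yield ([rgb_batch, lbp_batch], labels) so each branch of the merged
    # model receives its own image batch; labels come from the first stream.
    while True:
        x1, y1 = next(gen1)
        x2, _ = next(gen2)
        yield [x1, x2], y1

train_combined = combine_generators(train_generator, train_generator2)
valid_combined = combine_generators(validation_generator, validation_generator2)

hist = model.fit_generator(
        generator=train_combined,
        steps_per_epoch=10,
        epochs=16,
        validation_data=valid_combined,
        validation_steps=2)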
I have a dataset of around 3500 images, divided into 3 folders, that I loaded into Google Colab from my Google Drive, and I'm trying to train an ML model on them using Keras and TensorFlow with the following code:
train = tf.keras.preprocessing.image_dataset_from_directory(
path,
labels = "inferred",
label_mode = "categorical",
color_mode = "rgb",
batch_size = 32,
image_size = (140,140),
seed = 1234,
subset = "training",
validation_split = 0.2
)
shape = tf.TensorShape([None,140,140,3])
print(shape)
valid = tf.keras.preprocessing.image_dataset_from_directory(
path,
labels = "inferred",
label_mode = "categorical",
color_mode = "rgb",
batch_size = 32,
image_size = (140,140),
seed = 1234,
subset = "validation",
validation_split = 0.2
)
print(train)
print(valid)
print(tf.keras.utils.image_dataset_from_directory(path, labels='inferred'))
from keras.models import Sequential
from keras.layers import Dense
from tensorflow import keras
#from tensorflow.keras import layers
model = Sequential()
model.add(Dense(256, activation = "softmax", input_shape = (140,140,3)))
model.add(Dense(64, activation = "softmax"))
model.add(Dense(32, activation = "softmax"))
#model.add(Dense(3, activation = "softmax"))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
print(model.summary())
keras.utils.plot_model(model, "my_first_model_with_shape_info.png", show_shapes=True)
#print(tf.keras.utils.plot_model(model))
model.fit(train, validation_data = valid, epochs = 50, batch_size = 32)
However, when I run the code I get this error:
ValueError: Shapes (None, 3) and (None, 140, 140, 32) are incompatible
I tried fixing this by giving the "train" variable the shape (None, 140, 140, 3), but I'm not sure how to do that. Does anyone know how to make the shapes of my "train" and "valid" variables compatible with the model I made? Thank you.
For reference, this is the "train" variable:
train = tf.keras.preprocessing.image_dataset_from_directory(
path,
labels = "inferred",
label_mode = "categorical",
color_mode = "rgb",
batch_size = 32,
image_size = (140,140),
seed = 1234,
subset = "training",
validation_split = 0.2
)
When I print "train", however, I get this:
<BatchDataset element_spec=(TensorSpec(shape=(None, 140, 140, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 3), dtype=tf.float32, name=None))>
So can someone also explain what a BatchDataset element is, and how I can edit its shape in the first place? Thanks.
The number of neurons in the last layer should be the same as the number of classes you want to classify (it should be 3 if you are trying to classify 3 types of flowers, not 32). I also added a few convolution and pooling layers to improve performance.
import tensorflow as tf
from tensorflow.keras import layers
import pathlib
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
path = pathlib.Path(data_dir)
train = tf.keras.preprocessing.image_dataset_from_directory(
path,
labels = "inferred",
label_mode = "categorical",
color_mode = "rgb",
batch_size = 32,
image_size = (140,140),
seed = 1234,
subset = "training",
validation_split = 0.2
)
shape = tf.TensorShape([None,140,140,3])
print(shape)
valid = tf.keras.preprocessing.image_dataset_from_directory(
path,
labels = "inferred",
label_mode = "categorical",
color_mode = "rgb",
batch_size = 32,
image_size = (140,140),
seed = 1234,
subset = "validation",
validation_split = 0.2
)
classnames = train.class_names
print(classnames)
print(train)
print(valid)
num_classes = len(classnames)
print(tf.keras.utils.image_dataset_from_directory(path, labels='inferred'))
from keras.models import Sequential
from keras.layers import Dense
from tensorflow import keras
#from tensorflow.keras import layers
model = Sequential([
layers.Rescaling(1./255, input_shape=(140,140, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes, activation='softmax')  # in your case: layers.Dense(3, activation='softmax')
])
model.compile(loss = tf.keras.losses.CategoricalCrossentropy(),optimizer = 'adam', metrics = ['accuracy'])
print(model.summary())
keras.utils.plot_model(model, "my_first_model_with_shape_info.png", show_shapes=True)
#print(tf.keras.utils.plot_model(model))
model.fit(train, validation_data = valid, epochs = 50, batch_size = 32)
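On the side question about BatchDataset: it is a tf.data.Dataset that yields (images, labels) batch pairs, and element_spec just describes their shapes. You do not edit the shape attribute directly; you change it by transforming the pipeline, for example by resizing. A minimal sketch, reusing the train dataset from above:
import tensorflow as tf

# A BatchDataset yields (images, labels) pairs; element_spec describes each batch.
# Shapes are changed by transforming the dataset, not by assigning to the spec.
resized = train.map(lambda images, labels: (tf.image.resize(images, (72, 72)), labels))
print(resized.element_spec)
# (TensorSpec(shape=(None, 72, 72, 3), ...), TensorSpec(shape=(None, 3), ...))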
Currently, I am looking for ways to optimize my model (image classification of simple triangles and squares), and I've been stuck on understanding what these file paths are supposed to reference. Is it a path on your computer, or is it something else?
checkpoint_filepath = 'C:\tempfile'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
model_fit = model.fit(train_dataset,
steps_per_epoch = 5,
epochs = 30,
validation_data= validation_dataset,
callbacks=[reduce_lr, model_checkpoint_callback]
)
I've also had the same issues with TensorBoard. The code runs just fine with the reduce_lr callback, but has issues when I add the ModelCheckpoint callback.
Here is the rest of my code for reference:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import RMSprop
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
from keras.callbacks import ReduceLROnPlateau
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ModelCheckpoint
img = image.load_img('C:/Input_DataTS/Triangles/triangle.jpg')
plt.imshow(img)
cv2.imread('C:\Input_DataTS')
train = ImageDataGenerator(rescale= 1/255)
validation = ImageDataGenerator(rescale= 1/255)
train_dataset = train.flow_from_directory('C:\Input_DataTS',
target_size= (200,200),
batch_size= 3,
class_mode = 'binary')
validation_dataset = train.flow_from_directory('C:\Validiation_DataTS',
target_size= (200,200),
batch_size= 3,
class_mode = 'binary')
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16,(3,3),activation = 'relu', input_shape =(200, 200, 3)),
tf.keras.layers.MaxPool2D(2,2),
#
tf.keras.layers.Conv2D(32,(3,3),activation = 'relu'),
tf.keras.layers.MaxPool2D(2,2),
#
tf.keras.layers.Conv2D(64,(3,3),activation = 'relu'),
tf.keras.layers.MaxPool2D(2,2),
#
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32,activation = 'relu'),
tf.keras.layers.Dense(1,activation= 'sigmoid')
])
model.compile(loss= 'binary_crossentropy',
optimizer = RMSprop(lr=0.001),
metrics =['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
checkpoint_filepath = 'C:\tempfile'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
model_fit = model.fit(train_dataset,
steps_per_epoch = 5,
epochs = 30,
validation_data= validation_dataset,
callbacks=[reduce_lr, model_checkpoint_callback]
)
dir_path = 'C:/Testing_DataTS'
for i in os.listdir(dir_path ):
img = image.load_img(dir_path + '//' + i, target_size=(200,200))
plt.imshow(img)
plt.show()
X = image.img_to_array(img)
X = np.expand_dims(X,axis =0)
images = np.vstack([X])
val = model.predict(images)
if val == 0:
print("square")
else:
print("triangle")
Although this isn't needed for this model, I would like to learn how to do it properly for future cases. If anyone can help me with this issue, I'd greatly appreciate it. Thank you for your time!
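To the question itself: checkpoint_filepath is just a location on your own filesystem where ModelCheckpoint writes the weights; any writable path works. One caveat: in 'C:\tempfile' the sequence \t is interpreted by Python as a tab character, so the path is not what it looks like. A minimal sketch using a raw string (the directory name is a hypothetical example):
import tensorflow as tf

# Hypothetical path; the r-prefix (raw string) stops Python from turning "\t"
# into a tab. Forward slashes ('C:/checkpoints/best.h5') also work on Windows.
checkpoint_filepath = r'C:\checkpoints\best.h5'

model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,   # writes only the weights to this file
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)      # overwrites the file only when val_accuracy improves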
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Set the seed
tf.random.set_seed(42)
# Preprocess the data (scale all pixel values to between 0 and 1, also called scaling/normalization)
train_datagen = ImageDataGenerator(rescale = 1./255)
valid_datagen = ImageDataGenerator(rescale = 1./255)
# Set up paths to our data directories
train_dir = '/content/pizza_steak/train'
test_dir = 'pizza_steak/test'
# Import data from the directories and turn it into batches
train_data = train_datagen.flow_from_directory(directory = train_dir,
batch_size = 32,
target_size = (224, 224),
class_mode = 'binary',
seed = 42)
valid_data = valid_datagen.flow_from_directory(directory = test_dir,
batch_size = 32,
target_size = (224, 224),
class_mode = 'binary',
seed = 42)
# Build a CNN model (same as the Tiny VGG on the CNN Explainer website)
model_1 = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(filters = 10,
kernel_size = 3,
activation = 'relu',
input_shape = (244, 244, 3)),
tf.keras.layers.Conv2D(10, 3, activation = 'relu'),
tf.keras.layers.MaxPool2D(pool_size = 2,
padding = 'valid'),
tf.keras.layers.Conv2D(10, 3, activation = 'relu'),
tf.keras.layers.Conv2D(10, 3, activation = 'relu'),
tf.keras.layers.MaxPool2D(2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1, activation = 'sigmoid')
])
# Compile our CNN
model_1.compile(
loss = 'binary_crossentropy',
optimizer = tf.keras.optimizers.Adam(),
metrics = ['accuracy']
)
# Fit the model
history_1 = model_1.fit(train_data,
epochs = 5,
steps_per_epoch = len(train_data),
validation_data = valid_data,
validation_steps = len(valid_data))
You either need to change your input_shape to (224, 224, 3) or the target_size to (244, 244); it will not work with mismatched shapes. Here is a working example:
import tensorflow as tf
import matplotlib.pyplot as plt
BATCH_SIZE = 32
flowers = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
train_data = train_datagen.flow_from_directory(directory = flowers,
batch_size = 32,
target_size = (224, 224),
class_mode = 'sparse',
seed = 42)
model_1 = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(filters = 10,
kernel_size = 3,
activation = 'relu',
input_shape = (224, 224, 3)),
tf.keras.layers.Conv2D(10, 3, activation = 'relu'),
tf.keras.layers.MaxPool2D(pool_size = 2,
padding = 'valid'),
tf.keras.layers.Conv2D(10, 3, activation = 'relu'),
tf.keras.layers.Conv2D(10, 3, activation = 'relu'),
tf.keras.layers.MaxPool2D(2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(5, activation = 'softmax')
])
model_1.compile(
loss = 'sparse_categorical_crossentropy',
optimizer = tf.keras.optimizers.Adam(),
metrics = ['accuracy']
)
history_1 = model_1.fit(train_data,steps_per_epoch = len(train_data),
epochs = 5)
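If the shapes ever disagree again, a quick sanity check (a sketch reusing train_data and model_1 from above) is to compare one generator batch against the model's declared input:
# Compare one batch from the generator with the model's expected input shape.
images, labels = next(train_data)
print(images.shape)         # e.g. (32, 224, 224, 3): batch, height, width, channels
print(model_1.input_shape)  # (None, 224, 224, 3); None is the batch dimension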
import numpy as np
import pandas as pd
from pathlib import Path
import os.path
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.metrics import r2_score
from keras.applications.efficientnet import EfficientNetB3
import gc
from keras.models import Sequential
from keras import layers, models
from keras import Input
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers, initializers, regularizers, metrics
from keras.callbacks import ModelCheckpoint
import os
from glob import glob
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import optimizers
from keras.layers import Conv2D,MaxPool2D,GlobalAveragePooling2D,AveragePooling2D
from keras.layers import Dense,Dropout,Activation,Flatten
import sys
# Repository source: https://github.com/qubvel/efficientnet
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
image_dir = Path('/content/drive/MyDrive/processed')
filepaths = pd.Series(list(image_dir.glob(r'**/*.jpg')), name='Filepath').astype(str)
TS = pd.Series(sorted([int(l.split('TS_')[1].split('/pre')[0]) for l in filepaths]), name='TS').astype(int)
images = pd.concat([filepaths, TS], axis=1).sample(frac=1.0,
random_state=1).reset_index(drop=True)
image_df = images.sample(2020, random_state=1).reset_index(drop=True)
train_df, test_df = train_test_split(image_df, train_size=0.7, shuffle=True, random_state=1)
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
validation_split=0.2
)
test_generator = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255
)
train_input = train_generator.flow_from_dataframe(
dataframe=train_df,
x_col='Filepath',
y_col='TS',
target_size=(256, 256),
color_mode='grayscale',
class_mode='raw',
batch_size=1,
shuffle=True,
seed=42,
subset='training'
)
val_input = train_generator.flow_from_dataframe(
dataframe=train_df,
x_col='Filepath',
y_col='TS',
target_size=(256, 256),
color_mode='grayscale',
class_mode='raw',
batch_size=1,
shuffle=True,
seed=42,
subset='validation'
)
test_input = test_generator.flow_from_dataframe(
dataframe=test_df,
x_col='Filepath',
y_col='TS',
target_size=(256, 256),
color_mode='grayscale',
class_mode='raw',
batch_size=1,
shuffle=False
)
inputs = tf.keras.Input(shape=(256, 256, 1))
x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(inputs)
x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPool2D()(x)
x = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPool2D()(x)
x = tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPool2D()(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(128, kernel_initializer='he_normal')(x)
x = tf.keras.layers.Dense(64, kernel_initializer='he_normal')(x)
outputs = tf.keras.layers.Dense(1, activation='linear')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer='adam',
loss='mae'
)
history = model.fit(
train_input,
validation_data=val_input,
epochs=10,
callbacks=[
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=5,
restore_best_weights=True
)
]
)
#Results
predicted_TS = np.squeeze(model.predict(test_input))
true_TS = test_input.labels
# Note: the model was compiled with loss='mae', so model.evaluate returns MAE;
# RMSE has to be computed from the predictions instead.
rmse = np.sqrt(np.mean((true_TS - predicted_TS) ** 2))
print("Test RMSE: {:.5f}".format(rmse))
r2 = r2_score(true_TS, predicted_TS)
print("Test R^2 Score: {:.5f}".format(r2))
null_rmse = np.sqrt(np.sum((true_TS - np.mean(true_TS))**2) / len(true_TS))
print("Null/Baseline Model Test RMSE: {:.5f}".format(null_rmse))
The images are alloy microstructures and TS is the tensile strength of the alloy. I expected that feeding the images into this model would produce a spread of predicted values, but the predictions are almost all the same value, and I can't understand why. This gives a terrible RMSE; how can I reduce it?
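Two things worth checking before tuning anything else, offered as a hedged suggestion rather than a confirmed diagnosis. First, TS is built with sorted(...) while filepaths keeps its glob order, so the concat can pair images with the wrong tensile strengths; the labels should be derived row by row from the same series. Second, the two hidden Dense layers have no activation, so the head collapses into a single linear map of the flattened features; with weakly informative (or misaligned) labels and MAE loss, predicting a near-constant value close to the median is exactly what such a model tends to do. A minimal sketch of both fixes, reusing filepaths from above:
# Derive TS row by row from the same filepaths series (no independent sort).
TS = filepaths.map(lambda p: int(p.split('TS_')[1].split('/pre')[0])).rename('TS')
images = pd.concat([filepaths, TS], axis=1).sample(frac=1.0, random_state=1).reset_index(drop=True)

# Same architecture, but with ReLU on the hidden Dense layers so the head
# is no longer a purely linear transformation.
inputs = tf.keras.Input(shape=(256, 256, 1))
x = inputs
for filters in (32, 64, 128):
    x = tf.keras.layers.Conv2D(filters, 3, activation='relu')(x)
    x = tf.keras.layers.Conv2D(filters, 3, activation='relu')(x)
    x = tf.keras.layers.MaxPool2D()(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(128, activation='relu', kernel_initializer='he_normal')(x)
x = tf.keras.layers.Dense(64, activation='relu', kernel_initializer='he_normal')(x)
outputs = tf.keras.layers.Dense(1)(x)  # linear output for regression
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='adam', loss='mse')  # MSE targets the mean rather than the median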
I am very new to deep learning. I am training on anime illustration images and I am receiving the error: logits and labels must be broadcastable: logits_size=[214,2] labels_size=[214,173]
I am sure there are other errors in my code, but I am unsure where to look. I ran model.summary() and noticed:
Total params: 12,219,618
Trainable params: 7,080,962
Non-trainable params: 5,138,656
I would really appreciate any help. Thanks.
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from tensorflow.keras import backend as K
from tensorflow.keras import metrics, optimizers
import matplotlib.pyplot as plt
train_datagen = ImageDataGenerator(
rescale=1. / 255,
rotation_range = 30,
zoom_range = 0.2,
width_shift_range=0.1,
height_shift_range=0.1,
validation_split = 0.15)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size = (75,75),
batch_size = 214,
class_mode = 'categorical',
subset='training')
#validation_generator = test_datagen.flow_from_directory(
# validation_dir,
# target_size = (75,75),
# batch_size = 37,
# class_mode = 'categorical',
# subset = 'validation')
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(75,75),
batch_size = 32,
class_mode = 'categorical')
# Inspect batch
sample_training_images, _ = next(train_generator)
from tensorflow.keras.applications.inception_v3 import InceptionV3
def model_output_for_TL (pre_trained_model, last_output):
x = Flatten()(last_output)
# Dense hidden layer
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
# Output neuron.
x = Dense(2, activation='softmax')(x)
model = Model(pre_trained_model.input, x)
return model
pre_trained_model = InceptionV3(input_shape = (75, 75, 3),
include_top = False,
classes=173,
weights = 'imagenet')
for layer in pre_trained_model.layers:
layer.trainable = False
last_layer = pre_trained_model.get_layer('mixed5')
last_output = last_layer.output
model_TL = model_output_for_TL(pre_trained_model, last_output)
model_TL.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history_TL = model_TL.fit(
train_generator,
steps_per_epoch=10,
epochs=60,
verbose=2)
#validation_data = validation_generator)
tf.keras.models.save_model(model_TL,'my_model.hdf5')
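Not a definitive diagnosis, but the error message itself points at the output layer: the generator found 173 class folders (labels of shape [214, 173]), while the final Dense layer produces only 2 logits ([214, 2]). Note also that the classes=173 argument to InceptionV3 is ignored when include_top=False, so it does not help here. A sketch of the head with the output sized from the generator rather than hard-coded, reusing the objects defined above:
# Size the final layer from the number of classes the generator found,
# instead of hard-coding Dense(2).
num_classes = train_generator.num_classes  # 173 in this case

def model_output_for_TL(pre_trained_model, last_output, num_classes):
    x = Flatten()(last_output)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)  # one output per class
    return Model(pre_trained_model.input, x)

model_TL = model_output_for_TL(pre_trained_model, last_output, num_classes)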