import os
import sys
import gc
from glob import glob
from pathlib import Path

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

import tensorflow as tf
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.applications.efficientnet import EfficientNetB3
from keras.models import Sequential, Model
from keras import layers, models, Input, initializers, regularizers, metrics
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import (Conv2D, MaxPool2D, GlobalAveragePooling2D,
                          AveragePooling2D, Dense, Dropout, Activation, Flatten)

# Repository source: https://github.com/qubvel/efficientnet
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
image_dir = Path('/content/drive/MyDrive/processed')
filepaths = pd.Series(list(image_dir.glob(r'**/*.jpg')), name='Filepath').astype(str)
# Keep the labels in the same order as the filepaths; sorting the TS values here
# (as the original did) scrambles the label-image pairing. Also note np.int was
# removed from recent NumPy releases; plain int works.
TS = pd.Series([int(p.split('TS_')[1].split('/pre')[0]) for p in filepaths],
               name='TS').astype(int)
images = pd.concat([filepaths, TS], axis=1).sample(frac=1.0, random_state=1).reset_index(drop=True)
image_df = images.sample(2020, random_state=1).reset_index(drop=True)
train_df, test_df = train_test_split(image_df, train_size=0.7, shuffle=True, random_state=1)
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    validation_split=0.2
)
test_generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255
)
train_input = train_generator.flow_from_dataframe(
    dataframe=train_df,
    x_col='Filepath',
    y_col='TS',
    target_size=(256, 256),
    color_mode='grayscale',
    class_mode='raw',
    batch_size=1,  # note: batch_size=1 is very slow; larger batches usually train more stably
    shuffle=True,
    seed=42,
    subset='training'
)
val_input = train_generator.flow_from_dataframe(
    dataframe=train_df,
    x_col='Filepath',
    y_col='TS',
    target_size=(256, 256),
    color_mode='grayscale',
    class_mode='raw',
    batch_size=1,
    shuffle=True,
    seed=42,
    subset='validation'
)
test_input = test_generator.flow_from_dataframe(
    dataframe=test_df,
    x_col='Filepath',
    y_col='TS',
    target_size=(256, 256),
    color_mode='grayscale',
    class_mode='raw',
    batch_size=1,
    shuffle=False
)
inputs = tf.keras.Input(shape=(256, 256, 1))
x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(inputs)
x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPool2D()(x)
x = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPool2D()(x)
x = tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPool2D()(x)
x = tf.keras.layers.Flatten()(x)
# Note: without an activation these two Dense layers are purely linear and
# collapse into a single linear map; 'relu' keeps them non-linear.
x = tf.keras.layers.Dense(128, activation='relu', kernel_initializer='he_normal')(x)
x = tf.keras.layers.Dense(64, activation='relu', kernel_initializer='he_normal')(x)
outputs = tf.keras.layers.Dense(1, activation='linear')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer='adam',
    loss='mae'
)
history = model.fit(
    train_input,
    validation_data=val_input,
    epochs=10,
    callbacks=[
        tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=5,
            restore_best_weights=True
        )
    ]
)
# Results
predicted_TS = np.squeeze(model.predict(test_input))
true_TS = test_input.labels
# The model's loss is MAE, so np.sqrt(model.evaluate(...)) would give sqrt(MAE),
# not RMSE; compute RMSE directly from the predictions instead.
rmse = np.sqrt(np.mean((true_TS - predicted_TS) ** 2))
print("Test RMSE: {:.5f}".format(rmse))
r2 = r2_score(true_TS, predicted_TS)
print("Test R^2 Score: {:.5f}".format(r2))
null_rmse = np.sqrt(np.sum((true_TS - np.mean(true_TS)) ** 2) / len(true_TS))
print("Null/Baseline Model Test RMSE: {:.5f}".format(null_rmse))
The images are alloy microstructures and TS is the tensile strength of the alloy. I expected that feeding the images to this model would yield a spread of predicted values, but I can't understand why the predictions are almost all the same. And how can I reduce the RMSE? These results give a terrible RMSE.
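A hedged note on the near-constant predictions: with raw TS targets far from the unit range and MAE loss, a small CNN can settle on predicting a value near the target median for every image. A minimal sketch, assuming the train_df/test_df built above, that standardizes the targets before training and inverts the scaling afterwards:

```python
# Sketch under assumptions: train_df/test_df as built above; the model and
# generators are otherwise unchanged. Standardizing TS keeps the regression
# head working in a well-scaled range.
ts_mean = train_df['TS'].mean()
ts_std = train_df['TS'].std()
train_df = train_df.assign(TS=(train_df['TS'] - ts_mean) / ts_std)
test_df = test_df.assign(TS=(test_df['TS'] - ts_mean) / ts_std)

# ... rebuild the generators and train exactly as before ...

# Undo the scaling when reporting results in the original units:
predicted_TS = np.squeeze(model.predict(test_input)) * ts_std + ts_mean
true_TS = test_input.labels * ts_std + ts_mean
```

With standardized targets, switching the loss to 'mse' would also make np.sqrt(model.evaluate(...)) a true RMSE in scaled units.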
The code:
from google.colab import drive
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
# Use the public tf.keras API rather than tensorflow.python.keras internals
# (the original also imported the removed optimizer_v1 module).
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
device_list = tf.test.gpu_device_name()
if device_list != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_list))
datagen_train = tf.keras.preprocessing.image.ImageDataGenerator()
datagen_val = tf.keras.preprocessing.image.ImageDataGenerator()
datagen_test = tf.keras.preprocessing.image.ImageDataGenerator()
size = 128
batch_size=20
tf.compat.v1.disable_eager_execution()  # forces TF1 graph mode; usually unnecessary in TF2
train_set = datagen_train.flow_from_directory("drive/MyDrive/train",
                                              target_size=(size, size),
                                              color_mode="grayscale",
                                              batch_size=batch_size,
                                              class_mode='categorical',
                                              shuffle=True)
val_set = datagen_val.flow_from_directory("drive/MyDrive/valid",
                                          target_size=(size, size),
                                          color_mode="grayscale",
                                          batch_size=batch_size,
                                          class_mode='categorical',
                                          shuffle=True)
# Use the test datagen here (the original passed datagen_train by mistake);
# shuffle=False keeps predictions aligned with labels during evaluation.
test_set = datagen_test.flow_from_directory("drive/MyDrive/test",
                                            target_size=(size, size),
                                            color_mode="grayscale",
                                            batch_size=batch_size,
                                            class_mode='categorical',
                                            shuffle=False)
imgs,labels = next(test_set)
model = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(128, 128, 1)),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Flatten(),
    Dense(units=256, activation='relu'),
    Dense(units=512, activation='relu'),
    Dense(units=2, activation='softmax')
])
checkpoint = ModelCheckpoint("./model.h5", monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')  # 'val_acc' is named 'val_accuracy' in TF2
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, restore_best_weights=True)  # fixed the 'vall_loss' typo
reducelearningrate = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, min_delta=0.0001)
callbacks_list = [earlystopping, checkpoint, reducelearningrate]
ep = 30
opt = Adam(learning_rate=0.0001)  # 'lr' is deprecated
model.summary()
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x=train_set, epochs=ep, steps_per_epoch=601, validation_data=val_set, validation_steps=209, callbacks=callbacks_list, verbose=1)  # note: callbacks_list was defined but never passed in the original
model.save('Drowsines_Detector2.h5')
model.evaluate(x = imgs, y = labels, verbose = 2)
On the first run in Google Colab, the program takes an hour and a half for the first epoch and then gets stuck at step 601/601. After cancelling and rerunning, it completes the first epoch very quickly, in 15 or 16 seconds, but then gets stuck at step 600/601 or 601/601 and never continues to the second epoch. How can I fix this?
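A hedged guess, not a confirmed diagnosis: fit() hanging on the last step of an epoch is often caused by steps_per_epoch or validation_steps exceeding the number of batches the iterators can actually deliver. A minimal sketch that derives both counts from the iterators themselves (len() of a DirectoryIterator is its batch count):

```python
# Sketch: replace the hard-coded 601/209 with counts derived from the
# iterators, so fit() never blocks waiting for a batch that doesn't exist.
steps_train = len(train_set)   # ceil(n_train_images / batch_size)
steps_val = len(val_set)
model.fit(x=train_set,
          epochs=ep,
          steps_per_epoch=steps_train,
          validation_data=val_set,
          validation_steps=steps_val,
          callbacks=callbacks_list,
          verbose=1)
```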
Currently, I am looking for ways to optimize my model (image classification for simple triangles and squares) and I've been stuck on understanding what these file paths are supposed to be referencing. Is it a path on your computer, or is it something else?
checkpoint_filepath = 'C:\tempfile'  # note: '\t' here is a tab escape; prefer r'C:\tempfile' or 'C:/tempfile'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)
model_fit = model.fit(train_dataset,
                      steps_per_epoch=5,
                      epochs=30,
                      validation_data=validation_dataset,
                      callbacks=[reduce_lr, model_checkpoint_callback]
                      )
I've had the same issues with TensorBoard as well. The code runs just fine with the reduce_lr callback, but has issues when I add the ModelCheckpoint callback.
Here is the rest of my code for reference:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import RMSprop
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
from keras.callbacks import ReduceLROnPlateau
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ModelCheckpoint
img = image.load_img('C:/Input_DataTS/Triangles/triangle.jpg')
plt.imshow(img)
cv2.imread('C:\Input_DataTS')  # note: this is a directory path, so imread returns None
train = ImageDataGenerator(rescale= 1/255)
validation = ImageDataGenerator(rescale= 1/255)
train_dataset = train.flow_from_directory('C:\Input_DataTS',
                                          target_size=(200, 200),
                                          batch_size=3,
                                          class_mode='binary')
# Use the validation generator defined above (the original reused `train`;
# harmless here since both only rescale, but not what was intended).
validation_dataset = validation.flow_from_directory('C:\Validiation_DataTS',
                                                    target_size=(200, 200),
                                                    batch_size=3,
                                                    class_mode='binary')
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(200, 200, 3)),
    tf.keras.layers.MaxPool2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPool2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.001),  # 'lr' is deprecated
              metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=5, min_lr=0.001)  # note: min_lr equals the initial lr, so no reduction can occur
checkpoint_filepath = 'C:\tempfile'  # same tab-escape caveat as above
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)
model_fit = model.fit(train_dataset,
                      steps_per_epoch=5,
                      epochs=30,
                      validation_data=validation_dataset,
                      callbacks=[reduce_lr, model_checkpoint_callback]
                      )
dir_path = 'C:/Testing_DataTS'
for i in os.listdir(dir_path):
    img = image.load_img(dir_path + '//' + i, target_size=(200, 200))
    plt.imshow(img)
    plt.show()
    X = image.img_to_array(img)
    X = np.expand_dims(X, axis=0)
    images = np.vstack([X])
    val = model.predict(images)
    if val == 0:
        print("square")
    else:
        print("triangle")
Although this isn't needed for this model, I would like to learn how to do it properly for future cases. If anyone can help me with this issue, I'd greatly appreciate it. Thank you for your time!
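On the file-path question itself: checkpoint_filepath is simply a location on the machine running the script (your own computer here) where Keras writes the weights; it can be any writable path. A hedged sketch with a hypothetical path; note that 'C:\tempfile' contains the escape sequence \t (a tab), which a raw string or forward slashes avoid, and that newer Keras versions want weights-only checkpoints to end in .weights.h5:

```python
import os
import tensorflow as tf

# Hypothetical location; any writable directory on the training machine works.
checkpoint_filepath = 'C:/temp/checkpoints/best.weights.h5'
os.makedirs(os.path.dirname(checkpoint_filepath), exist_ok=True)

model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)

# After training, restore the best weights from the same path:
# model.load_weights(checkpoint_filepath)
```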
I made a simple cat-vs-dog detector using Keras and TensorFlow, then saved the model in .h5 format and the training state in checkpoints. Now I want to load the saved model and checkpoints and run it live with OpenCV VideoCapture using a camera. How can I do this?
Here is the code I used for training:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as display
from PIL import Image, ImageSequence
import os
import pathlib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2
import datetime
tf.compat.v1.enable_eager_execution()
epochs = 200
steps_per_epoch = 10
batch_size = 20
IMG_HEIGHT = 200
IMG_WIDTH = 200
train_dir = "Data/Train"
test_dir = "Data/Val"
train_image_generator = ImageDataGenerator(rescale=1. / 255)
test_image_generator = ImageDataGenerator(rescale=1. / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=train_dir,
                                                           shuffle=True,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='sparse')
test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,
                                                         directory=test_dir,
                                                         shuffle=True,
                                                         target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                         class_mode='sparse')
model = Sequential([
    Conv2D(265, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),  # 265 filters is unusual; 256 may have been intended
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    keras.layers.Dense(256, activation="relu"),
    keras.layers.Dense(144, activation="relu"),
    keras.layers.Dense(80, activation="softmax")  # 80 output classes matches COCO
])
model.compile(optimizer='adam',
              loss="sparse_categorical_crossentropy",
              metrics=['accuracy'])
model.summary()
tf.keras.utils.plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True, rankdir='TB')
checkpoint_path = "training/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)
os.system("rm -r logs")
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
history = model.fit(train_data_gen,steps_per_epoch=steps_per_epoch,epochs=epochs,validation_data=test_data_gen,validation_steps=10,callbacks=[cp_callback, tensorboard_callback])
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.save('model.h5', include_optimizer=True)
test_loss, test_acc = model.evaluate(test_data_gen)
print("Tested Acc: ", test_acc)
print("Tested Acc: ", test_acc*100, "%")
I am using the COCO dataset for image classification. I want to use a standard USB camera with OpenCV VideoCapture and stream it live to the trained and exported model for prediction. How can I do this?
You can do something like this:
import cv2
import numpy as np
from PIL import Image
from keras import models
import os
import tensorflow as tf
checkpoint_path = "training_gender/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
model = models.load_model('gender.h5')
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
video = cv2.VideoCapture(0)
while True:
    _, frame = video.read()
    # Note: OpenCV frames are BGR; Image.fromarray(frame, 'RGB') only relabels
    # the channels. cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) gives true RGB.
    im = Image.fromarray(frame, 'RGB')
    # Resize to the dimensions used while training
    im = im.resize((109, 82))
    img_array = np.array(im)
    # Expand dimensions to match the 4D tensor shape
    img_array = np.expand_dims(img_array, axis=0)
    # Call predict through Keras
    prediction = model.predict(img_array)  # [0][0]
    print(prediction)
    # Customize this part to your liking...
    if prediction == 1 or prediction == 0:
        print("No Human")
    elif prediction < 0.5 and prediction != 0:
        print("Female")
    elif prediction > 0.5 and prediction != 1:
        print("Male")
    cv2.imshow("Prediction", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
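One hedged caveat on the loop above: if the saved model was trained on rescaled inputs (e.g. ImageDataGenerator(rescale=1./255), as in the training code earlier), each frame needs the same preprocessing before predict. A small sketch of a matching helper; the (109, 82) size and the 1/255 scaling are assumptions carried over from the code above:

```python
import cv2
import numpy as np

def preprocess_frame(frame, size=(109, 82)):
    """Mirror the assumed training pipeline: BGR->RGB, resize, rescale to [0, 1]."""
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV captures BGR
    resized = cv2.resize(rgb, size)               # match the training input size
    arr = resized.astype(np.float32) / 255.0      # assumes rescale=1/255 at training
    return np.expand_dims(arr, axis=0)            # add the batch dimension
```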
I am trying to classify medical images taken from publicly available datasets, using transfer learning. When I ran the following code on the same dataset with VGG, ResNet, DenseNet, and Inception under TensorFlow 1.15.2, the accuracy was above 85% without fine-tuning. After upgrading to TensorFlow 2.x, the same code on the same dataset never crosses 32% accuracy. Can anyone help me rectify the issue? Is it something to do with the TensorFlow version, or something else? I have tried varying the learning rate, fine-tuning the model, etc. Is this the batch-normalization issue in Keras?
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import sys
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.densenet import DenseNet121
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.inception_v3 import InceptionV3
import cv2
import glob
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import tensorflow.keras as keras
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
from sklearn.metrics import accuracy_score
from keras import backend as k
# mlxtend's confusion_matrix shadows sklearn's; the y_target/y_predicted
# call below relies on the mlxtend version.
from mlxtend.evaluate import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
import math
import pandas as pd
from openpyxl import load_workbook
save_dir = '../new_tran/'
def extract_outputs(cnf_matrix, mdl_name):
    cnf_matrix_T = np.transpose(cnf_matrix)
    recall = np.diag(cnf_matrix) / np.sum(cnf_matrix, axis=1)
    precision = np.diag(cnf_matrix) / np.sum(cnf_matrix, axis=0)
    n_class = 3
    TP = np.zeros(n_class)
    FN = np.zeros(n_class)
    FP = np.zeros(n_class)
    TN = np.zeros(n_class)
    for i in range(n_class):
        TP[i] = cnf_matrix[i, i]
        FN[i] = np.sum(cnf_matrix[i]) - cnf_matrix[i, i]
        FP[i] = np.sum(cnf_matrix_T[i]) - cnf_matrix[i, i]
        TN[i] = np.sum(cnf_matrix) - TP[i] - FP[i] - FN[i]
    P = TP + FN
    N = FP + TN
    classwise_sensitivity = np.true_divide(TP, P)
    classwise_specificity = np.true_divide(TN, N)
    classwise_accuracy = np.true_divide((TP + TN), (P + N))
    OS = np.mean(classwise_sensitivity)
    OSp = np.mean(classwise_specificity)
    OA = np.sum(np.true_divide(TP, (P + N)))
    Px = np.sum(P)
    TPx = np.sum(TP)
    FPx = np.sum(FP)
    TNx = np.sum(TN)
    FNx = np.sum(FN)
    Nx = np.sum(N)
    pox = OA
    pex = ((Px * (TPx + FPx)) + (Nx * (FNx + TNx))) / (math.pow((TPx + TNx + FPx + FNx), 2))
    kappa_overall = [np.true_divide((pox - pex), (1 - pex)), np.true_divide((pex - pox), (1 - pox))]
    kappa = np.max(kappa_overall)
    Rcl = np.mean(recall)
    Prcn = np.mean(precision)
    # --------------------- Print all scores
    print('classwise_sen', classwise_sensitivity * 100)
    print('classwise_spec', classwise_specificity * 100)
    print('classwise_acc', classwise_accuracy * 100)
    print('overall_sen', OS * 100)
    print('overall_spec', OSp * 100)
    print('overall_acc', OA * 100)
    print('overall recall', Rcl)
    print('overall precision', Prcn)
    f1score = (2 * Prcn * Rcl) / (Prcn + Rcl)
    print('overall F1-score', f1score)
    print('Kappa', kappa)
def preProcess(X):
    X = X.astype('float32')
    # scale from [0,255] to [-1,1]
    X = (X - 127.5) / 127.5
    return X
train_datagen = ImageDataGenerator(preprocessing_function=preProcess)
test_datagen = ImageDataGenerator(preprocessing_function=preProcess)
IMG_SIZE = 256
batch_size = 16
train_data_dir = '../data/train/'
test_dir = '../data/test/'
val_dir = '../data/val/'
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=16,
    class_mode='sparse')
valid_generator = train_datagen.flow_from_directory(
    val_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=16,
    class_mode='sparse')
test_generator = train_datagen.flow_from_directory(
    test_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=16,
    class_mode='sparse')
test_im=np.concatenate([test_generator.next()[0] for i in range(test_generator.__len__())])
test_lb=np.concatenate([test_generator.next()[1] for i in range(test_generator.__len__())])
t_x, t_y = next(train_generator)
checkpoint1 = ModelCheckpoint(save_dir+"best_res.hdf5", monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
checkpoint2 = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
callbacks_list1 = [checkpoint1, checkpoint2]
def new_model():
    img_in = Input(t_x.shape[1:])
    # The keras_applications-era `layers=tf.keras.layers` kwarg is not needed
    # by tf.keras.applications in TF 2.x.
    model = DenseNet121(include_top=False,
                        weights='imagenet',
                        input_tensor=img_in,
                        input_shape=t_x.shape[1:],
                        pooling='avg')
    x = model.output
    predictions = Dense(3, activation="softmax", name="predictions")(x)
    model = Model(inputs=img_in, outputs=predictions)
    return model
model1 = new_model()
opt = Adam(learning_rate=3e-4)  # 'lr' is deprecated
model1.compile(optimizer = opt, loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
history1 = model1.fit(train_generator,
                      validation_data=valid_generator,
                      epochs=200,
                      callbacks=callbacks_list1)
model1.load_weights(save_dir+'best_res.hdf5')
model1.compile(optimizer = opt, loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
y_pred1 = model1.predict(test_im)
pred_class1=np.argmax(y_pred1,axis=1)
print('accuracy = ',accuracy_score(pred_class1,test_lb))
cm = confusion_matrix(y_target=test_lb,y_predicted=pred_class1, binary=False)
print(cm)
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.gcf().savefig(save_dir+"resnet.png", dpi=144)
plt.close()
extract_outputs(cm, 'Resnet')
Here are some screenshots of the output from TensorFlow 2.x.
Basically, flow_from_directory shuffles the data by default and you didn't change that, so test_im and test_lb come from two different shuffled passes over the test set and no longer line up. Just adding shuffle=False to your test_generator should be enough:
test_generator = train_datagen.flow_from_directory(
    test_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=16,
    class_mode='sparse',
    shuffle=False)
Or, if you really want it shuffled, test_im and test_lb have to be collected in the same pass so they stay in the same order. For example (note the iterator loops forever, so the loop must be bounded):
test_im = []
test_lb = []
for _ in range(len(test_generator)):
    im, lb = next(test_generator)
    test_im.append(im)
    test_lb.append(lb)
test_im = np.concatenate(test_im)
test_lb = np.concatenate(test_lb)
Hi everyone, I'm new to Keras and I'm in trouble. I've found how to combine two CNN models, but I can't feed the datasets to the models. Is there anyone who can help me?
Here is my code:
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 08:20:24 2019
#author: TECHFEA
"""
from keras import applications
from keras.layers import GlobalAveragePooling2D, Dense,Flatten,Conv2D,MaxPooling2D,Add,Input
from keras.layers import Concatenate
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import log_loss
from keras.models import Model
from keras.optimizers import SGD
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
import matplotlib.pyplot as plt
from keras.models import load_model
from scipy import interp
from itertools import cycle
from glob import glob
from keras.optimizers import Adam
train_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/train/"
validation_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/validation/"
train_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/train_lbp/"
validation_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/validation_lbp/"
className = glob(train_path + "*/")
numberOfClass = len(className)
batch_size=32
train_datagen = ImageDataGenerator(rescale=1./255,
                                   vertical_flip=False,
                                   horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_path, target_size=(72, 72),
                                                    batch_size=batch_size,
                                                    color_mode="rgb",
                                                    class_mode="categorical")
validation_generator = validation_datagen.flow_from_directory(validation_path, target_size=(72, 72),
                                                              batch_size=10,
                                                              color_mode="rgb",
                                                              class_mode="categorical")
train_generator2 = train_datagen.flow_from_directory(train_path2, target_size=(72, 72),
                                                     batch_size=batch_size,
                                                     color_mode="rgb",
                                                     class_mode="categorical")
validation_generator2 = validation_datagen.flow_from_directory(validation_path2, target_size=(72, 72),
                                                               batch_size=10,
                                                               color_mode="rgb",
                                                               class_mode="categorical")
base_model1 = applications.VGG16(weights='imagenet', include_top=False, input_shape=(72,72,3))
base_model1.summary()
x1=base_model1.output
x1=Flatten()(x1)
x1=Dense(100,activation='relu')(x1)
model1 = Model(inputs=base_model1.input, outputs=x1)
model1.summary()
input_shallow = Input(shape = (72,72,3))
conv1 = Conv2D(16, (3,3), activation = 'relu', padding = "same")(input_shallow)
pool1 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv1)
conv2 = Conv2D(32, (3,3), activation = 'relu', padding = "same")(pool1)
pool2 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv2)
flat1=Flatten()(pool2)
dense_1=Dense(100,activation='relu')(flat1)
model2=Model(inputs=input_shallow,outputs=dense_1)
model2.summary()
mergedOut = Add()([model1.output,model2.output])
out=Dense(2048, activation='relu')(mergedOut)
out = Dense(7, activation='softmax', name='predictions')(out)
model = Model(inputs=[model1.input,model2.input], outputs=out)
model.summary()
opt = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)  # 'lr' is deprecated
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
hist = model.fit_generator(
    generator=(train_generator, train_generator2),
    steps_per_epoch=10,
    epochs=16,
    validation_data=(validation_generator, validation_generator2),
    validation_steps=2,
    shuffle=True)
Here is what I want to do with the images:
Here is the error I got:
'DirectoryIterator' object has no attribute 'ndim'
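That error appears because fit_generator cannot take a tuple of DirectoryIterator objects; Keras tries to treat the tuple as a single (x, y) array pair. A hedged sketch of one common workaround, a wrapper generator that yields ([rgb_batch, lbp_batch], labels) in the shape the two-input Model expects; it assumes both directory trees hold the same files in the same order (or that shuffling is disabled / a shared seed is used), otherwise inputs and labels will not line up:

```python
# Sketch: zip the two iterators into the ([x1, x2], y) structure the
# two-branch Model expects. Labels are taken from the first stream and
# assumed identical in the second.
def combine_generators(gen_a, gen_b):
    while True:
        x_a, y_a = next(gen_a)
        x_b, _ = next(gen_b)
        yield [x_a, x_b], y_a

hist = model.fit_generator(
    combine_generators(train_generator, train_generator2),
    steps_per_epoch=10,
    epochs=16,
    validation_data=combine_generators(validation_generator, validation_generator2),
    validation_steps=2)
```

The shuffle flag is dropped because Keras ignores it for generators; any shuffling would have to happen inside the two iterators, consistently across both streams.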