I'm doing a Kaggle contest where I have to classify chest X-ray images into 3 categories: bacteria, virus, or normal. The problem is that my accuracy is really low (around 25%) and the loss is stuck at 0. I use a pretrained network whose weights come from a dataset of chest X-ray images. This network uses keras.losses.CategoricalCrossentropy as the loss function and keras.metrics.Accuracy() as the accuracy metric.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
TRAIN_DIR = 'D:/tf/archiveBilanciato/chest_xray/train/PNEUMONIA'
TEST_DIR = 'D:/tf/archiveBilanciato/chest_xray/test'
IMG_SIZE = 224  # 224 works best
image_size = (IMG_SIZE, IMG_SIZE)
batch_size = 32
LR = 1e-3
import os
nt = 0
for folder_name in ("bacteria", "normal","virus"):
folder_path = os.path.join("D:/tf/NeoArchiveBilanciato/chest_xray", folder_name)
for fname in os.listdir(folder_path):
fpath = os.path.join(folder_path, fname)
nt += 1
print("Totale immagini: %d" % nt)
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "D:/tf/NeoArchiveBilanciato/chest_xray",
    validation_split=0.2,
    subset="training",
    seed=1337,
    color_mode='rgb',
    image_size=image_size,
    batch_size=batch_size,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "D:/tf/NeoArchiveBilanciato/chest_xray",
    validation_split=0.2,
    subset="validation",
    seed=1337,
    color_mode='rgb',
    image_size=image_size,
    batch_size=batch_size,
)
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras.applications import DenseNet121
from keras.layers import GlobalAveragePooling2D,Dense
def pre_model():
    base_model = tf.keras.applications.DenseNet121(
        weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(14, activation="softmax")(x)
    pre_model = keras.Model(inputs=base_model.input, outputs=predictions)
    return pre_model
base_model = pre_model()
base_model.load_weights("D:/tf/nih_pretrained_chest_model.h5")
#print(base_model.summary())
from keras.layers import Input
#from tensorflow.keras.layers import Input
from kerassurgeon.operations import delete_layer, insert_layer
from keras.models import load_model
new_input = Input(shape=(IMG_SIZE, IMG_SIZE, 3), name='image_input')
model_imp = base_model
model_imp = Dense(3, activation='softmax')(model_imp.layers[-2].output)
base_model.trainable = False
mio_classificatore = Dense(1, activation='softmax')(base_model.layers[-2].output)
nuovo_model = keras.Model(inputs=base_model.input, outputs=mio_classificatore)
#print(nuovo_model.summary())
train = train_ds
val = val_ds
nuovo_model.compile(optimizer=keras.optimizers.Adam(),
                    loss=keras.losses.CategoricalCrossentropy(),
                    metrics=[keras.metrics.Accuracy()])
nuovo_model.fit(train, batch_size=32, epochs=14, validation_data=val)
how can I solve this problem?
There are 3 categories to predict, so the last layer in your model should contain 3 neurons (one for each class), not 1 neuron.
Try to change
mio_classificatore = Dense(1, activation='softmax')(base_model.layers[-2].output)
to
mio_classificatore = Dense(3, activation='softmax')(base_model.layers[-2].output)
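Two other details are worth checking (a sketch, assuming train_ds and val_ds come from image_dataset_from_directory as above, which returns integer class labels by default): with integer labels, keras.losses.CategoricalCrossentropy expects one-hot targets, so keras.losses.SparseCategoricalCrossentropy is the matching loss; and keras.metrics.Accuracy() compares raw predictions to labels element-wise, so the string 'accuracy' (which Keras resolves to the appropriate categorical accuracy) is what you want here. Putting it together:
mio_classificatore = Dense(3, activation='softmax')(base_model.layers[-2].output)
nuovo_model = keras.Model(inputs=base_model.input, outputs=mio_classificatore)
nuovo_model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=LR),
    # integer labels: image_dataset_from_directory uses label_mode='int' by default
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'])
nuovo_model.fit(train_ds, epochs=14, validation_data=val_ds)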
Currently, I am looking for ways to optimize my model (image classification for simple triangles and squares) and I've been stuck on understanding what these file paths are supposed to be referencing. Is it a path on your computer, or is it something else?
checkpoint_filepath = 'C:\tempfile'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
model_fit = model.fit(train_dataset,
steps_per_epoch = 5,
epochs = 30,
validation_data= validation_dataset,
callbacks=[reduce_lr, model_checkpoint_callback]
)
I've had the same issues also with Tensorboard. The code runs just fine when with the reduce_lr callback, but has issues when I add the modelCheckpoint callback.
Here is the rest of my code for reference:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import RMSprop
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
from keras.callbacks import ReduceLROnPlateau
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ModelCheckpoint
img = image.load_img('C:/Input_DataTS/Triangles/triangle.jpg')
plt.imshow(img)
cv2.imread('C:\Input_DataTS')
train = ImageDataGenerator(rescale= 1/255)
validation = ImageDataGenerator(rescale= 1/255)
train_dataset = train.flow_from_directory('C:\Input_DataTS',
target_size= (200,200),
batch_size= 3,
class_mode = 'binary')
validation_dataset = train.flow_from_directory('C:\Validiation_DataTS',
target_size= (200,200),
batch_size= 3,
class_mode = 'binary')
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16,(3,3),activation = 'relu', input_shape =(200, 200, 3)),
tf.keras.layers.MaxPool2D(2,2),
#
tf.keras.layers.Conv2D(32,(3,3),activation = 'relu'),
tf.keras.layers.MaxPool2D(2,2),
#
tf.keras.layers.Conv2D(64,(3,3),activation = 'relu'),
tf.keras.layers.MaxPool2D(2,2),
#
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32,activation = 'relu'),
tf.keras.layers.Dense(1,activation= 'sigmoid')
])
model.compile(loss= 'binary_crossentropy',
optimizer = RMSprop(lr=0.001),
metrics =['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
checkpoint_filepath = 'C:\tempfile'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
model_fit = model.fit(train_dataset,
steps_per_epoch = 5,
epochs = 30,
validation_data= validation_dataset,
callbacks=[reduce_lr, model_checkpoint_callback]
)
dir_path = 'C:/Testing_DataTS'
for i in os.listdir(dir_path):
    img = image.load_img(dir_path + '//' + i, target_size=(200, 200))
    plt.imshow(img)
    plt.show()
    X = image.img_to_array(img)
    X = np.expand_dims(X, axis=0)
    images = np.vstack([X])
    val = model.predict(images)
    if val == 0:
        print("square")
    else:
        print("triangle")
Although this isn't needed for this model, I would like to learn how to do it properly for future cases. If anyone can help me with this issue, I'd greatly appreciate it. Thank you for your time!
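For what it's worth, a quick sketch of the path question (assumptions: a Windows machine and a made-up directory name): checkpoint_filepath is simply a location on the local disk where ModelCheckpoint writes the weight files; nothing is downloaded or referenced remotely. Note that in a plain Python string 'C:\tempfile' the \t is interpreted as a tab character, so a raw string or forward slashes are safer:
# Hypothetical local folder -- ModelCheckpoint just writes weight files under this path.
checkpoint_filepath = r'C:\checkpoints\best_weights'   # raw string avoids the \t escape
# or: checkpoint_filepath = 'C:/checkpoints/best_weights'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)
# After training, the same path can be used to restore the best weights:
# model.load_weights(checkpoint_filepath)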
I made a simple cat vs. dog detector using Keras and TensorFlow. Then I saved the model in .h5 format and the training weights in checkpoints. Now, I want to load the checkpoints and the saved model and run it live with OpenCV VideoCapture using a camera. How can I do this?
Here is the code I used for training:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as display
from PIL import Image, ImageSequence
import os
import pathlib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2
import datetime
tf.compat.v1.enable_eager_execution()
epochs = 200
steps_per_epoch = 10
batch_size = 20
IMG_HEIGHT = 200
IMG_WIDTH = 200
train_dir = "Data/Train"
test_dir = "Data/Val"
train_image_generator = ImageDataGenerator(rescale=1. / 255)
test_image_generator = ImageDataGenerator(rescale=1. / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='sparse')
test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,
directory=test_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='sparse')
model = Sequential([
Conv2D(265, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(),
Conv2D(32, 3, padding='same', activation='relu'),
MaxPooling2D(),
Flatten(),
keras.layers.Dense(256, activation="relu"),
keras.layers.Dense(144, activation="relu"),
keras.layers.Dense(80, activation="softmax")
])
model.compile(optimizer='adam',
loss="sparse_categorical_crossentropy",
metrics=['accuracy'])
model.summary()
tf.keras.utils.plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True, rankdir='TB')
checkpoint_path = "training/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
os.system("rm -r logs")
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
history = model.fit(train_data_gen,steps_per_epoch=steps_per_epoch,epochs=epochs,validation_data=test_data_gen,validation_steps=10,callbacks=[cp_callback, tensorboard_callback])
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.save('model.h5', include_optimizer=True)
test_loss, test_acc = model.evaluate(test_data_gen)
print("Tested Acc: ", test_acc)
print("Tested Acc: ", test_acc*100, "%")
I am using the COCO dataset to do image classification. I want to use a standard USB camera with OpenCV to do Videocapture and stream it live to the trained and exported model for prediction. How can I do this?
You can do something like this:
import cv2
import numpy as np
from PIL import Image
from keras import models
import os
import tensorflow as tf
checkpoint_path = "training_gender/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
model = models.load_model('gender.h5')
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
video = cv2.VideoCapture(0)
while True:
    _, frame = video.read()
    # Convert the captured frame into RGB
    im = Image.fromarray(frame, 'RGB')
    # Resizing into dimensions you used while training
    im = im.resize((109, 82))
    img_array = np.array(im)
    # Expand dimensions to match the 4D Tensor shape.
    img_array = np.expand_dims(img_array, axis=0)
    # Calling the predict function using keras
    prediction = model.predict(img_array)  # [0][0]
    print(prediction)
    # Customize this part to your liking...
    if prediction == 1 or prediction == 0:
        print("No Human")
    elif prediction < 0.5 and prediction != 0:
        print("Female")
    elif prediction > 0.5 and prediction != 1:
        print("Male")
    cv2.imshow("Prediction", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
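One caveat when adapting this loop to the cat-vs-dog model trained above (a sketch; the class-to-index mapping is an assumption, check train_data_gen.class_indices): the frame should be resized to the 200x200 training input and rescaled by 1/255 exactly as the ImageDataGenerator did, and since that model ends in a softmax trained with sparse labels, the class comes from argmax rather than a 0.5 threshold:
im = im.resize((200, 200))                            # match the training target_size
img_array = np.array(im).astype('float32') / 255.0    # same rescale=1./255 as in training
img_array = np.expand_dims(img_array, axis=0)
probs = model.predict(img_array)[0]
class_index = int(np.argmax(probs))                   # assumed mapping: 0 = cat, 1 = dog
print("cat" if class_index == 0 else "dog")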
I am very new to deep learning. I am training on anime illustration images and I am receiving the error: logits and labels must be broadcastable: logits_size=[214,2] labels_size=[214,173]
I am sure there are other errors in my code, but I am unsure where to look. I ran model.summary() and noticed
Total params: 12,219,618
Trainable params: 7,080,962
Non-trainable params: 5,138,656
I would really appreciate any help. Thanks.
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from tensorflow.keras import backend as K
from tensorflow.keras import metrics, optimizers
import matplotlib.pyplot as plt
train_datagen = ImageDataGenerator(
rescale=1. / 255,
rotation_range = 30,
zoom_range = 0.2,
width_shift_range=0.1,
height_shift_range=0.1,
validation_split = 0.15)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size = (75,75),
batch_size = 214,
class_mode = 'categorical',
subset='training')
#validation_generator = test_datagen.flow_from_directory(
# validation_dir,
# target_size = (75,75),
# batch_size = 37,
# class_mode = 'categorical',
# subset = 'validation')
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(75,75),
batch_size = 32,
class_mode = 'categorical')
# Inspect batch
sample_training_images, _ = next(train_generator)
from tensorflow.keras.applications.inception_v3 import InceptionV3
def model_output_for_TL(pre_trained_model, last_output):
    x = Flatten()(last_output)
    # Dense hidden layer
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    # Output neuron.
    x = Dense(2, activation='softmax')(x)
    model = Model(pre_trained_model.input, x)
    return model
pre_trained_model = InceptionV3(input_shape = (75, 75, 3),
include_top = False,
classes=173,
weights = 'imagenet')
for layer in pre_trained_model.layers:
    layer.trainable = False
last_layer = pre_trained_model.get_layer('mixed5')
last_output = last_layer.output
model_TL = model_output_for_TL(pre_trained_model, last_output)
model_TL.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history_TL = model_TL.fit(
train_generator,
steps_per_epoch=10,
epochs=60,
verbose=2)
#validation_data = validation_generator)
tf.keras.models.save_model(model_TL,'my_model.hdf5')
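A note on the broadcast error: with class_mode='categorical', flow_from_directory one-hot encodes the labels over all 173 class folders it finds, while the final Dense(2, activation='softmax') layer only produces 2 logits, hence logits_size=[214,2] vs labels_size=[214,173]. The usual fix is to make the output layer match the number of classes the generator reports, for example (a sketch using the generator's own attribute):
num_classes = train_generator.num_classes           # 173 class folders found on disk
x = Dense(num_classes, activation='softmax')(x)     # instead of Dense(2, ...)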
I am trying to classify the medical images taken from publicly available datasets. I used transfer learning for this task. Initially, when I had used the following code on the same dataset with VGG, Resnet, Densenet, and Inception, the accuracy was above 85% without fine-tuning (TensorFlow version 1.15.2.) Now, after upgrading the TensorFlow to 2.x, when I try the same code on the same dataset, the accuracy is never crossing 32%. Can anyone please help me rectify the issue? Is it something to do with the TensorFlow version or something else? I have tried varying the learning rate, fine-tuning the model, etc. Is this the issue with batch normalization error in Keras?
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import sys
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.densenet import DenseNet121
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.inception_v3 import InceptionV3
import cv2
import glob
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import tensorflow.keras as keras
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from keras import backend as k
from mlxtend.evaluate import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
import math
import pandas as pd
from openpyxl import load_workbook
save_dir = '../new_tran/'
def extract_outputs(cnf_matrix, mdl_name):
    cnf_matrix_T = np.transpose(cnf_matrix)
    recall = np.diag(cnf_matrix) / np.sum(cnf_matrix, axis=1)
    precision = np.diag(cnf_matrix) / np.sum(cnf_matrix, axis=0)
    n_class = 3
    TP = np.zeros(n_class)
    FN = np.zeros(n_class)
    FP = np.zeros(n_class)
    TN = np.zeros(n_class)
    for i in range(n_class):
        TP[i] = cnf_matrix[i, i]
        FN[i] = np.sum(cnf_matrix[i]) - cnf_matrix[i, i]
        FP[i] = np.sum(cnf_matrix_T[i]) - cnf_matrix[i, i]
        TN[i] = np.sum(cnf_matrix) - TP[i] - FP[i] - FN[i]
    P = TP + FN
    N = FP + TN
    classwise_sensitivity = np.true_divide(TP, P)
    classwise_specificity = np.true_divide(TN, N)
    classwise_accuracy = np.true_divide((TP + TN), (P + N))
    OS = np.mean(classwise_sensitivity)
    OSp = np.mean(classwise_specificity)
    OA = np.sum(np.true_divide(TP, (P + N)))
    Px = np.sum(P)
    TPx = np.sum(TP)
    FPx = np.sum(FP)
    TNx = np.sum(TN)
    FNx = np.sum(FN)
    Nx = np.sum(N)
    pox = OA
    pex = ((Px * (TPx + FPx)) + (Nx * (FNx + TNx))) / (math.pow((TPx + TNx + FPx + FNx), 2))
    kappa_overall = [np.true_divide((pox - pex), (1 - pex)), np.true_divide((pex - pox), (1 - pox))]
    kappa = np.max(kappa_overall)
    Rcl = np.mean(recall)
    Prcn = np.mean(precision)
    # --------------------- Print all scores
    print('classwise_sen', classwise_sensitivity * 100)
    print('classwise_spec', classwise_specificity * 100)
    print('classwise_acc', classwise_accuracy * 100)
    print('overall_sen', OS * 100)
    print('overall_spec', OSp * 100)
    print('overall_acc', OA * 100)
    print('overall recall', Rcl)
    print('overall precision', Prcn)
    f1score = (2 * Prcn * Rcl) / (Prcn + Rcl)
    print('overall F1-score', f1score)
    print('Kappa', kappa)
def preProcess(X):
    X = X.astype('float32')
    # scale from [0,255] to [-1,1]
    X = (X - 127.5) / 127.5
    return X
train_datagen = ImageDataGenerator(preprocessing_function=preProcess)
test_datagen = ImageDataGenerator(preprocessing_function=preProcess)
IMG_SIZE = 256
batch_size = 16
train_data_dir = '../data/train/'
test_dir = '../data/test/'
val_dir = '../data/val/'
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(IMG_SIZE , IMG_SIZE),
batch_size=16,
class_mode='sparse')
valid_generator = train_datagen.flow_from_directory(
val_dir,
target_size=(IMG_SIZE , IMG_SIZE),
batch_size=16,
class_mode='sparse')
test_generator = train_datagen.flow_from_directory(
test_dir,
target_size=(IMG_SIZE , IMG_SIZE),
batch_size=16,
class_mode='sparse')
test_im=np.concatenate([test_generator.next()[0] for i in range(test_generator.__len__())])
test_lb=np.concatenate([test_generator.next()[1] for i in range(test_generator.__len__())])
t_x, t_y = next(train_generator)
checkpoint1 = ModelCheckpoint(save_dir+"best_res.hdf5", monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
checkpoint2 = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
callbacks_list1 = [checkpoint1, checkpoint2]
def new_model():
    img_in = Input(t_x.shape[1:])
    model = DenseNet121(include_top=False,
                        layers=tf.keras.layers,
                        weights='imagenet',
                        input_tensor=img_in,
                        input_shape=t_x.shape[1:],
                        pooling='avg')
    x = model.output
    predictions = Dense(3, activation="softmax", name="predictions")(x)
    model = Model(inputs=img_in, outputs=predictions)
    return model
model1 = new_model()
opt = Adam(lr=3E-4)
model1.compile(optimizer = opt, loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
history1 = model1.fit(train_generator,
validation_data = valid_generator,
epochs = 200,
callbacks=callbacks_list1)
model1.load_weights(save_dir+'best_res.hdf5')
model1.compile(optimizer = opt, loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
y_pred1 = model1.predict(test_im)
pred_class1=np.argmax(y_pred1,axis=1)
print('accuracy = ',accuracy_score(pred_class1,test_lb))
cm = confusion_matrix(y_target=test_lb,y_predicted=pred_class1, binary=False)
print(cm)
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.gcf().savefig(save_dir+"resnet.png", dpi=144)
plt.close()
extract_outputs(cm, 'Resnet')
Here are some of the screenshots of the output from tensorflow 2.x
Basically, flow_from_directory shuffles the data by default and you didn't change that, so test_im and test_lb come from two different shuffled passes over the generator.
Just adding shuffle=False to your test_generator should be enough.
Like this:
test_generator = train_datagen.flow_from_directory(
test_dir,
target_size=(IMG_SIZE, IMG_SIZE),
batch_size=16,
class_mode='sparse',
shuffle=False)
Or, if you really want it shuffled, then test_im and test_lb have to be collected in the same order. For example:
test_im = []
test_lb = []
# A DirectoryIterator loops forever, so only take one full pass over the data.
for _ in range(len(test_generator)):
    im, lb = next(test_generator)
    test_im.append(im)
    test_lb.append(lb)
test_im = np.concatenate(test_im)
test_lb = np.concatenate(test_lb)
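With shuffle=False there is also no need to rebuild the arrays by hand; a common pattern (a sketch, assuming the unshuffled test_generator from above) is to predict straight from the generator and read the labels from it in the same file order:
test_generator.reset()
y_pred1 = model1.predict(test_generator, steps=len(test_generator))
pred_class1 = np.argmax(y_pred1, axis=1)
test_lb = test_generator.classes        # ground-truth labels in the same (unshuffled) order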
Hi everyone, I'm new to Keras and I'm in trouble. I've found out how to combine two CNN models, but I can't feed the datasets to the models. Is there anyone who can help me?
Here is my code :
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 08:20:24 2019
#author: TECHFEA
"""
from keras import applications
from keras.layers import GlobalAveragePooling2D, Dense,Flatten,Conv2D,MaxPooling2D,Add,Input
from keras.layers import Concatenate
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import log_loss
from keras.models import Model
from keras.optimizers import SGD
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
import matplotlib.pyplot as plt
from keras.models import load_model
from scipy import interp
from itertools import cycle
from glob import glob
from keras.optimizers import Adam
train_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/train/"
validation_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/validation/"
train_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/train_lbp/"
validation_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/validation_lbp/"
className = glob(train_path + "*/")
numberOfClass = len(className)
batch_size=32
train_datagen = ImageDataGenerator(rescale= 1./255,
vertical_flip=False,
horizontal_flip=True)
validation_datagen = ImageDataGenerator(rescale = 1./255)
train_generator = train_datagen.flow_from_directory(train_path, target_size =(72,72),
batch_size = batch_size,
color_mode = "rgb",
class_mode = "categorical")
validation_generator = validation_datagen.flow_from_directory(validation_path, target_size =(72,72),
batch_size = 10,
color_mode = "rgb",
class_mode = "categorical")
train_generator2 = train_datagen.flow_from_directory(train_path2, target_size =(72,72),
batch_size = batch_size,
color_mode = "rgb",
class_mode = "categorical")
validation_generator2 = validation_datagen.flow_from_directory(validation_path2, target_size =(72,72),
batch_size = 10,
color_mode = "rgb",
class_mode = "categorical")
base_model1 = applications.VGG16(weights='imagenet', include_top=False, input_shape=(72,72,3))
base_model1.summary()
x1=base_model1.output
x1=Flatten()(x1)
x1=Dense(100,activation='relu')(x1)
model1 = Model(inputs=base_model1.input, outputs=x1)
model1.summary()
input_shallow = Input(shape = (72,72,3))
conv1 = Conv2D(16, (3,3), activation = 'relu', padding = "same")(input_shallow)
pool1 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv1)
conv2 = Conv2D(32, (3,3), activation = 'relu', padding = "same")(pool1)
pool2 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv2)
flat1=Flatten()(pool2)
dense_1=Dense(100,activation='relu')(flat1)
model2=Model(inputs=input_shallow,outputs=dense_1)
model2.summary()
mergedOut = Add()([model1.output,model2.output])
out=Dense(2048, activation='relu')(mergedOut)
out = Dense(7, activation='softmax', name='predictions')(out)
model = Model(inputs=[model1.input,model2.input], outputs=out)
model.summary()
opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
hist = model.fit_generator(
generator=(train_generator,train_generator2),
steps_per_epoch = 10,
epochs=16,
validation_data =(validation_generator,validation_generator2),
validation_steps = 2,
shuffle=True)
Here is what I want to do with the images:
Here is the error I got:
'DirectoryIterator' object has no attribute 'ndim'
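The 'ndim' error typically comes from passing a tuple of generators where fit_generator expects a single generator (Keras tries to treat each tuple element as an array). Since the model has two inputs, each batch needs to be a pair ([x_orig, x_lbp], y). A minimal sketch of a wrapper generator (assumption: both directory trees hold the same images under the same class folders, and both iterators use shuffle=False or the same seed so the batches stay aligned):
def paired_generator(gen_a, gen_b):
    # Yield ([batch_a, batch_b], labels_a) for the two-input model.
    while True:
        x_a, y_a = next(gen_a)
        x_b, _ = next(gen_b)
        yield [x_a, x_b], y_a

hist = model.fit_generator(
    paired_generator(train_generator, train_generator2),
    steps_per_epoch=len(train_generator),
    epochs=16,
    validation_data=paired_generator(validation_generator, validation_generator2),
    validation_steps=len(validation_generator))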