How to fix 'ValueError: Error when checking target: expected dense_1 to have 2 dimensions, but got array with shape (373, 2, 2)' - python

This is my first time doing image classification. I am trying to classify images into 2 classes. My images are 128*128 in RGB, so I think the dimensions are (128, 128, 3). The code is:
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt

train = []
train_label = []
train_files_1 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Atopic_Dermatitis/*.jpg')
for files in train_files_1:
    image = cv2.imread(files)
    train.append(image)
    train_label.append([0., 1.])
train_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Contact_Dermatitis/*.jpg')
for files in train_files_2:
    image = cv2.imread(files)
    train.append(image)
    train_label.append([1., 0.])
train_array = np.array(train, dtype='int')
train_label_array = np.array(train_label, dtype='int')

test = []
test_label = []
test_files = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Atopic_Dermatitis/*.jpg')
for files in test_files:
    image = cv2.imread(files)
    test.append(image)
    test_label.append([0., 1.])
test_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Contact_Dermatitis/*.jpg')
for files in test_files_2:
    image = cv2.imread(files)
    test.append(image)
    test_label.append([1., 0.])
test_array = np.array(test, dtype='int')
test_label_array = np.array(test_array, dtype='int')

from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras.utils import to_categorical

x_train = train_array
y_train = train_label_array
x_test = test_array
y_test = test_label_array
x_train = x_train.reshape(373, 128, 128, 3)
x_test = x_test.reshape(95, 128, 128, 3)

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(128, 128, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Conv2D(16, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, to_categorical(y_train), validation_data=(x_test, to_categorical(y_test)), epochs=3)
After running the code it shows 'ValueError: Error when checking target: expected dense_1 to have 2 dimensions, but got array with shape (373, 2, 2)' and I don't know how to fix it. You can see the code on GitHub at https://github.com/filmer2002/real_rash_project/blob/master/images_to_numpy_and_CNN_code.ipynb

You are getting this error because the labels you feed to the network have a different shape from its output layer.
to_categorical() takes two arguments, the labels and the number of classes; the labels must be integers (dtype=int) smaller than the number of classes. Also make sure that your tensorflow and tensorboard installations are up to date.
Try

import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt

train = []
train_label = []
train_files_1 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Atopic_Dermatitis/*.jpg')
for files in train_files_1:
    image = cv2.imread(files)
    train.append(image)
    train_label.append(1)
train_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Contact_Dermatitis/*.jpg')
for files in train_files_2:
    image = cv2.imread(files)
    train.append(image)
    train_label.append(0)
train_array = np.array(train, dtype=int)
train_label_array = np.array(train_label, dtype=int)

test = []
test_label = []
test_files = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Atopic_Dermatitis/*.jpg')
for files in test_files:
    image = cv2.imread(files)
    test.append(image)
    test_label.append(1)
test_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Contact_Dermatitis/*.jpg')
for files in test_files_2:
    image = cv2.imread(files)
    test.append(image)
    test_label.append(0)
test_array = np.array(test, dtype=int)
test_label_array = np.array(test_label, dtype=int)  # build the label array from test_label, not test_array

from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras.utils import to_categorical

x_train = train_array
y_train = train_label_array
x_test = test_array
y_test = test_label_array
x_train = x_train.reshape(373, 128, 128, 3)
x_test = x_test.reshape(95, 128, 128, 3)

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(128, 128, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Conv2D(16, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, to_categorical(y_train, 10), validation_data=(x_test, to_categorical(y_test, 10)), epochs=3)
The second argument given to to_categorical() in this case is 10 because your output layer has shape (10,).
Also make sure that you have an equal number of images and labels; you can test that with:

x_train.shape[0] == to_categorical(y_train, 10).shape[0]
True  # should be True if everything is ok

One final note: tensorflow works with tensors, i.e. n-dimensional arrays. Don't use plain Python lists when working with training data; numpy arrays should be used most of the time.
EDIT:
Only 1-dimensional arrays or lists of int values are OK for to_categorical().
Don't pass lists of [0, 1] elements to to_categorical(); it won't work. Just pass the integer 1 and to_categorical() will convert it into a [0, 1] element for you.
Your mistake was to use those lists as labels for to_categorical(): it only accepts a 1-dimensional int array as its first argument. The shape you were giving it was (number_of_labels, 2), while it only works with shape (number_of_labels,).
OR
If you want to use lists/arrays of elements like [0, 1] and your model's output shape is 2, then simply don't use to_categorical() at all.
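A quick way to see the shape difference (a sketch with dummy labels, not your real data):

import numpy as np
from keras.utils import to_categorical

int_labels = np.array([1, 0, 1])                        # 1-D integer labels: what to_categorical expects
print(to_categorical(int_labels, 2).shape)              # (3, 2) -- one row per label, one column per class

pair_labels = np.array([[0., 1.], [1., 0.], [0., 1.]])  # labels already stored as [0, 1] pairs, shape (3, 2)
print(to_categorical(pair_labels).shape)                # (3, 2, 2) -- the same extra dimension as in your (373, 2, 2) error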

This should work, and if it doesn't you have inconsistent data.
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten
from tensorflow.keras.utils import to_categorical

#assert tf.__version__ == '1.14.0'

train = np.zeros((1, 128, 128, 3))
train_label = []
train_files_1 = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in train_files_1:
    image = cv2.imread(files)
    img = np.array(image).astype(int).reshape(128, 128, 3)
    train = np.concatenate((train, [img]), axis=0)
    train_label.append(1)
train_files_2 = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in train_files_2:
    image = cv2.imread(files)
    img = np.array(image).astype(int)
    train = np.concatenate((train, [img]), axis=0)
    train_label.append(0)
x_train = train[1:]
y_train = to_categorical(train_label, 2)

test = np.zeros((1, 128, 128, 3))
test_label = []
test_files = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in test_files:
    image = cv2.imread(files)
    img = np.array(image).astype(int)
    test = np.concatenate((test, [img]), axis=0)
    test_label.append(1)
test_files_2 = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in test_files_2:
    image = cv2.imread(files)
    img = np.array(image).astype(int)
    test = np.concatenate((test, [img]), axis=0)
    test_label.append(0)
x_test = test[1:]
y_test = to_categorical(test_label, 2)

print('train', x_train.shape)
print('test', x_test.shape)
print('test labels', y_test.shape)
print('train labels', y_train.shape)

assert x_train.shape == (len(train[1:]), 128, 128, 3)
assert x_test.shape == (len(test[1:]), 128, 128, 3)
assert y_train.shape == (len(train_label), 2)
assert y_test.shape == (len(test_label), 2)
# if you get an error from one of the asserts above => your data is not consistent

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(128, 128, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Conv2D(16, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=3)
print('all good, all done!')
Note: mixing Python lists and numpy arrays is a very bad idea.
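A tiny illustration of why (a sketch with dummy arrays, not your data): if the images hidden in a list don't all have the same shape, numpy cannot build a proper 4-D tensor from them.

import numpy as np

imgs = [np.zeros((128, 128, 3)), np.zeros((64, 64, 3))]  # inconsistent shapes hidden in a list
arr = np.array(imgs, dtype=object)                       # no shape check happens here
print(arr.shape)                                         # (2,) instead of (2, 128, 128, 3)

Keras then sees an object array with the wrong number of dimensions instead of a batch of images.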

Related

ValueError: Shapes (29, 1) and (42, 1, 29) are incompatible [Keras]

Hi, I'm trying to use Keras to build a model that takes 42 inputs and outputs one of 29 classes, but when I try to fit my model I get the error above.
from tensorflow.keras.utils import to_categorical

Y_train_cat = to_categorical(Y_train, 29)
Y_test_cat = to_categorical(Y_test, 29)
Y_val_cat = to_categorical(Y_val, 29)

train_dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train_cat))
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, Y_test_cat))
val_dataset = tf.data.Dataset.from_tensor_slices((X_val, Y_val_cat))

from tensorflow.keras.callbacks import EarlyStopping
early_stopping_monitor = EarlyStopping(monitor='val_loss', patience=2)

model = Sequential([
    Dense(units=102, input_shape=(42, 1), activation='relu'),
    Dense(units=56, activation='relu'),
    Dense(units=29, activation='softmax')
])
model.compile(optimizer=Adam(learning_rate=0.0001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
tf.keras.utils.plot_model(model)
model.fit(x=train_dataset,
          batch_size=30,
          epochs=10,
          shuffle=False,
          verbose=2,
          validation_data=val_dataset,
          callbacks=[early_stopping_monitor]
          )
The shape of train_dataset is
<TensorSliceDataset shapes: ((42,), (29,)), types: (tf.float64, tf.float32)>
Thank you in advance!
Since Dense layers only transform the last dimension, you should first Flatten the data to a vector and then feed it through the dense layers. Otherwise the layers process the data along the last dimension and the other dimensions are carried through to the output.
In your case the input shape is (42, 1), so the output shape of the last layer is (42, 29), which is incompatible with the labels you have provided. So consider changing the input shape: either squeeze your data from (42, 1) to (42,) and adjust the input_shape argument, or add a Flatten layer as the first layer, like this:
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(42, 1)),
    Dense(units=102, activation='relu'),
    Dense(units=56, activation='relu'),
    Dense(units=29, activation='softmax')
])
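To see why the original shape fails, here is a small check (a sketch, assuming TensorFlow 2.x) comparing a Dense(29) layer's output on a (42, 1) input with and without flattening:

import tensorflow as tf

x = tf.random.uniform((1, 42, 1))             # one sample shaped (42, 1)
print(tf.keras.layers.Dense(29)(x).shape)     # (1, 42, 29): Dense transforms only the last axis
flat = tf.keras.layers.Flatten()(x)           # shape (1, 42)
print(tf.keras.layers.Dense(29)(flat).shape)  # (1, 29): compatible with the (29,) one-hot labels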
UPDATE: Also batch your data to add a batch dimension like this:
BATCH_SIZE = 30
train_dataset = tf.data.Dataset.from_tensor_slices((X_train,Y_train_cat)).batch(BATCH_SIZE)
The whole code, with a random dataset whose shapes match yours:
import tensorflow as tf
import numpy as np
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

X_train = tf.random.uniform((100, 42))
Y_train = tf.random.uniform((100,), dtype=tf.int32, minval=0, maxval=29)
Y_train_cat = to_categorical(Y_train, 29)
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train_cat)).batch(30)

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(42, 1)),
    Dense(units=102, activation='relu'),
    Dense(units=56, activation='relu'),
    Dense(units=29, activation='softmax')
])
model.compile(optimizer=Adam(learning_rate=0.0001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
tf.keras.utils.plot_model(model)
model.summary()
model.fit(x=train_dataset,
          batch_size=30,
          epochs=10,
          shuffle=False,
          verbose=2,
          )

"Input 0 of layer sequential_1 is incompatible with the layer" with tf.data.Dataset

I am getting:
ValueError: Input 0 of layer sequential_1 is incompatible with the layer: : expected min_ndim=4, found ndim=3. Full shape received: [300, 300, 3]
The model summary shows 4 input dimensions, but the error reports only 3.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import logging

logger = tf.get_logger()
logger.setLevel(logging.ERROR)

from tensorflow.keras.preprocessing.image import ImageDataGenerator

(train_dataset, test_dataset), metadata = tfds.load('rock_paper_scissors', split=['train', 'test'], as_supervised=True, with_info=True)

class_names = ['rock', 'paper', 'scissors']

num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples: {}".format(num_test_examples))

get_label_name = metadata.features['label'].int2str
print(get_label_name(0))
print(get_label_name(1))
print(get_label_name(2))

def format_example(image, label):
    # Make image color values to be float.
    image = tf.cast(image, tf.float32)
    # Make image color values to be in [0..1] range.
    image = image / 255.
    return image, label

dataset_train = train_dataset.map(format_example)
dataset_test = test_dataset.map(format_example)

l1 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(300, 300, 3))
l2 = tf.keras.layers.MaxPooling2D(2, 2)
l3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')
l4 = tf.keras.layers.MaxPooling2D(2, 2)
l5 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu')
l6 = tf.keras.layers.MaxPooling2D(2, 2)
l7 = tf.keras.layers.Flatten()
l8 = tf.keras.layers.Dense(512, activation='relu')
l9 = tf.keras.layers.Dense(3, activation='softmax')

model = tf.keras.Sequential([l1, l2, l3, l4, l5, l6, l7, l8, l9])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy,
              metrics=['accuracy'])
model.summary()

epochs = 10
batch_size = 32

history = model.fit(
    dataset_train,
    validation_data=dataset_test,
    steps_per_epoch=int(np.ceil(num_train_examples / float(batch_size))),
    validation_steps=int(np.ceil(num_test_examples / float(batch_size))),
    epochs=epochs
)
Where am I going wrong?
I have not encountered this error with the cats and dogs dataset.
You need to batch your dataset in order to get the right shape:
dataset_train = train_dataset.map(format_example).batch(1)
dataset_test = test_dataset.map(format_example).batch(1)
When you don't batch, a picture with shape (h, w, c) will be returned. If you batch, you will get (n, h, w, c), which is what Keras expects. Do the test yourself:
import tensorflow as tf

images = tf.random.uniform(shape=(10, 224, 224, 3), maxval=256, dtype=tf.int32)
ds = tf.data.Dataset.from_tensor_slices(images)

for pic in ds:
    print(pic.shape)
    break

(224, 224, 3)

With batching:

for pic in ds.batch(4):
    print(pic.shape)
    break

(4, 224, 224, 3)

ValueError: Error when checking target: expected activation_1 to have shape (158,) but got array with shape (121,)

Got the following error when training my CNN:
Traceback (most recent call last):
  File "train_and_test.py", line 66, in <module>
    H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 972, in fit
    batch_size=batch_size)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 789, in _standardize_user_data
    exception_prefix='target')
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py", line 138, in standardize_input_data
    str(data_shape))
ValueError: Error when checking target: expected activation_1 to have shape (158,) but got array with shape (121,)
activation_1 is the last layer of my network; it should receive an array of size 158, because my problem has 158 classes. I build the model like this:
model = DeepIrisNet_A.build(width=128, height=128, depth=1, classes=158)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
Now here's the strange thing: if I put a number X in the classes argument that is different from 158, the error says:
ValueError: Error when checking target: expected activation_1 to have shape (X,) but got array with shape (158,)
So the input array has the right dimensions! But every time I use the correct value, the input array never has shape (158,).
Where am I wrong? Any suggestions?
EDIT - Here's some of my code:
This is for training and testing the CNN
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from datasets import UtirisLoader
from models import DeepIrisNet_A
from utilities import ResizerPreprocessor
from utilities import ConvertColorSpacePreprocessor
from keras.optimizers import SGD
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import tensorflow as tf
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
ap.add_argument("-o", "--output", required=True, help="path to the output loss/accuracy plot")
args = vars(ap.parse_args())
# grab the list of images that we’ll be describing
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize the image preprocessor
rp = ResizerPreprocessor(128, 128)
ccsp = ConvertColorSpacePreprocessor()
# load the dataset from disk then scale the raw pixel intensities to the range [0, 1]
utiris = UtirisLoader(preprocessors=[rp, ccsp])
(data, labels) = utiris.load_infrared(imagePaths, verbose=100)
# print some infos
print("DATA LENGTH: {}".format(len(data)))
print("LABELS LENGTH: {}".format(len(labels)))
unique = np.unique(labels, return_counts=False)
print("LABELS COUNT: {}".format(len(unique)))
# convert data to float
data = data.astype("float") / 255.0
# partition the data into training and testing splits using 75% of the data for training
# and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
#trainX = np.resize(trainX, (-1, 128, 128, 1))
trainX = trainX.reshape((trainX.shape[0], 128, 128, 1))
testX = testX.reshape((testX.shape[0], 128, 128, 1))
# convert the labels from integers to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)
print("trainY: {}".format(trainY))
# initialize the optimizer and model_selection
print("[INFO] compiling model...")
opt = SGD(lr=0.01, momentum=0.9)
model = DeepIrisNet_A.build(width=128, height=128, depth=1, classes=158)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
#train the network
print("[INFO] training network...")
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=["cat", "dog", "panda"]))
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 100), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, 100), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, 100), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, 100), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig(args["output"])
This is the structure of the CNN
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras import backend as K

class DeepIrisNet_A:
    @staticmethod
    def build(width, height, depth, classes):
        # initialize the model along with the input shape to be "channels last" and the channels dimension itself
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1  # the index of the channel dimension, needed for batch normalization; -1 means channels is the last dimension in the input shape
        # if we are using "channels first", update the input shape
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1
        # CONV 1
        model.add(Conv2D(32, (5, 5), strides=(1, 1), padding="same", input_shape=inputShape))
        # BN 1
        model.add(BatchNormalization(axis=chanDim))
        # CONV 2
        model.add(Conv2D(64, (3, 3), strides=(1, 1), padding="valid"))
        # POOL 1
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # BN 2
        model.add(BatchNormalization(axis=chanDim))
        # CONV 3
        model.add(Conv2D(128, (3, 3), strides=(1, 1), padding="valid"))
        # BN 3
        model.add(BatchNormalization(axis=chanDim))
        # CONV 4
        model.add(Conv2D(192, (3, 3), strides=(1, 1), padding="same"))
        # POOL 2
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # BN 4
        model.add(BatchNormalization(axis=chanDim))
        # CONV 5
        model.add(Conv2D(256, (3, 3), strides=(1, 1), padding="valid"))
        # BN 5
        model.add(BatchNormalization(axis=chanDim))
        # CONV 6
        model.add(Conv2D(320, (3, 3), strides=(1, 1), padding="valid"))
        # POOL 3
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # BN 6
        model.add(BatchNormalization(axis=chanDim))
        # CONV 7
        model.add(Conv2D(480, (3, 3), strides=(1, 1), padding="valid"))
        # BN 7
        model.add(BatchNormalization(axis=chanDim))
        # CONV 8
        model.add(Conv2D(512, (3, 3), strides=(1, 1), padding="valid"))
        # POOL 4
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # BN 8
        model.add(BatchNormalization(axis=chanDim))
        # FC 9
        model.add(Flatten())
        model.add(Dense(4096))
        # DROP 10
        model.add(Dropout(0.5))
        # FC 11
        model.add(Dense(4096))
        # DROP 12
        model.add(Dropout(0.5))
        # FC 13
        model.add(Dense(classes))
        # COST 14
        model.add(Activation("softmax"))
        # return the constructed network architecture
        return model
I didn't try to run the code, but I might have figured out your problem.
Be aware that LabelBinarizer only gives you as many columns as there are distinct classes in the data it is fit on. For example:
from sklearn import preprocessing

y = [1, 2, 6, 4, 2]
lb = preprocessing.LabelBinarizer()
lb.fit(y)
lb.transform(y)

will give you:

>>> array([[1, 0, 0, 0],
           [0, 1, 0, 0],
           [0, 0, 0, 1],
           [0, 0, 1, 0],
           [0, 1, 0, 0]])
This is because there are only 4 unique classes.
You might have 158 different classes, but maybe you don't have a sample for each one, so you only get 121 columns in trainY in the end.
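If that is the case, one way to guarantee one column per possible class is to fit the binarizer on the full list of class labels instead of on the split data. A minimal sketch (it assumes the 158 class labels are the integers 0..157; substitute your real label values):

from sklearn.preprocessing import LabelBinarizer

all_classes = list(range(158))      # hypothetical: the complete set of 158 class labels
lb = LabelBinarizer()
lb.fit(all_classes)                 # fit on every possible class, not only the sampled ones
encoded = lb.transform([3, 17, 3])  # toy labels for illustration
print(encoded.shape)                # (3, 158): one column per class, even for classes with no samples

With that, trainY and testY both get 158 columns regardless of which classes happen to appear in each split.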

Store images into multiple arrays and use them to train a model

I get this error when training the model:
ValueError: Error when checking target: expected dropout_5 to have shape (33,) but got array with shape (1,)
I want to store my images into 33 arrays, loaded from folders by path. I have sorted the images into different folders named 1, 2, 3, 4, 5, ...
I used this code to do it, but I don't know how to store them into different arrays. Can someone help me?
datadir = 'C:/Users/user/Desktop/RESIZE'  # path of the folder
categories = ['1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y']
img_rows, img_cols = 100, 100
training_data = []

for category in categories:
    path = os.path.join(datadir, category)
    class_num = categories.index(category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (img_rows, img_cols))
        training_data.append([new_array, class_num])

random.shuffle(training_data)

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, img_rows, img_cols, 1)
X = X.astype("float32")

pickle_out = open("X.pickle", "wb")
pickle.dump(X, pickle_out)
pickle_out.close()

pickle_out = open("y.pickle", "wb")
pickle.dump(y, pickle_out)
pickle_out.close()
After saving the files, I use this code to train the model. I want 33 output units, but it only works when my output (Dense) layer is set to 1.
I got this error:
ValueError: Error when checking target: expected dropout_5 to have shape (33,) but got array with shape (1,)
Here is my training code.
import tensorflow as tf
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Activation
import cv2
import os
import numpy as np
import pickle
from sklearn.utils import shuffle
X = pickle.load(open("X.pickle","rb"))
y = pickle.load(open("y.pickle","rb"))
X = X/255.0
model = Sequential()
model.add(Conv2D(32,(3,3), input_shape = X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(128,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.4))
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(33, activation='softmax'))
model.add(Dropout(0.4))
model.compile(loss = "binary_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.fit(X, y, batch_size = 2, epochs = 1, validation_split = 0.2)
You need to convert your y to one-hot encoded data to do the training.
Try this for y:
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder

# integer encode
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(y)
print(label_encoder.classes_)  # these are your classes
print(integer_encoded.shape)

# binary encode
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
print(onehot_encoded.shape)
And one more thing: if you want to classify 33 classes, change your loss to categorical_crossentropy.
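For completeness, a minimal sketch of how the one-hot labels and the loss change fit together (it assumes X, model, and onehot_encoded from the code above):

# train on the one-hot labels with a categorical loss
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X, onehot_encoded, batch_size=2, epochs=1, validation_split=0.2)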

Error: "CUDNN STATUS NOT INITIALIZED" in keras-based convolutional network

I'm trying to create a convolutional network using keras. However, I'm getting the following error:
2018-08-05 21:10:44.670676: E T:\src\github\tensorflow\tensorflow\stream_executor\cuda\cuda_dnn.cc:332] could not create cudnn handle: CUDNN_STATUS_NOT_INITIALIZED
2018-08-05 21:10:44.670843: E T:\src\github\tensorflow\tensorflow\stream_executor\cuda\cuda_dnn.cc:336] error retrieving driver version: Unimplemented: kernel reported driver version not implemented on Windows
I haven't installed cuDNN separately; I only installed tensorflow-gpu through pip (not using the URL). A separate program that doesn't use a convolutional network works fine. My code:
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
import matplotlib.pylab as plt
import numpy as np

batch_size = 64
num_classes = 10
epochs = 10

# input image dimensions
img_x, img_y = 32, 32

# Load cifar data from file
# define standard sizing values
image_height = 32
image_width = 32
color_channels = 3
model_name = "cifar"

def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict

# Set the path as a mutable variable and initiate our training stuff
cifar_path = 'cifar-10-batches-py/'
x_train = np.array([])
y_train = np.array([])

# Load all the data batches.
for i in range(1, 3):
    data_batch = unpickle(cifar_path + 'data_batch_' + str(i))
    x_train = np.append(x_train, data_batch[b'data'])
    y_train = np.append(y_train, data_batch[b'labels'])

# Load the eval batch.
eval_batch = unpickle(cifar_path + 'test_batch')
x_test = eval_batch[b'data']
y_test = eval_batch[b'labels']

# Load the english category names.
category_names_bytes = unpickle(cifar_path + 'batches.meta')[b'label_names']
category_names = list(map(lambda x: x.decode("utf-8"), category_names_bytes))

def process_data(data):
    float_data = np.array(data, dtype=float) / 255.0
    reshaped_data = np.reshape(float_data, (-1, color_channels, image_height, image_width))
    # The incorrect image
    transposed_data = np.transpose(reshaped_data, [0, 2, 3, 1])
    return transposed_data

# redefine the data with it in its processed form
x_train = process_data(x_train)
x_test = process_data(x_test)

# reshape the data into a 4D tensor - (sample_number, x_img_size, y_img_size, num_channels)
x_train = x_train.reshape(x_train.shape[0], img_x, img_y, 3)
x_test = x_test.reshape(x_test.shape[0], img_x, img_y, 3)
input_shape = (img_x, img_y, 3)

# convert the data to the right type
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices - this is for use in the
# categorical_crossentropy loss below
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1),
                 activation='relu',
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

class AccuracyHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.acc = []

    def on_epoch_end(self, batch, logs={}):
        self.acc.append(logs.get('acc'))

history = AccuracyHistory()

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test),
          callbacks=[history])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

plt.plot(range(1, 11), history.acc)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
You need to add cuDNN to your environment variables (if on Windows) in order to run tensorflow-gpu.
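If cuDNN is installed but TensorFlow still cannot find it, one common workaround is to put the directory containing the cuDNN DLLs on PATH before importing TensorFlow. A sketch (the path below is only an example; it must match where cudnn64_*.dll actually lives on your machine):

import os

cudnn_bin = r"C:\tools\cuda\bin"  # hypothetical cuDNN location; adjust for your install
os.environ["PATH"] = cudnn_bin + os.pathsep + os.environ.get("PATH", "")

import tensorflow as tf  # import TensorFlow only after PATH has been updated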
