index 243 is out of bounds for axis 1 with size 10 - python

I am getting an error "index 243 is out of bounds for axis 1 with size 10" and I have no idea what I'm doing wrong. Also, I was wondering if there is a general guideline for linear algebra and keras.
Let me know what y'all think!
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D

img_rows, img_cols = 16, 16
num_classes = 10

image_file = "../input/pic-test/output2.csv"
image_data = np.loadtxt(image_file, skiprows=1, delimiter=',')  #skiprows=1,

def prep_data(raw):
    y = raw[:, 0]
    out_y = keras.utils.to_categorical(y, num_classes)  #num_classes
    x = raw[:, 1:]
    num_images = raw.shape[0]
    out_x = x.reshape(num_images, img_rows, img_cols, 1)
    out_x = out_x / 255
    return out_x, out_y

x, y = prep_data(image_data)

image_model = Sequential()
image_model.add(Conv2D(12,
                       activation='relu',
                       kernel_size=3,
                       input_shape=(img_rows, img_cols, 1)))
image_model.add(Conv2D(20, activation='relu', kernel_size=3))
image_model.add(Conv2D(20, activation='relu', kernel_size=3))
image_model.add(Flatten())
image_model.add(Dense(100, activation='relu'))
image_model.add(Dense(10, activation='softmax'))
image_model.compile(loss='categorical_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy'])
image_model.fit(x, y, batch_size=100, epochs=4, validation_split=0.2)
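A quick way to check where the 243 comes from (assuming the first CSV column is meant to hold the class labels 0-9): to_categorical(y, 10) indexes axis 1 with the label values, so any label outside 0..9, such as 243, raises exactly this error.
# Sanity check (not in the original code): inspect the label column before one-hot encoding.
labels = image_data[:, 0]
print(labels.min(), labels.max())   # every value should fall within 0..9
print(np.unique(labels)[:20])       # first few distinct label values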

Related

Keras-viz throwing InvalidArgumentError

I am running a simple CNN model in Keras. Code:
from __future__ import print_function
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import tensorflow as tf

batch_size = 128
num_classes = 10
epochs = 1

# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', name='preds'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
I want to visualize the dense layers. For this, I am using keras-vis.
While running the following code:
from vis.visualization import visualize_activation
from vis.utils import utils
from keras import activations
from matplotlib import pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'preds')
# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
# This is the output node we want to maximize.
filter_idx = 0
img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
plt.imshow(img[..., 0])
I am getting the following error:
InvalidArgumentError: conv2d_2_input_2:0 is both fed and fetched.
Solutions that I have tried:
Installing keras-vis from source for the latest build
Applying the changes from the PR mentioned in the issues
Versions:
Keras: 2.7
Keras-vis: 0.5

ValueError being thrown 'This model has not been built'...for CNN Audio Spectrogram feature extraction

In the code below, I am getting the error:
raise ValueError('This model has not yet been built. ' ValueError: This model has not yet been built. Build the model first by calling ``build()`` or calling ``fit()`` with some data, or specify an ``input_shape`` argument in the first layer(s) for automatic build.
I am trying to use a spectrogram instead of the Mel feature extraction for this CNN. I think the error may be in the extraction process for the extract_features step.
Any help will be greatly appreciated in figuring out the source of this error.
Here are some sizes of arrays generated before the error:
x_test = {ndarray:(353,128)}
x_train = {ndarray:(1408,128)}
y = {ndarray:(1761,)}
y_test = {ndarray:(353,10)}
y_train = {ndarray:(1408,10)}
CODE
# Sample rate conversion
import librosa
import librosa.display
from scipy.io import wavfile as wav
import numpy as np
import pandas as pd
import os
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

max_pad_len = 174
n_mels = 128

def extract_features(file_name):
    try:
        audio, sample_rate = librosa.core.load(file_name, res_type='kaiser_fast')
        mely = librosa.feature.melspectrogram(y=audio, sr=sample_rate, n_mels=n_mels)
        # pad_width = max_pad_len - mely.shape[1]
        # mely = np.pad(mely, pad_width=((0, 0), (0, pad_width)), mode='constant')
        melyscaled = np.mean(mely.T, axis=0)
    except Exception as e:
        print("Error encountered while parsing file: ", file_name)
        return None
    return melyscaled

features = []

# Iterate through each sound file and extract the features
from time import sleep
#from tqdm import tqdm
from alive_progress import alive_bar

items = len(metadata.index)
with alive_bar(items) as bar:
    for index, row in metadata.iterrows():
        bar()
        file_name = os.path.join(os.path.abspath(fulldatasetpath), 'fold' + str(row["fold"]) + '/',
                                 str(row["slice_file_name"]))
        class_label = row["class_name"]
        data = extract_features(file_name)
        features.append([data, class_label])

# Convert into a Panda dataframe
featuresdf = pd.DataFrame(features, columns=['feature', 'class_label'])
print('Finished feature extraction from ', len(featuresdf), ' files')

from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

# Convert features and corresponding classification labels into numpy arrays
X = np.array(featuresdf.feature.tolist())
y = np.array(featuresdf.class_label.tolist())

# Encode the classification labels
le = LabelEncoder()
yy = to_categorical(le.fit_transform(y))

# split the dataset
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, yy, test_size=0.2, random_state=42)

### Convolutional Neural Network (CNN) model architecture
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics

num_rows = 40
num_columns = 174
num_channels = 1
#
#x_train = x_train.reshape(x_train.shape[0], num_rows, num_columns, num_channels)
#x_test = x_test.reshape(x_test.shape[0], num_rows, num_columns, num_channels)

num_labels = yy.shape[1]
filter_size = 2

# Construct model
model = Sequential()
#model.add(Conv2D(filters=16, kernel_size=2, input_shape=(num_rows, num_columns, num_channels), activation='relu'))
model.add(Conv2D(filters=16, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dense(num_labels, activation='softmax'))

model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')

# Display model architecture summary
model.summary()
You have to build your model with your input shape before calling model.summary():
model.build((128,))
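Note that for the Conv2D stack above, build() needs the full 4-D input shape (batch, rows, columns, channels); a minimal sketch, assuming the features are first reshaped to (num_rows, num_columns, num_channels) as in the commented-out reshape lines:
# Sketch only: build with an explicit 4-D shape, where None stands for the batch dimension.
model.build(input_shape=(None, num_rows, num_columns, num_channels))
model.summary()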

How to fix 'ValueError: Error when checking target: expected dense_1 to have 2 dimensions, but got array with shape (373, 2, 2)'

This is my first attempt at image classification; I am trying to classify images into 2 classes. My images are 128*128 and I use RGB, so I think the input dimension is (128, 128, 3). The code is:
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt

train = []
train_label = []

train_files_1 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Atopic_Dermatitis/*.jpg')
for files in train_files_1:
    image = cv2.imread(files)
    train.append(image)
    train_label.append([0., 1.])

train_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Contact_Dermatitis/*.jpg')
for files in train_files_2:
    image = cv2.imread(files)
    train.append(image)
    train_label.append([1., 0.])

train_array = np.array(train, dtype='int')
train_label_array = np.array(train_label, dtype='int')

test = []
test_label = []

test_files = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Atopic_Dermatitis/*.jpg')
for files in test_files:
    image = cv2.imread(files)
    test.append(image)
    test_label.append([0., 1.])

test_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Contact_Dermatitis/*.jpg')
for files in test_files_2:
    image = cv2.imread(files)
    test.append(image)
    test_label.append([1., 0.])

test_array = np.array(test, dtype='int')
test_label_array = np.array(test_array, dtype='int')

from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras.utils import to_categorical

x_train = train_array
y_train = train_label_array
x_test = test_array
y_test = test_label_array

x_train = x_train.reshape(373, 128, 128, 3)
x_test = x_test.reshape(95, 128, 128, 3)

model = Sequential()
model.add(Conv2D(64, kernel_size = 3, activation = 'relu', input_shape = (128, 128, 3)))
model.add(Conv2D(32, kernel_size = 3, activation = 'relu'))
model.add(Conv2D(16, kernel_size = 3, activation = 'relu'))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax'))
model.compile(optimizer = 'adam',
              loss = 'categorical_crossentropy',
              metrics = ['accuracy'])
model.fit(x_train, to_categorical(y_train), validation_data = (x_test, to_categorical(y_test)), epochs = 3)
After running the code it shows 'ValueError: Error when checking target: expected dense_1 to have 2 dimensions, but got array with shape (373, 2, 2)' and I don't know how to fix it. You can see the code on GitHub at https://github.com/filmer2002/real_rash_project/blob/master/images_to_numpy_and_CNN_code.ipynb
You are getting this error because the labels you give the network to train on have a shape different from that of your output layer.
to_categorical() takes the labels and num_classes; the labels have to be of integer dtype and smaller than num_classes. Also make sure that your tensorflow and tensorboard are up to date.
Try
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt

train = []
train_label = []

train_files_1 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Atopic_Dermatitis/*.jpg')
for files in train_files_1:
    image = cv2.imread(files)
    train.append(image)
    train_label.append(1)

train_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/train_images/Contact_Dermatitis/*.jpg')
for files in train_files_2:
    image = cv2.imread(files)
    train.append(image)
    train_label.append(0)

train_array = np.array(train, dtype=int)
train_label_array = np.array(train_label, dtype=int)

test = []
test_label = []

test_files = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Atopic_Dermatitis/*.jpg')
for files in test_files:
    image = cv2.imread(files)
    test.append(image)
    test_label.append(1)

test_files_2 = glob.glob('/Users/filmer2002/Desktop/real_rash_project/test_images/Contact_Dermatitis/*.jpg')
for files in test_files_2:
    image = cv2.imread(files)
    test.append(image)
    test_label.append(0)

test_array = np.array(test, dtype=int)
test_label_array = np.array(test_label, dtype=int)  # note: use test_label here, not test_array

from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras.utils import to_categorical

x_train = train_array
y_train = train_label_array
x_test = test_array
y_test = test_label_array

x_train = x_train.reshape(373, 128, 128, 3)
x_test = x_test.reshape(95, 128, 128, 3)

model = Sequential()
model.add(Conv2D(64, kernel_size = 3, activation = 'relu', input_shape = (128, 128, 3)))
model.add(Conv2D(32, kernel_size = 3, activation = 'relu'))
model.add(Conv2D(16, kernel_size = 3, activation = 'relu'))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax'))
model.compile(optimizer = 'adam',
              loss = 'categorical_crossentropy',
              metrics = ['accuracy'])
model.fit(x_train, to_categorical(y_train, 10), validation_data = (x_test, to_categorical(y_test, 10)), epochs = 3)
The num_classes value given to to_categorical() in this case is 10 because your output layer has shape (10,).
Also make sure that you have an equal number of images and labels; you can test that with:
x_train.shape[0] == to_categorical(y_train, 10).shape[0]
True  # should be True if everything is ok
And one final note: tensorflow works with tensors, i.e. n-dimensional arrays; don't use lists when working with training data, numpy arrays should be used most of the time.
EDIT:
Only 1-dimensional arrays or lists of int values are OK for to_categorical().
Don't pass lists of [0, 1] pairs to to_categorical(); it won't work. Just pass the integer 1 and to_categorical() will convert it into a [0, 1] element for you.
Your mistake was to use lists as labels for to_categorical(): to_categorical() only accepts a 1-dimensional int array as its first parameter, and the shape you were giving it was (number_of_labels, 2), when it only works with shape == (number_of_labels,).
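A small illustration (not from the original answer) of the shapes involved; the second call is what produces a 3-dimensional target like (373, 2, 2):
from keras.utils import to_categorical
# 1-D integer labels: this is the form the Dense output layer expects as a target
print(to_categorical([1, 0, 1], 2).shape)              # (3, 2)
# already one-hot [0, 1] pairs: to_categorical adds another axis
print(to_categorical([[0, 1], [1, 0], [0, 1]]).shape)  # (3, 2, 2)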
OR
If you want to use lists/arrays consisting of elements like [0, 1] and your model output shape is 2, then just don't use to_categorical() at all.
This should work, and if it doesn't you have inconsistent data.
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten
from tensorflow.keras.utils import to_categorical

#assert tf.__version__ == '1.14.0'

train = np.zeros((1, 128, 128, 3))
train_label = []

train_files_1 = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in train_files_1:
    image = cv2.imread(files)
    img = np.array(image).astype(int).reshape(128, 128, 3)
    train = np.concatenate((train, [img]), axis=0)
    train_label.append(1)

train_files_2 = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in train_files_2:
    image = cv2.imread(files)
    img = np.array(image).astype(int)
    train = np.concatenate((train, [img]), axis=0)
    train_label.append(0)

x_train = train[1:]
y_train = to_categorical(train_label, 2)

test = np.zeros((1, 128, 128, 3))
test_label = []

test_files = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in test_files:
    image = cv2.imread(files)
    img = np.array(image).astype(int)
    test = np.concatenate((test, [img]), axis=0)
    test_label.append(1)

test_files_2 = glob.glob('/path/to/your/folder/with/data/*.jpg')
for files in test_files_2:
    image = cv2.imread(files)
    img = np.array(image).astype(int)
    test = np.concatenate((test, [img]), axis=0)
    test_label.append(0)

x_test = test[1:]
y_test = to_categorical(test_label, 2)

print('train', x_train.shape)
print('test', x_test.shape)
print('test labels', y_test.shape)
print('train labels', y_train.shape)

assert x_train.shape == (len(train[1:]), 128, 128, 3)
assert x_test.shape == (len(test[1:]), 128, 128, 3)
assert y_train.shape == (len(train_label), 2)
assert y_test.shape == (len(test_label), 2)
# if you get an error about one of the asserts above => your data is not consistent

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(128, 128, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Conv2D(16, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=3)
print('all good, all done!')
Note: mixing Python lists and numpy arrays is a very bad idea.

Store images into multiple arrays and use them to train a model

I get this error when training the model:
ValueError: Error when checking target:
expected dropout_5 to have shape (33,) but got array with shape (1,).
I want to store my images into 33 arrays loaded from folders by path. I have categorized the images into different folders named 1, 2, 3, 4, 5, ...
I have used this code to do it, but I don't know how to store them into different arrays. Can someone help me?
# imports needed by this snippet (shown again in the training code below)
import os
import cv2
import random
import pickle
import numpy as np

datadir = 'C:/Users/user/Desktop/RESIZE'  # path of the folder
categories = ['1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y']
img_rows, img_cols = 100, 100
training_data = []

for category in categories:
    path = os.path.join(datadir, category)
    class_num = categories.index(category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (img_rows, img_cols))
        training_data.append([new_array, class_num])

random.shuffle(training_data)

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, img_rows, img_cols, 1)
X = X.astype("float32")

pickle_out = open("X.pickle", "wb")
pickle.dump(X, pickle_out)
pickle_out.close()

pickle_out = open("y.pickle", "wb")
pickle.dump(y, pickle_out)
pickle_out.close()
After I save the files, I use this code to train the model. I want 33 output units, but it only works when my output (Dense) layer is set to 1.
I got this error:
ValueError: Error when checking target:
expected dropout_5 to have shape (33,) but got array with shape (1,)
Here is my training code.
import tensorflow as tf
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Activation
import cv2
import os
import numpy as np
import pickle
from sklearn.utils import shuffle
X = pickle.load(open("X.pickle","rb"))
y = pickle.load(open("y.pickle","rb"))
X = X/255.0
model = Sequential()
model.add(Conv2D(32,(3,3), input_shape = X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(128,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Dropout(0.4))
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(33, activation='softmax'))
model.add(Dropout(0.4))
model.compile(loss = "binary_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.fit(X, y, batch_size = 2, epochs = 1, validation_split = 0.2)
You need to change your y to one-hot encoded data to do the training.
Try this for y:
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder

# integer encode
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(y)
print(label_encoder.classes_)   # These are your classes.
print(integer_encoded.shape)    # .shape is an attribute, not a method

# binary encode
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
print(onehot_encoded.shape)
And one more thing: if you want to classify 33 classes, then change your loss to categorical_crossentropy.
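A minimal sketch (my addition, reusing the variables defined above) of how the compile and fit calls could look once the targets are one-hot encoded:
# onehot_encoded has shape (num_samples, 33), matching Dense(33, activation='softmax')
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X, onehot_encoded, batch_size=2, epochs=1, validation_split=0.2)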

image input to neural networks

I am building a figure extractor for scanned documents, using 1100x850 images. We use a 44x34 grid over the image, so the last layer will be a 1496-unit fully connected layer.
The label is a 44x34 BINARY array which is 1 for a figure region and 0 for a non-figure region, i.e. if a figure falls within (top right) (x,y)=(0,0) to (bottom left) (x,y)=(50,50), then the binary array has 1 at positions (0,0), (0,1), (1,0), (1,1) and 0 everywhere else. So I have built a neural network model; the structure is as follows.
conv(5,2,48)
maxpool(3,2)
conv(5,2,96)
maxpool(3,2)
conv(5,2,96)
maxpool(3,2)
FC-1496
The notation conv(k,d, n) denotes a convolutional layer with n filters, each of size k × k, applied with a shift of d pixels; maxpool(k, d) denotes a downsampling operation over k×k windows, applied with a shift of d pixels. FC-1496 refers to the final fully connected
layer which connects the hidden units from the previous layers to the 1496 output units (we have 1496 units for a 44x34 grid).
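For reference, a small sketch of how one such 44x34 label could be built from a figure bounding box (an illustration of my own; it assumes each grid cell covers 25x25 pixels, since 1100/44 = 850/34 = 25, which reproduces the (0,0)-(50,50) example above marking cells (0,0), (0,1), (1,0), (1,1)):
import numpy as np

GRID_ROWS, GRID_COLS = 44, 34   # grid over an 1100x850 page (hypothetical names)
CELL = 25                       # 1100 / 44 = 850 / 34 = 25 pixels per cell

def bbox_to_label(x0, y0, x1, y1):
    # Mark every grid cell touched by the box spanning (x0, y0)-(x1, y1) with 1.
    label = np.zeros((GRID_ROWS, GRID_COLS), dtype=np.float32)
    label[x0 // CELL:(x1 + CELL - 1) // CELL, y0 // CELL:(y1 + CELL - 1) // CELL] = 1
    return label

# The example from the question: a figure spanning (0,0)-(50,50) marks cells
# (0,0), (0,1), (1,0), (1,1); flatten() gives the 1496-long target vector.
y_example = bbox_to_label(0, 0, 50, 50).flatten()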
So my question is: how do I feed the input (images and label arrays) to this model using Keras and TensorFlow?
Here is the model code:
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dense
from keras.models import Sequential
from keras.layers import Flatten
xtrain = ...  # images of 850*1100; for 10 images the array is 10 x 850 x 1100
xtest = ...   # binary label arrays of size 1496; for 10 images the array is 10 x 1496
# initialize the model
model = Sequential()
model.add(Conv2D(48, 5, 2, input_shape=(1100, 850, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
model.add(Conv2D(96, 5, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
model.add(Conv2D(96, 5, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
model.add(Flatten())
model.add(Dense(1496, activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
Here is a working example based on your data (as I assume it from your description).
I am using the label as a vector of 1s and 0s, e.g. [1, 0, 1, 1, ...], with 1 for a figure region and 0 for a non-figure region, for a total of 1496 regions.
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.utils import np_utils

batch_size = 128
nb_epoch = 10
nb_regions = 1496

# input image dimensions
img_rows, img_cols = 850, 1100

# create random test and train sets
X_train = np.random.randint(256, size=(10, img_rows, img_cols))
Y_train = np.random.randint(2, size=(10, nb_regions))
X_test = np.random.randint(256, size=(10, img_rows, img_cols))
Y_test = np.random.randint(2, size=(10, nb_regions))

X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

model = Sequential([
    Dense(32, input_shape=(1, img_rows, img_cols)),
    Activation('relu'),
    Flatten(),
    Dense(nb_regions),
    Activation('softmax'),
])

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])

model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
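To train on the real pages instead of the random placeholders, something along these lines could replace the random X_train/Y_train above (a rough sketch of my own; the file list, label grids, and the use of OpenCV for loading are assumptions, not part of the question):
import cv2  # assumed image loader; any reader that yields (img_rows, img_cols) arrays works

# page_files: paths of the 10 scanned pages; label_grids: the matching 44x34 binary arrays
X_train = np.stack([cv2.imread(f, cv2.IMREAD_GRAYSCALE) for f in page_files])   # (10, img_rows, img_cols)
Y_train = np.stack([g.flatten() for g in label_grids]).astype('float32')        # (10, 1496)
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols).astype('float32') / 255

model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, verbose=1)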
