I copied some sample code straight from Keras official website and edited it to make a machine learning model.
I am using Google Colab for my code.
Link: https://keras.io/examples/vision/image_classification_from_scratch/
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from google.colab import drive
# Mount Google Drive so the image folders referenced below are visible.
drive.mount("/content/gdrive")
# Every image is resized to 50x50 on load (matches the stored .jpg size).
image_size = (50, 50)
# NOTE(review): batch_size=400 is roughly the whole training set
# (~300 images per class, two classes), so each epoch has only 2 steps —
# the author's later edit (batch size 8) identifies this as the fix for
# the stuck-at-0.5 validation accuracy.
batch_size = 400
import random
# Shared shuffle/split seed for both dataset loaders.
num = random.randint(1, 400)
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
"/content/gdrive/My Drive/pest/train",
seed=num,
image_size=image_size,
batch_size=batch_size,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
"/content/gdrive/My Drive/pest/test",
seed=num,
image_size=image_size,
batch_size=batch_size,
)
# Random flip/rotation augmentation, applied inside the model graph.
data_augmentation = keras.Sequential(
[
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
]
)
def make_model(input_shape, num_classes):
    """Build a small Xception-style CNN (Keras image-classification example).

    Args:
        input_shape: shape of one input image, e.g. (50, 50, 3).
        num_classes: number of target classes; 2 selects a single sigmoid
            output unit, anything else a softmax over num_classes units.

    Returns:
        An uncompiled keras.Model mapping image batches to predictions.
    """
    inputs = keras.Input(shape=input_shape)

    # Augment, then rescale pixel values from [0, 255] into [0, 1].
    x = data_augmentation(inputs)
    x = layers.Rescaling(1.0 / 255)(x)

    # Entry block: two plain convolutions.
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    residual_input = x  # start of the first shortcut connection

    # Four downsampling blocks of separable convolutions, each followed by
    # a projected shortcut added back onto the main path.
    for width in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(width, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(width, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # A strided 1x1 conv reshapes the shortcut to match x.
        shortcut = layers.Conv2D(width, 1, strides=2, padding="same")(
            residual_input
        )
        x = layers.add([x, shortcut])
        residual_input = x  # becomes the next block's shortcut

    # Exit block: one wide separable conv, then global average pooling.
    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.GlobalAveragePooling2D()(x)

    # Binary problems get one sigmoid unit; multi-class gets a softmax.
    if num_classes == 2:
        units, activation = 1, "sigmoid"
    else:
        units, activation = num_classes, "softmax"

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs)
# Binary setup: two classes -> one sigmoid unit + binary_crossentropy.
model = make_model(input_shape=image_size + (3,), num_classes=2)
# Render the architecture diagram (needs pydot/graphviz in Colab).
keras.utils.plot_model(model, show_shapes=True)
epochs = 50
callbacks = [
# Save a checkpoint file after every epoch.
keras.callbacks.ModelCheckpoint("save_at_{epoch}.h5"),
]
model.compile(
optimizer=keras.optimizers.Adam(1e-3),
loss="binary_crossentropy",
metrics=["accuracy"],
)
model.fit(
train_ds, epochs=epochs, callbacks=callbacks, validation_data=val_ds,
# NOTE(review): fit()'s shuffle argument does not apply to tf.data
# datasets; shuffling here comes from image_dataset_from_directory's
# default shuffle=True — confirm if per-epoch reshuffling is needed.
)
However, when I run it, the result is
Epoch 1/50
2/2 [==============================] - 71s 14s/step - loss: 0.6260 - accuracy: 0.6050 - val_loss: 0.6931 - val_accuracy: 0.5000
Epoch 2/50
2/2 [==============================] - 2s 507ms/step - loss: 0.2689 - accuracy: 0.8867 - val_loss: 0.6932 - val_accuracy: 0.5000
Epoch 3/50
2/2 [==============================] - 2s 536ms/step - loss: 0.1241 - accuracy: 0.9483 - val_loss: 0.6932 - val_accuracy: 0.5000
Epoch 4/50
2/2 [==============================] - 2s 506ms/step - loss: 0.0697 - accuracy: 0.9750 - val_loss: 0.6934 - val_accuracy: 0.5000
Epoch 5/50
2/2 [==============================] - 2s 525ms/step - loss: 0.0479 - accuracy: 0.9867 - val_loss: 0.6936 - val_accuracy: 0.5000
Epoch 6/50
2/2 [==============================] - 2s 534ms/step - loss: 0.0359 - accuracy: 0.9867 - val_loss: 0.6940 - val_accuracy: 0.5000
Epoch 7/50
2/2 [==============================] - 2s 509ms/step - loss: 0.0145 - accuracy: 0.9983 - val_loss: 0.6946 - val_accuracy: 0.5000
Epoch 8/50
2/2 [==============================] - 2s 545ms/step - loss: 0.0124 - accuracy: 0.9967 - val_loss: 0.6954 - val_accuracy: 0.5000
Epoch 9/50
2/2 [==============================] - 2s 544ms/step - loss: 0.0092 - accuracy: 0.9967 - val_loss: 0.6964 - val_accuracy: 0.5000
Epoch 10/50
2/2 [==============================] - 2s 512ms/step - loss: 0.0060 - accuracy: 0.9967 - val_loss: 0.6980 - val_accuracy: 0.5000
Epoch 11/50
2/2 [==============================] - 2s 535ms/step - loss: 0.0036 - accuracy: 0.9983 - val_loss: 0.6998 - val_accuracy: 0.5000
Epoch 12/50
2/2 [==============================] - 2s 503ms/step - loss: 0.0085 - accuracy: 0.9983 - val_loss: 0.7020 - val_accuracy: 0.5000
Epoch 13/50
2/2 [==============================] - 2s 665ms/step - loss: 0.0040 - accuracy: 1.0000 - val_loss: 0.7046 - val_accuracy: 0.5000
Epoch 14/50
2/2 [==============================] - 2s 516ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.7078 - val_accuracy: 0.5000
Epoch 15/50
2/2 [==============================] - 2s 520ms/step - loss: 0.0023 - accuracy: 0.9983 - val_loss: 0.7115 - val_accuracy: 0.5000
Epoch 16/50
2/2 [==============================] - 2s 500ms/step - loss: 8.5606e-04 - accuracy: 1.0000 - val_loss: 0.7157 - val_accuracy: 0.5000
Epoch 17/50
2/2 [==============================] - 2s 524ms/step - loss: 0.0018 - accuracy: 1.0000 - val_loss: 0.7205 - val_accuracy: 0.5000
Epoch 18/50
2/2 [==============================] - 2s 499ms/step - loss: 9.0626e-04 - accuracy: 1.0000 - val_loss: 0.7258 - val_accuracy: 0.5000
Epoch 19/50
2/2 [==============================] - 2s 510ms/step - loss: 0.0014 - accuracy: 1.0000 - val_loss: 0.7313 - val_accuracy: 0.5000
Epoch 20/50
2/2 [==============================] - 2s 711ms/step - loss: 0.0013 - accuracy: 1.0000 - val_loss: 0.7371 - val_accuracy: 0.5000
Epoch 21/50
2/2 [==============================] - 2s 511ms/step - loss: 9.9904e-04 - accuracy: 1.0000 - val_loss: 0.7431 - val_accuracy: 0.5000
Epoch 22/50
2/2 [==============================] - 2s 540ms/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 0.7489 - val_accuracy: 0.5000
Epoch 23/50
2/2 [==============================] - 2s 513ms/step - loss: 4.9861e-04 - accuracy: 1.0000 - val_loss: 0.7553 - val_accuracy: 0.5000
Epoch 24/50
2/2 [==============================] - 2s 542ms/step - loss: 6.6248e-04 - accuracy: 1.0000 - val_loss: 0.7622 - val_accuracy: 0.5000
Epoch 25/50
2/2 [==============================] - 2s 510ms/step - loss: 7.7911e-04 - accuracy: 1.0000 - val_loss: 0.7699 - val_accuracy: 0.5000
Epoch 26/50
2/2 [==============================] - 2s 502ms/step - loss: 3.3703e-04 - accuracy: 1.0000 - val_loss: 0.7781 - val_accuracy: 0.5000
Epoch 27/50
2/2 [==============================] - 2s 539ms/step - loss: 3.7860e-04 - accuracy: 1.0000 - val_loss: 0.7870 - val_accuracy: 0.5000
Epoch 28/50
2/2 [==============================] - 2s 507ms/step - loss: 2.4852e-04 - accuracy: 1.0000 - val_loss: 0.7962 - val_accuracy: 0.5000
Epoch 29/50
2/2 [==============================] - 2s 512ms/step - loss: 1.7709e-04 - accuracy: 1.0000 - val_loss: 0.8058 - val_accuracy: 0.5000
Epoch 30/50
2/2 [==============================] - 2s 538ms/step - loss: 1.6884e-04 - accuracy: 1.0000 - val_loss: 0.8161 - val_accuracy: 0.5000
Epoch 31/50
2/2 [==============================] - 2s 521ms/step - loss: 2.0884e-04 - accuracy: 1.0000 - val_loss: 0.8266 - val_accuracy: 0.5000
Epoch 32/50
2/2 [==============================] - 2s 543ms/step - loss: 1.8691e-04 - accuracy: 1.0000 - val_loss: 0.8375 - val_accuracy: 0.5000
Epoch 33/50
2/2 [==============================] - 2s 520ms/step - loss: 1.7296e-04 - accuracy: 1.0000 - val_loss: 0.8487 - val_accuracy: 0.5000
Epoch 34/50
2/2 [==============================] - 2s 516ms/step - loss: 4.5739e-04 - accuracy: 1.0000 - val_loss: 0.8601 - val_accuracy: 0.5000
Epoch 35/50
2/2 [==============================] - 2s 530ms/step - loss: 9.6831e-05 - accuracy: 1.0000 - val_loss: 0.8720 - val_accuracy: 0.5000
Epoch 36/50
2/2 [==============================] - 2s 553ms/step - loss: 1.2694e-04 - accuracy: 1.0000 - val_loss: 0.8847 - val_accuracy: 0.5000
Epoch 37/50
2/2 [==============================] - 2s 514ms/step - loss: 8.6252e-05 - accuracy: 1.0000 - val_loss: 0.8977 - val_accuracy: 0.5000
Epoch 38/50
2/2 [==============================] - 2s 520ms/step - loss: 2.6762e-04 - accuracy: 1.0000 - val_loss: 0.9115 - val_accuracy: 0.5000
Epoch 39/50
2/2 [==============================] - 2s 542ms/step - loss: 8.1350e-05 - accuracy: 1.0000 - val_loss: 0.9258 - val_accuracy: 0.5000
Epoch 40/50
2/2 [==============================] - 2s 506ms/step - loss: 8.0961e-05 - accuracy: 1.0000 - val_loss: 0.9405 - val_accuracy: 0.5000
Epoch 41/50
2/2 [==============================] - 2s 526ms/step - loss: 6.6102e-05 - accuracy: 1.0000 - val_loss: 0.9555 - val_accuracy: 0.5000
Epoch 42/50
2/2 [==============================] - 2s 549ms/step - loss: 1.1529e-04 - accuracy: 1.0000 - val_loss: 0.9707 - val_accuracy: 0.5000
Epoch 43/50
2/2 [==============================] - 2s 528ms/step - loss: 6.1373e-05 - accuracy: 1.0000 - val_loss: 0.9864 - val_accuracy: 0.5000
Epoch 44/50
2/2 [==============================] - 2s 516ms/step - loss: 7.2809e-05 - accuracy: 1.0000 - val_loss: 1.0025 - val_accuracy: 0.5000
Epoch 45/50
2/2 [==============================] - 2s 513ms/step - loss: 5.9504e-05 - accuracy: 1.0000 - val_loss: 1.0191 - val_accuracy: 0.5000
Epoch 46/50
2/2 [==============================] - 2s 515ms/step - loss: 6.1622e-05 - accuracy: 1.0000 - val_loss: 1.0361 - val_accuracy: 0.5000
Epoch 47/50
2/2 [==============================] - 2s 525ms/step - loss: 7.7296e-05 - accuracy: 1.0000 - val_loss: 1.0534 - val_accuracy: 0.5000
Epoch 48/50
2/2 [==============================] - 2s 512ms/step - loss: 4.5088e-05 - accuracy: 1.0000 - val_loss: 1.0711 - val_accuracy: 0.5000
Epoch 49/50
2/2 [==============================] - 2s 532ms/step - loss: 1.1449e-04 - accuracy: 1.0000 - val_loss: 1.0887 - val_accuracy: 0.5000
Epoch 50/50
2/2 [==============================] - 2s 516ms/step - loss: 6.0932e-05 - accuracy: 1.0000 - val_loss: 1.1071 - val_accuracy: 0.5000
<keras.callbacks.History at 0x7fb4205a20d0>
Since I have 2 classes, my teacher said that a validation accuracy of 0.5 means that it is completely random.
My images are in the format of 50x50 .jpg images in Google Drive. Could that be the problem as my current image size is 50x50? But when I run
import matplotlib.pyplot as plt

# Sanity check: display the first 9 training images with their labels.
# Fix: the loop bodies were pasted flat (a SyntaxError in Python);
# the nesting from the original Keras example is restored here.
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(int(labels[i]))
        plt.axis("off")
The images are correct, as in the entire image is shown and is clear.
I tried changing the seed to a random number. The code comes with data augmentation and the model.fit() should automatically shuffle the images (if I understood the online sites correctly).
My teacher does not know what is wrong either. Any solutions?
Edit: this is the dataset
https://www.kaggle.com/datasets/simranvolunesia/pest-dataset
Edit2: Sorry for the confusion but I only used two datasets, aphids and bollworm.
Edit: You are also using binary_crossentropy for a multi-class classification problem, yet you're forcing it to only have two classes when your passed dataset contains nine.
model = make_model(input_shape=image_size + (3,), num_classes=2)
According to your dataset, the classes are:
Pests: aphids, armyworm, beetle, bollworm, grasshopper, mites, mosquito, sawfly, stem borer
I don't see where you're only working with two classes, unless there's some code missing somewhere that removes the other seven. This site (https://keras.io/examples/vision/image_classification_from_scratch/) is classifying into two classes: cat or dog. That's probably where you got two classes from.
So that line needs to be changed to:
model = make_model(input_shape=image_size + (3,), num_classes=9)
Change this:
model.compile(
optimizer=keras.optimizers.Adam(1e-3),
loss="binary_crossentropy",
metrics=["accuracy"],
)
To:
model.compile(
optimizer=keras.optimizers.Adam(1e-3),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
You might also need to change that metric from accuracy to binary_accuracy. Try with just accuracy first, then with binary_accuracy.
model.compile(
optimizer=keras.optimizers.Adam(1e-3),
loss="sparse_categorical_crossentropy",
metrics=["binary_accuracy"],
According to the documentation, you are not splitting your validation data correctly and probably dealing with the default shuffling too.
Define your datasets like this (assuming a 20% validation split):
# Proper train/validation/test definition (20% validation split carved out
# of the train folder; the test folder is kept as a held-out test set).
# Fix: the pasted version was missing the commas after subset="training"
# and subset="validation", which is a SyntaxError.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "/content/gdrive/My Drive/pest/train",
    validation_split=0.2,
    subset="training",
    seed=num,
    image_size=image_size,
    batch_size=batch_size,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "/content/gdrive/My Drive/pest/train",
    validation_split=0.2,
    subset="validation",
    seed=num,
    image_size=image_size,
    batch_size=batch_size,
)
# shuffle=False keeps predictions aligned with file order for evaluation.
test_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "/content/gdrive/My Drive/pest/test",
    image_size=image_size,
    batch_size=batch_size,
    shuffle=False,
)
The answer provided by Djinn is correct. Also, you are including augmentation WITHIN your model, so your model will augment not only the training images but the validation and test images as well. Test and validation images should not be augmented. If you want augmentation, then use ImageDataGenerator.flow_from_directory. Documentation for that is here
I’ve found the problem. It is because I have quite few images (only 300 in each class), my batch size is too big. val_accuracy is around 0.8 to 0.9 after changing the batch size to 8. Thanks everyone for the answers!
Related
I'm currently working on a neural network project, and I need some help understanding the relationships between parameters and the values my neural network is outputting.
My goal is to train a LSTM neural network to detect stress in speech. I'm using a dataset divided into audios of neutral voices and audios of voices under stress. In order to classify which audios contain stress, I'm extracting relevant features from the voices each frame, and then feeding this information into the LSTM neural network.
Since I'm extracting features by frame, the extraction outputs from audio files with different lengths also have different lengths, proportional to the audio duration. To normalize the neural network's inputs, I'm using a padding technique, which consists in adding zeroes to the end of each extracted feature set to meet the biggest set size.
So, for example, if I have 3 audio files, each with these durations: 4, 5, 6 seconds, the extracted features set from the first two audios would be padded with zeroes to meet the third audio extracted set length.
A padded features set looks like this:
[
[9.323346e+00, 9.222625e+00, 8.910659e+00],
[8.751126e+00, 8.432300e+00, 8.046866e+00],
...
[7.439109e+00, 7.380966e+00, 6.092496e+00],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]
The whole dataset dimensions are as follows: (number of audio files) x (number of frames in biggest audio file) x (number of features)
I divided my dataset into a training set, a validation set and a test set. Currently, I have audio files from two public databases, one set with 576 audio files (288 non-stressed, 288 stressed) and other with 240 files (120 non-stressed, 120 stressed).
The following code shows my LSTM implementation using Keras:
N_HIDDEN_CELLS = 100  # units in each stacked LSTM layer
LEARNING_RATE = 0.00005
BATCH_SIZE = 32
EPOCHS_N = 30
ACTIVATION_FUNCTION = 'softmax'  # activation of the final 2-unit layer
# NOTE(review): binary_crossentropy with a 2-unit softmax and one-hot
# labels runs, but categorical_crossentropy is the conventional pairing
# for that head — confirm which is intended.
LOSS_FUNCTION = 'binary_crossentropy'
def create_model(input_shape):
    """Build a 4-layer stacked LSTM classifier for padded feature sequences.

    Args:
        input_shape: (timesteps, n_features) of one padded example.

    Returns:
        An uncompiled keras.Sequential model whose final layer is a
        2-unit LSTM with the configured activation (one unit per class).
    """
    # Fix: the pasted def line was missing its trailing colon, and the
    # body had lost its indentation (both SyntaxErrors).
    model = keras.Sequential()
    model.add(keras.layers.LSTM(N_HIDDEN_CELLS, input_shape=input_shape, return_sequences=True))
    model.add(keras.layers.LSTM(N_HIDDEN_CELLS, return_sequences=True))
    model.add(keras.layers.LSTM(N_HIDDEN_CELLS, return_sequences=True))
    model.add(keras.layers.Dropout(0.3))
    # NOTE(review): an LSTM layer as the classification head is unusual;
    # Dense(2, activation='softmax') after the last LSTM is the
    # conventional choice — confirm this is intended.
    model.add(keras.layers.LSTM(2, activation=ACTIVATION_FUNCTION))
    return model
def prepare_datasets(data, labels, test_size, validation_size):
    """Split (data, labels) into train / validation / test sets.

    test_size is the fraction of all examples held out for testing;
    validation_size is the fraction of the *remaining* training examples
    held out for validation. Returns
    (X_train, X_validation, X_test, y_train, y_validation, y_test).
    """
    # First carve off the test set, then split validation from the rest.
    remaining_X, test_X, remaining_y, test_y = train_test_split(
        data, labels, test_size=test_size
    )
    train_X, val_X, train_y, val_y = train_test_split(
        remaining_X, remaining_y, test_size=validation_size
    )
    return train_X, val_X, test_X, train_y, val_y, test_y
# 25% of the data goes to test; 20% of the remainder to validation.
X_train, X_validation, X_test, y_train, y_validation, y_test = prepare_datasets(data, labels, 0.25, 0.2)
# (timesteps, n_features) of one padded example.
input_shape = (X_train.shape[1], X_train.shape[2])
model = create_model(input_shape)
optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer, loss=LOSS_FUNCTION, metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train, validation_data=(X_validation, y_validation), batch_size=BATCH_SIZE, epochs=EPOCHS_N)
# Final held-out evaluation on the test split.
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
After various tests and executions, I'm not so confident about my network performance. At first, the validation loss values were all over the place, varying a lot and not converging at all. With some adjustments to parameters, I ended up with the values in the code above. Still, I'm not that confident, mainly because the validation loss starts to vary after epoch 15 (more or less). In the first epochs, both training and validation losses fall accordingly to expectations, but after some epochs, the training loss keeps falling and the validation loss starts to vary and rise.
Below are two executions of the same network (with the same parameters as the code provided) and same dataset (the one with 576 audio files):
Epoch 1/30
11/11 [==============================] - 5s 194ms/step - loss: 0.8493 - accuracy: 0.4934 - val_loss: 0.8436 - val_accuracy: 0.4943
Epoch 2/30
11/11 [==============================] - 1s 123ms/step - loss: 0.8398 - accuracy: 0.5271 - val_loss: 0.8364 - val_accuracy: 0.4943
Epoch 3/30
11/11 [==============================] - 1s 124ms/step - loss: 0.8291 - accuracy: 0.6015 - val_loss: 0.8277 - val_accuracy: 0.4828
Epoch 4/30
11/11 [==============================] - 1s 128ms/step - loss: 0.8187 - accuracy: 0.6022 - val_loss: 0.8159 - val_accuracy: 0.5402
Epoch 5/30
11/11 [==============================] - 1s 124ms/step - loss: 0.8017 - accuracy: 0.6691 - val_loss: 0.8002 - val_accuracy: 0.5862
Epoch 6/30
11/11 [==============================] - 1s 123ms/step - loss: 0.7754 - accuracy: 0.7081 - val_loss: 0.7750 - val_accuracy: 0.6322
Epoch 7/30
11/11 [==============================] - 1s 124ms/step - loss: 0.7455 - accuracy: 0.7168 - val_loss: 0.7391 - val_accuracy: 0.6092
Epoch 8/30
11/11 [==============================] - 1s 130ms/step - loss: 0.7017 - accuracy: 0.7287 - val_loss: 0.6896 - val_accuracy: 0.6437
Epoch 9/30
11/11 [==============================] - 1s 125ms/step - loss: 0.6519 - accuracy: 0.7210 - val_loss: 0.6311 - val_accuracy: 0.6897
Epoch 10/30
11/11 [==============================] - 1s 129ms/step - loss: 0.5613 - accuracy: 0.7817 - val_loss: 0.5935 - val_accuracy: 0.7356
Epoch 11/30
11/11 [==============================] - 1s 123ms/step - loss: 0.5050 - accuracy: 0.7789 - val_loss: 0.5645 - val_accuracy: 0.7471
Epoch 12/30
11/11 [==============================] - 1s 123ms/step - loss: 0.4612 - accuracy: 0.8098 - val_loss: 0.5127 - val_accuracy: 0.7356
Epoch 13/30
11/11 [==============================] - 1s 127ms/step - loss: 0.4117 - accuracy: 0.8301 - val_loss: 0.4848 - val_accuracy: 0.7931
Epoch 14/30
11/11 [==============================] - 1s 128ms/step - loss: 0.3857 - accuracy: 0.8479 - val_loss: 0.4609 - val_accuracy: 0.7816
Epoch 15/30
11/11 [==============================] - 1s 122ms/step - loss: 0.3392 - accuracy: 0.8724 - val_loss: 0.4467 - val_accuracy: 0.8276
Epoch 16/30
11/11 [==============================] - 1s 118ms/step - loss: 0.3140 - accuracy: 0.8901 - val_loss: 0.4462 - val_accuracy: 0.8161
Epoch 17/30
11/11 [==============================] - 1s 125ms/step - loss: 0.2775 - accuracy: 0.9092 - val_loss: 0.4619 - val_accuracy: 0.8046
Epoch 18/30
11/11 [==============================] - 1s 128ms/step - loss: 0.2963 - accuracy: 0.8873 - val_loss: 0.3995 - val_accuracy: 0.8621
Epoch 19/30
11/11 [==============================] - 1s 122ms/step - loss: 0.2663 - accuracy: 0.9141 - val_loss: 0.4364 - val_accuracy: 0.8276
Epoch 20/30
11/11 [==============================] - 1s 120ms/step - loss: 0.2415 - accuracy: 0.9368 - val_loss: 0.4758 - val_accuracy: 0.8276
Epoch 21/30
11/11 [==============================] - 1s 121ms/step - loss: 0.2209 - accuracy: 0.9297 - val_loss: 0.3855 - val_accuracy: 0.8276
Epoch 22/30
11/11 [==============================] - 1s 121ms/step - loss: 0.1605 - accuracy: 0.9676 - val_loss: 0.3658 - val_accuracy: 0.8621
Epoch 23/30
11/11 [==============================] - 1s 126ms/step - loss: 0.1618 - accuracy: 0.9641 - val_loss: 0.3638 - val_accuracy: 0.8506
Epoch 24/30
11/11 [==============================] - 1s 129ms/step - loss: 0.1309 - accuracy: 0.9728 - val_loss: 0.4450 - val_accuracy: 0.8276
Epoch 25/30
11/11 [==============================] - 1s 125ms/step - loss: 0.2014 - accuracy: 0.9394 - val_loss: 0.3439 - val_accuracy: 0.8621
Epoch 26/30
11/11 [==============================] - 1s 126ms/step - loss: 0.1342 - accuracy: 0.9554 - val_loss: 0.3356 - val_accuracy: 0.8851
Epoch 27/30
11/11 [==============================] - 1s 125ms/step - loss: 0.1555 - accuracy: 0.9618 - val_loss: 0.3486 - val_accuracy: 0.8736
Epoch 28/30
11/11 [==============================] - 1s 124ms/step - loss: 0.1346 - accuracy: 0.9659 - val_loss: 0.3208 - val_accuracy: 0.9080
Epoch 29/30
11/11 [==============================] - 1s 127ms/step - loss: 0.1193 - accuracy: 0.9697 - val_loss: 0.3706 - val_accuracy: 0.8851
Epoch 30/30
11/11 [==============================] - 1s 123ms/step - loss: 0.0836 - accuracy: 0.9777 - val_loss: 0.3623 - val_accuracy: 0.8621
5/5 - 0s - loss: 0.4383 - accuracy: 0.8472
Test accuracy: 0.8472222089767456
Test loss: 0.43826407194137573
1st execution val_loss x train_loss graph
Epoch 1/30
11/11 [==============================] - 5s 190ms/step - loss: 0.8297 - accuracy: 0.5306 - val_loss: 0.8508 - val_accuracy: 0.4138
Epoch 2/30
11/11 [==============================] - 1s 123ms/step - loss: 0.8138 - accuracy: 0.5460 - val_loss: 0.8355 - val_accuracy: 0.4713
Epoch 3/30
11/11 [==============================] - 1s 120ms/step - loss: 0.8082 - accuracy: 0.5384 - val_loss: 0.8145 - val_accuracy: 0.5402
Epoch 4/30
11/11 [==============================] - 1s 118ms/step - loss: 0.7997 - accuracy: 0.5799 - val_loss: 0.7911 - val_accuracy: 0.5517
Epoch 5/30
11/11 [==============================] - 1s 117ms/step - loss: 0.7752 - accuracy: 0.6585 - val_loss: 0.7654 - val_accuracy: 0.5862
Epoch 6/30
11/11 [==============================] - 1s 125ms/step - loss: 0.7527 - accuracy: 0.6609 - val_loss: 0.7289 - val_accuracy: 0.6437
Epoch 7/30
11/11 [==============================] - 1s 121ms/step - loss: 0.7129 - accuracy: 0.7432 - val_loss: 0.6790 - val_accuracy: 0.6782
Epoch 8/30
11/11 [==============================] - 1s 125ms/step - loss: 0.6570 - accuracy: 0.7707 - val_loss: 0.6107 - val_accuracy: 0.7356
Epoch 9/30
11/11 [==============================] - 1s 125ms/step - loss: 0.6112 - accuracy: 0.7513 - val_loss: 0.5529 - val_accuracy: 0.7586
Epoch 10/30
11/11 [==============================] - 1s 129ms/step - loss: 0.5339 - accuracy: 0.8026 - val_loss: 0.4895 - val_accuracy: 0.7816
Epoch 11/30
11/11 [==============================] - 1s 120ms/step - loss: 0.4720 - accuracy: 0.8189 - val_loss: 0.4579 - val_accuracy: 0.8046
Epoch 12/30
11/11 [==============================] - 1s 121ms/step - loss: 0.4332 - accuracy: 0.8527 - val_loss: 0.4169 - val_accuracy: 0.8046
Epoch 13/30
11/11 [==============================] - 1s 122ms/step - loss: 0.3976 - accuracy: 0.8568 - val_loss: 0.3850 - val_accuracy: 0.7931
Epoch 14/30
11/11 [==============================] - 1s 124ms/step - loss: 0.3489 - accuracy: 0.8726 - val_loss: 0.3753 - val_accuracy: 0.8046
Epoch 15/30
11/11 [==============================] - 1s 124ms/step - loss: 0.3088 - accuracy: 0.9020 - val_loss: 0.3562 - val_accuracy: 0.8161
Epoch 16/30
11/11 [==============================] - 1s 124ms/step - loss: 0.3489 - accuracy: 0.8745 - val_loss: 0.3501 - val_accuracy: 0.8391
Epoch 17/30
11/11 [==============================] - 1s 130ms/step - loss: 0.2725 - accuracy: 0.9240 - val_loss: 0.3436 - val_accuracy: 0.8506
Epoch 18/30
11/11 [==============================] - 1s 121ms/step - loss: 0.3494 - accuracy: 0.8764 - val_loss: 0.3516 - val_accuracy: 0.8506
Epoch 19/30
11/11 [==============================] - 1s 119ms/step - loss: 0.2553 - accuracy: 0.9243 - val_loss: 0.3413 - val_accuracy: 0.8391
Epoch 20/30
11/11 [==============================] - 1s 122ms/step - loss: 0.2723 - accuracy: 0.9092 - val_loss: 0.3258 - val_accuracy: 0.8621
Epoch 21/30
11/11 [==============================] - 1s 121ms/step - loss: 0.2600 - accuracy: 0.9306 - val_loss: 0.3257 - val_accuracy: 0.8506
Epoch 22/30
11/11 [==============================] - 1s 126ms/step - loss: 0.2406 - accuracy: 0.9411 - val_loss: 0.3203 - val_accuracy: 0.8966
Epoch 23/30
11/11 [==============================] - 1s 127ms/step - loss: 0.1892 - accuracy: 0.9577 - val_loss: 0.3191 - val_accuracy: 0.8851
Epoch 24/30
11/11 [==============================] - 1s 127ms/step - loss: 0.1869 - accuracy: 0.9594 - val_loss: 0.3246 - val_accuracy: 0.8621
Epoch 25/30
11/11 [==============================] - 1s 122ms/step - loss: 0.1898 - accuracy: 0.9487 - val_loss: 0.3217 - val_accuracy: 0.8851
Epoch 26/30
11/11 [==============================] - 1s 125ms/step - loss: 0.1731 - accuracy: 0.9523 - val_loss: 0.3280 - val_accuracy: 0.8506
Epoch 27/30
11/11 [==============================] - 1s 128ms/step - loss: 0.1445 - accuracy: 0.9687 - val_loss: 0.3213 - val_accuracy: 0.8851
Epoch 28/30
11/11 [==============================] - 1s 117ms/step - loss: 0.1441 - accuracy: 0.9718 - val_loss: 0.3212 - val_accuracy: 0.8621
Epoch 29/30
11/11 [==============================] - 1s 124ms/step - loss: 0.1250 - accuracy: 0.9762 - val_loss: 0.3232 - val_accuracy: 0.8851
Epoch 30/30
11/11 [==============================] - 1s 123ms/step - loss: 0.1460 - accuracy: 0.9687 - val_loss: 0.3218 - val_accuracy: 0.8736
5/5 - 0s - loss: 0.3297 - accuracy: 0.8889
Test accuracy: 0.8888888955116272
Test loss: 0.32971107959747314
2nd execution val_loss x train_loss graph
Some additional information:
My labels are hot encoded.
Frame step is 0.05s.
Frame size is 0.125s.
When running this configuration with the smaller dataset, I get a slightly different behaviour. The loss value falls more evenly, but kind of slowly. I tried increasing the epochs number, but after the 30th epoch the validation loss started to vary and rise as well.
My questions are:
What could be causing this validation loss problem?
What does it mean when a model has a high loss rate, but its accuracy remains ok?
I read about binary cross entropy but I don't know if I understand what the loss value means in my tests, could someone help me understanding these values?
Could this padding strategy be affecting the network performance?
Is my input data and its dimensions coherent considering LSTM definitions?
Could this be related to my dataset size?
What would be an acceptable validation loss rate?
Your validation loss being much higher than your training loss usually implies overfitting. Note that your val_loss isn't really high, it's just higher than the training loss. The validation accuracy isn't bad either, just much lower than on the training data which has effectively been memorized by your network.
Basically you need to reduce the strength of the model so it can generalize to the complexity of the problem at hand. Use more dropout and fewer parameters/layers.
I am fairly new to deep learning and right now am trying to predict consumer choices based on EEG data. The total dataset consists of 1045 EEG recordings each with a corresponding label, indicating Like or Dislike for a product. Classes are distributed as follows (44% Likes and 56% Dislikes). I read that Convolutional Neural Networks are suitable to work with raw EEG data so I tried to implement a network based on keras with the following structure:
from sklearn.model_selection import train_test_split

# Hold out 20% of the EEG recordings for validation/testing.
X_train, X_test, y_train, y_test = train_test_split(full_data, target, test_size=0.20, random_state=42)

# Labels as float32 column vectors, matching a single sigmoid output
# trained with binary_crossentropy.
y_train = np.asarray(y_train).astype('float32').reshape((-1,1))
y_test = np.asarray(y_test).astype('float32').reshape((-1,1))
# X_train.shape = ((836, 512, 14))
# y_train.shape = ((836, 1))

from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.layers import MaxPooling1D

# Small 1-D CNN over the (512 timesteps x 14 channels) EEG windows.
model = Sequential()
model.add(Conv1D(16, kernel_size=3, activation="relu", input_shape=(512,14)))
model.add(MaxPooling1D())
model.add(Conv1D(8, kernel_size=3, activation="relu"))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(Dense(1, activation="sigmoid"))

# Fix: `lr` is a deprecated alias in modern Keras; `learning_rate` is the
# supported keyword for Adam.
# NOTE(review): the huge first-epoch loss (292.6) suggests the raw EEG
# input is unscaled — consider standardizing per channel before training.
model.compile(optimizer=Adam(learning_rate=0.001), loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size = 64)
When I fit the model however the validation accuracy does not change at all with the following output:
Epoch 1/20
14/14 [==============================] - 0s 32ms/step - loss: 292.6353 - accuracy: 0.5383 - val_loss: 0.7884 - val_accuracy: 0.5407
Epoch 2/20
14/14 [==============================] - 0s 7ms/step - loss: 1.3748 - accuracy: 0.5598 - val_loss: 0.8860 - val_accuracy: 0.5502
Epoch 3/20
14/14 [==============================] - 0s 6ms/step - loss: 1.0537 - accuracy: 0.5598 - val_loss: 0.7629 - val_accuracy: 0.5455
Epoch 4/20
14/14 [==============================] - 0s 6ms/step - loss: 0.8827 - accuracy: 0.5598 - val_loss: 0.7010 - val_accuracy: 0.5455
Epoch 5/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7988 - accuracy: 0.5598 - val_loss: 0.8689 - val_accuracy: 0.5407
Epoch 6/20
14/14 [==============================] - 0s 6ms/step - loss: 1.0221 - accuracy: 0.5610 - val_loss: 0.6961 - val_accuracy: 0.5455
Epoch 7/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7415 - accuracy: 0.5598 - val_loss: 0.6945 - val_accuracy: 0.5455
Epoch 8/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7381 - accuracy: 0.5574 - val_loss: 0.7761 - val_accuracy: 0.5455
Epoch 9/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7326 - accuracy: 0.5598 - val_loss: 0.6926 - val_accuracy: 0.5455
Epoch 10/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7338 - accuracy: 0.5598 - val_loss: 0.6917 - val_accuracy: 0.5455
Epoch 11/20
14/14 [==============================] - 0s 7ms/step - loss: 0.7203 - accuracy: 0.5610 - val_loss: 0.6916 - val_accuracy: 0.5455
Epoch 12/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7192 - accuracy: 0.5610 - val_loss: 0.6914 - val_accuracy: 0.5455
Epoch 13/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7174 - accuracy: 0.5610 - val_loss: 0.6912 - val_accuracy: 0.5455
Epoch 14/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7155 - accuracy: 0.5610 - val_loss: 0.6911 - val_accuracy: 0.5455
Epoch 15/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7143 - accuracy: 0.5610 - val_loss: 0.6910 - val_accuracy: 0.5455
Epoch 16/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7129 - accuracy: 0.5610 - val_loss: 0.6909 - val_accuracy: 0.5455
Epoch 17/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7114 - accuracy: 0.5610 - val_loss: 0.6907 - val_accuracy: 0.5455
Epoch 18/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7103 - accuracy: 0.5610 - val_loss: 0.6906 - val_accuracy: 0.5455
Epoch 19/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7088 - accuracy: 0.5610 - val_loss: 0.6906 - val_accuracy: 0.5455
Epoch 20/20
14/14 [==============================] - 0s 6ms/step - loss: 0.7075 - accuracy: 0.5610 - val_loss: 0.6905 - val_accuracy: 0.5455
Thanks in advance for any insights!
The phenomenon you have run into is called underfitting. This happens when the amount or quality of your training data is insufficient, or your network architecture is too small and not capable of learning the problem.
Try normalizing your input data and experiment with different network architectures, learning rates and activation functions.
As @Muhammad Shahzad stated in his comment, adding some Dense layers after flattening would be a concrete architecture adaptation you should try.
You can also increase the number of epochs, and you should increase the size of the dataset. In addition, you can use:
# Augmentation pipeline: rescales pixels and applies random geometric and
# colour perturbations so each epoch sees slightly different training samples.
train_datagen= ImageDataGenerator(
rescale=1./255,  # normalise pixel values from [0, 255] to [0, 1]
shear_range=0.2,  # random shear, up to 0.2 (in radians, counter-clockwise)
zoom_range=0.2,  # random zoom in/out by up to 20%
horizontal_flip=True,  # randomly mirror images left-right
vertical_flip = True,  # randomly mirror images top-bottom
channel_shift_range=0.2,  # random shifts of colour-channel values
fill_mode='nearest'  # fill pixels exposed by transforms with the nearest value
)
to feed the model more varied data, which should help increase the validation accuracy.
I am building a training model for my character recognition system. During every epochs, I am getting the same accuracy and it doesn't improve. I have currently 4000 training images and 77 validation images.
My model is as follows:
# Binary character-recognition CNN.
# Fix: the original head was Dense(1, activation='softmax') trained with
# categorical_crossentropy. Softmax over a single unit always outputs 1.0,
# which makes the loss identically 0 and pins accuracy at 0.5 (exactly the
# training log shown below). With class_mode="binary" generators, the head
# must be ONE sigmoid unit trained with binary_crossentropy.
inputs = Input(shape=(32, 32, 3))
x = Conv2D(filters=64, kernel_size=5, activation='relu')(inputs)
x = MaxPooling2D()(x)
x = Conv2D(filters=32, kernel_size=3, activation='relu')(x)
x = MaxPooling2D()(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
# Single sigmoid unit -> P(positive class).
outputs = Dense(1, activation='sigmoid')(x)

model = Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',  # matches the binary sigmoid head
    metrics=['accuracy'])

# Pixel rescaling only; add augmentation to the train generator if desired.
data_gen_train = ImageDataGenerator(rescale=1/255)
data_gen_test = ImageDataGenerator(rescale=1/255)
data_gen_valid = ImageDataGenerator(rescale=1/255)

train_generator = data_gen_train.flow_from_directory(
    directory=r"./drive/My Drive/train_dataset",
    target_size=(32, 32), batch_size=10, class_mode="binary")
# Fix: the validation directory path was broken across two lines in the
# original paste; restored to a single raw-string literal.
valid_generator = data_gen_valid.flow_from_directory(
    directory=r"./drive/My Drive/validation_dataset",
    target_size=(32, 32), batch_size=2, class_mode="binary")
test_generator = data_gen_test.flow_from_directory(
    directory=r"./drive/My Drive/test_dataset",
    target_size=(32, 32),
    batch_size=6,
    class_mode="binary"
)
model.fit(
    train_generator,
    epochs=10,
    steps_per_epoch=400,  # 4000 training images / batch_size 10
    validation_steps=37,
    validation_data=valid_generator)
The result is as follows:
Found 4000 images belonging to 2 classes.
Found 77 images belonging to 2 classes.
Found 6 images belonging to 2 classes.
Epoch 1/10
400/400 [==============================] - 14s 35ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5811
Epoch 2/10
400/400 [==============================] - 13s 33ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5811
Epoch 3/10
400/400 [==============================] - 13s 34ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5676
Epoch 4/10
400/400 [==============================] - 13s 33ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5676
Epoch 5/10
400/400 [==============================] - 18s 46ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5541
Epoch 6/10
400/400 [==============================] - 13s 34ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5676
Epoch 7/10
400/400 [==============================] - 13s 33ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5676
Epoch 8/10
400/400 [==============================] - 13s 33ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5946
Epoch 9/10
400/400 [==============================] - 13s 33ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5811
Epoch 10/10
400/400 [==============================] - 13s 33ms/step - loss: 0.0000e+00 - accuracy: 0.5000 - val_loss: 0.0000e+00 - val_accuracy: 0.5811
<tensorflow.python.keras.callbacks.History at 0x7fa3a5f4a8d0>
If you are trying to recognize charaters of 2 classes, you should:
use class_mode="binary" in the flow_from_directory function
use binary_crossentropy as loss
your last layer must have 1 neuron with sigmoid activation function
In case there are more than 2 classes:
do not use class_mode="binary" in the flow_from_directory function
use categorical_crossentropy as loss
your last layer must have n neurons with softmax activation, where n stands for the number of classes
This is the model accuracy in train and validation
This is the model loss
Batch generator:
# Training batches with light rotation augmentation for the 3-class problem.
# NOTE(review): no rescaling / ResNet preprocess_input is applied here --
# confirm raw [0, 255] pixel input is intended for the pretrained backbone.
train_batches = ImageDataGenerator(rotation_range=8).flow_from_directory(train_path,target_size=(224,224), classes=['Covid','Normal','Pneumonia'],batch_size=64)
# Validation batches: no augmentation, evaluation only.
valid_batches = ImageDataGenerator().flow_from_directory(valid_path,target_size=(224,224), classes=['Covid','Normal','Pneumonia'],batch_size=32)
I'm using a pretrained model:
model=keras.applications.resnet.ResNet50(include_top=False, weights='imagenet', input_tensor=None, input_shape=(224, 224, 3), pooling=None, classes=1000)
Performed regularisation:
# Attach an L1 kernel regularizer to every layer that exposes that attribute.
# NOTE(review): setting `kernel_regularizer` on an already-built model is
# generally a no-op in Keras until the model is re-serialized and reloaded
# (e.g. via to_json + load weights) -- verify the penalty actually appears
# in the training loss.
regularizer = tf.keras.regularizers.l1(0.0001)
for layer in model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
Used chopped off the last fully connected layers of the pretrained model and added the layers below:
# Custom 3-class classification head stacked on the backbone's last layer.
x = AveragePooling2D(pool_size=(4, 4))(last_layer)
x = Flatten(name="flatten")(x)
# L2-regularized dense layer to limit overfitting on the small dataset.
x = Dense(64, activation="relu", kernel_regularizer=regularizers.l2(0.001))(x)
x = Dropout(0.6)(x)  # heavy dropout for the same reason
out = Dense(3, activation="softmax", name='output_layer')(x)  # Covid/Normal/Pneumonia
Froze the upper layers:
# Freeze everything except the last 7 layers so only the new head
# (and the very top of the backbone) is fine-tuned.
for layer in custom_resnet_model.layers[:-7]:
layer.trainable = False
Using Adam optimizer:
custom_resnet_model.compile(Adam(lr=.0001),loss='binary_crossentropy',metrics=['accuracy'])
And the model fit:
# Train for 25 epochs; 36 train steps x batch 64, 18 val steps x batch 32.
# NOTE(review): fit_generator is deprecated in modern Keras -- model.fit
# accepts generators directly.
history = custom_resnet_model.fit_generator(train_batches, steps_per_epoch=36,
validation_data=valid_batches, validation_steps=18, epochs=25, verbose=2)
As you can see below, towards the end the validation loss is all over the place:
Epoch 1/25
- 67s - loss: 0.7458 - accuracy: 0.7076 - val_loss: 0.7266 - val_accuracy: 0.7584
Epoch 2/25
- 64s - loss: 0.5467 - accuracy: 0.8139 - val_loss: 0.5276 - val_accuracy: 0.8022
Epoch 3/25
- 62s - loss: 0.4723 - accuracy: 0.8543 - val_loss: 0.4393 - val_accuracy: 0.8336
Epoch 4/25
- 62s - loss: 0.4274 - accuracy: 0.8800 - val_loss: 0.6082 - val_accuracy: 0.8384
Epoch 5/25
- 62s - loss: 0.4017 - accuracy: 0.8862 - val_loss: 0.5227 - val_accuracy: 0.8490
Epoch 6/25
- 62s - loss: 0.3698 - accuracy: 0.9004 - val_loss: 0.5691 - val_accuracy: 0.8532
Epoch 7/25
- 63s - loss: 0.3524 - accuracy: 0.9093 - val_loss: 0.4616 - val_accuracy: 0.8425
Epoch 8/25
- 63s - loss: 0.3379 - accuracy: 0.9183 - val_loss: 0.4604 - val_accuracy: 0.8467
Epoch 9/25
- 62s - loss: 0.3206 - accuracy: 0.9248 - val_loss: 0.5499 - val_accuracy: 0.8526
Epoch 10/25
- 61s - loss: 0.3240 - accuracy: 0.9244 - val_loss: 0.4745 - val_accuracy: 0.8526
Epoch 11/25
- 63s - loss: 0.3134 - accuracy: 0.9297 - val_loss: 0.4533 - val_accuracy: 0.8567
Epoch 12/25
- 62s - loss: 0.2995 - accuracy: 0.9337 - val_loss: 0.5668 - val_accuracy: 0.8555
Epoch 13/25
- 63s - loss: 0.2898 - accuracy: 0.9404 - val_loss: 0.6349 - val_accuracy: 0.8603
Epoch 14/25
- 62s - loss: 0.2845 - accuracy: 0.9386 - val_loss: 0.5612 - val_accuracy: 0.8650
Epoch 15/25
- 63s - loss: 0.2961 - accuracy: 0.9330 - val_loss: 0.7284 - val_accuracy: 0.8579
Epoch 16/25
- 64s - loss: 0.2759 - accuracy: 0.9429 - val_loss: 0.4720 - val_accuracy: 0.8650
Epoch 17/25
- 62s - loss: 0.2707 - accuracy: 0.9482 - val_loss: 0.9979 - val_accuracy: 0.8650
Epoch 18/25
- 63s - loss: 0.2744 - accuracy: 0.9416 - val_loss: 0.8098 - val_accuracy: 0.8733
Epoch 19/25
- 63s - loss: 0.2771 - accuracy: 0.9428 - val_loss: 0.1989 - val_accuracy: 0.8662
Epoch 20/25
- 62s - loss: 0.2647 - accuracy: 0.9440 - val_loss: 0.8921 - val_accuracy: 0.8686
Epoch 21/25
- 63s - loss: 0.2566 - accuracy: 0.9478 - val_loss: 0.3362 - val_accuracy: 0.8745
Epoch 22/25
- 62s - loss: 0.2645 - accuracy: 0.9402 - val_loss: 1.2044 - val_accuracy: 0.8662
Epoch 23/25
- 63s - loss: 0.2550 - accuracy: 0.9472 - val_loss: 0.6615 - val_accuracy: 0.8745
Epoch 24/25
- 62s - loss: 0.2486 - accuracy: 0.9519 - val_loss: 0.4722 - val_accuracy: 0.8674
Epoch 25/25
- 62s - loss: 0.2542 - accuracy: 0.9507 - val_loss: 0.8232 - val_accuracy: 0.8721
I have posted the code so that someone can pointout if I'm doing something wrong.
Try increasing the validation dataset.
Reason for the fluctuation may be "Unrepresentative Validation Dataset".
Please let me know if it solves your problem
Unrepresentative Validation Dataset
I am doing a Binary classification of IMDB movie review data into Positive or Negative Sentiment.
I have 25K movie reviews and corresponding label.
Preprocessing:
Removed the stop words and split the data 70:30 into training and test sets, i.e. 17.5K training and 7.5K test. The 17.5K training set was further divided into 14K train and 3.5K validation samples, as used in the keras model.fit method.
Each processed movie review has been converted to TF-IDF vector using Keras text processing module.
Here is my Fully Connected Architecture I used in Keras Dense class
def model_param(self):
    """Build and compile the fully connected sentiment classifier.

    One 32-unit ReLU hidden layer with 50% dropout feeding a single
    sigmoid output unit, optimised with Nesterov-momentum SGD on a
    binary cross-entropy loss. Stores the compiled model on self.model.
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation
    from keras.optimizers import SGD
    from keras import regularizers

    n_features = self.x_train_std.shape[1]  # TF-IDF vocabulary size
    net = Sequential()
    net.add(Dense(32, activation='relu', input_dim=n_features))
    net.add(Dropout(0.5))  # regularisation against overfitting
    net.add(Dense(1, activation='sigmoid'))
    optimiser = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    net.compile(loss='binary_crossentropy',
                optimizer=optimiser,
                metrics=['accuracy'])
    self.model = net
def fit(self):
    """Train the compiled network on the standardized training data.

    Holds out 20% of the training samples for validation and runs
    50 epochs with mini-batches of 128.
    """
    self.model.fit(
        self.x_train_std,
        self.y_train,
        validation_split=0.20,
        epochs=50,
        batch_size=128,
    )
As you see, I tried first without Dropout and as usual I got training accuracy as 1.0 but validation was poor as overfitting was happening. So I added Dropout to prevent overfitting
However inspite of trying multiple dropout ratio, adding another layer with different no. of units in it, changing learning rate I am still getting overfitting on validation dataset. Gets stuck at 85% while training keeps increasing to 99% and so on. Even changed the Epochs from 10 to 50
What could be going wrong here
Train on 14000 samples, validate on 3500 samples
Epoch 1/50
14000/14000 [==============================] - 0s - loss: 0.5684 - acc: 0.7034 - val_loss: 0.3794 - val_acc: 0.8431
Epoch 2/50
14000/14000 [==============================] - 0s - loss: 0.3630 - acc: 0.8388 - val_loss: 0.3304 - val_acc: 0.8549
Epoch 3/50
14000/14000 [==============================] - 0s - loss: 0.2977 - acc: 0.8749 - val_loss: 0.3271 - val_acc: 0.8591
Epoch 4/50
14000/14000 [==============================] - 0s - loss: 0.2490 - acc: 0.8991 - val_loss: 0.3302 - val_acc: 0.8580
Epoch 5/50
14000/14000 [==============================] - 0s - loss: 0.2251 - acc: 0.9086 - val_loss: 0.3388 - val_acc: 0.8546
Epoch 6/50
14000/14000 [==============================] - 0s - loss: 0.2021 - acc: 0.9189 - val_loss: 0.3532 - val_acc: 0.8523
Epoch 7/50
14000/14000 [==============================] - 0s - loss: 0.1797 - acc: 0.9286 - val_loss: 0.3670 - val_acc: 0.8529
Epoch 8/50
14000/14000 [==============================] - 0s - loss: 0.1611 - acc: 0.9350 - val_loss: 0.3860 - val_acc: 0.8543
Epoch 9/50
14000/14000 [==============================] - 0s - loss: 0.1427 - acc: 0.9437 - val_loss: 0.4077 - val_acc: 0.8529
Epoch 10/50
14000/14000 [==============================] - 0s - loss: 0.1344 - acc: 0.9476 - val_loss: 0.4234 - val_acc: 0.8526
Epoch 11/50
14000/14000 [==============================] - 0s - loss: 0.1222 - acc: 0.9534 - val_loss: 0.4473 - val_acc: 0.8506
Epoch 12/50
14000/14000 [==============================] - 0s - loss: 0.1131 - acc: 0.9546 - val_loss: 0.4718 - val_acc: 0.8497
Epoch 13/50
14000/14000 [==============================] - 0s - loss: 0.1079 - acc: 0.9559 - val_loss: 0.4818 - val_acc: 0.8526
Epoch 14/50
14000/14000 [==============================] - 0s - loss: 0.0954 - acc: 0.9630 - val_loss: 0.5057 - val_acc: 0.8494
Epoch 15/50
14000/14000 [==============================] - 0s - loss: 0.0906 - acc: 0.9636 - val_loss: 0.5229 - val_acc: 0.8557
Epoch 16/50
14000/14000 [==============================] - 0s - loss: 0.0896 - acc: 0.9657 - val_loss: 0.5387 - val_acc: 0.8497
Epoch 17/50
14000/14000 [==============================] - 0s - loss: 0.0816 - acc: 0.9666 - val_loss: 0.5579 - val_acc: 0.8463
Epoch 18/50
14000/14000 [==============================] - 0s - loss: 0.0762 - acc: 0.9709 - val_loss: 0.5704 - val_acc: 0.8491
Epoch 19/50
14000/14000 [==============================] - 0s - loss: 0.0718 - acc: 0.9723 - val_loss: 0.5834 - val_acc: 0.8454
Epoch 20/50
14000/14000 [==============================] - 0s - loss: 0.0633 - acc: 0.9752 - val_loss: 0.6032 - val_acc: 0.8494
Epoch 21/50
14000/14000 [==============================] - 0s - loss: 0.0687 - acc: 0.9724 - val_loss: 0.6181 - val_acc: 0.8480
Epoch 22/50
14000/14000 [==============================] - 0s - loss: 0.0614 - acc: 0.9762 - val_loss: 0.6280 - val_acc: 0.8503
Epoch 23/50
14000/14000 [==============================] - 0s - loss: 0.0620 - acc: 0.9756 - val_loss: 0.6407 - val_acc: 0.8500
Epoch 24/50
14000/14000 [==============================] - 0s - loss: 0.0536 - acc: 0.9794 - val_loss: 0.6563 - val_acc: 0.8511
Epoch 25/50
14000/14000 [==============================] - 0s - loss: 0.0538 - acc: 0.9791 - val_loss: 0.6709 - val_acc: 0.8500
Epoch 26/50
14000/14000 [==============================] - 0s - loss: 0.0507 - acc: 0.9807 - val_loss: 0.6869 - val_acc: 0.8491
Epoch 27/50
14000/14000 [==============================] - 0s - loss: 0.0528 - acc: 0.9794 - val_loss: 0.7002 - val_acc: 0.8483
Epoch 28/50
14000/14000 [==============================] - 0s - loss: 0.0465 - acc: 0.9810 - val_loss: 0.7083 - val_acc: 0.8469
Epoch 29/50
14000/14000 [==============================] - 0s - loss: 0.0504 - acc: 0.9796 - val_loss: 0.7153 - val_acc: 0.8497
Epoch 30/50
14000/14000 [==============================] - 0s - loss: 0.0477 - acc: 0.9819 - val_loss: 0.7232 - val_acc: 0.8480
Epoch 31/50
14000/14000 [==============================] - 0s - loss: 0.0475 - acc: 0.9819 - val_loss: 0.7343 - val_acc: 0.8469
Epoch 32/50
14000/14000 [==============================] - 0s - loss: 0.0459 - acc: 0.9819 - val_loss: 0.7352 - val_acc: 0.8500
Epoch 33/50
14000/14000 [==============================] - 0s - loss: 0.0426 - acc: 0.9807 - val_loss: 0.7429 - val_acc: 0.8511
Epoch 34/50
14000/14000 [==============================] - 0s - loss: 0.0396 - acc: 0.9846 - val_loss: 0.7576 - val_acc: 0.8477
Epoch 35/50
14000/14000 [==============================] - 0s - loss: 0.0420 - acc: 0.9836 - val_loss: 0.7603 - val_acc: 0.8506
Epoch 36/50
14000/14000 [==============================] - 0s - loss: 0.0359 - acc: 0.9856 - val_loss: 0.7683 - val_acc: 0.8497
Epoch 37/50
14000/14000 [==============================] - 0s - loss: 0.0377 - acc: 0.9849 - val_loss: 0.7823 - val_acc: 0.8520
Epoch 38/50
14000/14000 [==============================] - 0s - loss: 0.0352 - acc: 0.9861 - val_loss: 0.7912 - val_acc: 0.8500
Epoch 39/50
14000/14000 [==============================] - 0s - loss: 0.0390 - acc: 0.9845 - val_loss: 0.8025 - val_acc: 0.8489
Epoch 40/50
14000/14000 [==============================] - 0s - loss: 0.0371 - acc: 0.9853 - val_loss: 0.8128 - val_acc: 0.8494
Epoch 41/50
14000/14000 [==============================] - 0s - loss: 0.0367 - acc: 0.9848 - val_loss: 0.8184 - val_acc: 0.8503
Epoch 42/50
14000/14000 [==============================] - 0s - loss: 0.0331 - acc: 0.9871 - val_loss: 0.8264 - val_acc: 0.8500
Epoch 43/50
14000/14000 [==============================] - 0s - loss: 0.0338 - acc: 0.9871 - val_loss: 0.8332 - val_acc: 0.8483
Epoch 44/50