Training loss higher than validation loss - python

I am trying to train a regression model of a dummy function with 3 variables with fully connected neural nets in Keras and I always get a training loss much higher than the validation loss.
I split the data set in 2/3 for training and 1/3 for validation. I have tried lots of different things:
changing the architecture
adding more neurons
using regularization
using different batch sizes
Still the training error is one order of magnitude higher than the validation error:
Epoch 5995/6000
4020/4020 [==============================] - 0s 78us/step - loss: 1.2446e-04 - mean_squared_error: 1.2446e-04 - val_loss: 1.3953e-05 - val_mean_squared_error: 1.3953e-05
Epoch 5996/6000
4020/4020 [==============================] - 0s 98us/step - loss: 1.2549e-04 - mean_squared_error: 1.2549e-04 - val_loss: 1.5730e-05 - val_mean_squared_error: 1.5730e-05
Epoch 5997/6000
4020/4020 [==============================] - 0s 105us/step - loss: 1.2500e-04 - mean_squared_error: 1.2500e-04 - val_loss: 1.4372e-05 - val_mean_squared_error: 1.4372e-05
Epoch 5998/6000
4020/4020 [==============================] - 0s 96us/step - loss: 1.2500e-04 - mean_squared_error: 1.2500e-04 - val_loss: 1.4151e-05 - val_mean_squared_error: 1.4151e-05
Epoch 5999/6000
4020/4020 [==============================] - 0s 80us/step - loss: 1.2487e-04 - mean_squared_error: 1.2487e-04 - val_loss: 1.4342e-05 - val_mean_squared_error: 1.4342e-05
Epoch 6000/6000
4020/4020 [==============================] - 0s 79us/step - loss: 1.2494e-04 - mean_squared_error: 1.2494e-04 - val_loss: 1.4769e-05 - val_mean_squared_error: 1.4769e-05
This makes no sense, please help!
Edit: this is the full code
I have 6000 training examples
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 26 13:40:03 2018
#author: Michele
"""
#from keras.datasets import reuters
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras import optimizers
import matplotlib.pyplot as plt
import os
import pylab
from keras.constraints import maxnorm
from sklearn.model_selection import train_test_split
from keras import regularizers
from sklearn.preprocessing import MinMaxScaler
import math
from sklearn.metrics import mean_squared_error
import keras
# fix random seed for reproducibility
seed=7
np.random.seed(seed)
dataset = np.loadtxt("BabbaX.csv", delimiter=",")
#split into input (X) and output (Y) variables
#x = dataset.transpose()[:,10:15] #only use power
x = dataset
del(dataset) # delete container
dataset = np.loadtxt("BabbaY.csv", delimiter=",")
#split into input (X) and output (Y) variables
y = dataset.transpose()
del(dataset) # delete container
#scale labels from 0 to 1
scaler = MinMaxScaler(feature_range=(0, 1))
y = np.reshape(y, (y.shape[0],1))
y = scaler.fit_transform(y)
lenData=x.shape[0]
x=np.transpose(x)
xtrain=x[:,0:round(lenData*0.67)]
ytrain=y[0:round(lenData*0.67),]
xtest=x[:,round(lenData*0.67):round(lenData*1.0)]
ytest=y[round(lenData*0.67):round(lenData*1.0)]
xtrain=np.transpose(xtrain)
xtest=np.transpose(xtest)
l2_lambda = 0.1 #reg factor
#sequential type of model
model = Sequential()
#stacking layers with .add
units=300
#model.add(Dense(units, input_dim=xtest.shape[1], activation='relu', kernel_initializer='normal', kernel_regularizer=regularizers.l2(l2_lambda), kernel_constraint=maxnorm(3)))
model.add(Dense(units, activation='relu', input_dim=xtest.shape[1]))
#model.add(Dropout(0.1))
model.add(Dense(units, activation='relu'))
#model.add(Dropout(0.1))
model.add(Dense(1)) #no activation function should be used for the output layer
#It is recommended to leave the parameters of this optimizer at their default values
#(except the learning rate, which can be freely tuned).
rms = optimizers.RMSprop(lr=0.00001, rho=0.9, epsilon=None, decay=0)
adam = keras.optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)
#adam = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
#configure learning process with .compile
model.compile(optimizer=adam, loss='mean_squared_error', metrics=['mse'])
# fit the model (iterate on the training data in batches)
history = model.fit(xtrain, ytrain, nb_epoch=1000, batch_size=round(xtest.shape[0]/100),
                    validation_data=(xtest, ytest), shuffle=True, verbose=2)
#extract weights for each layer
weights = [layer.get_weights() for layer in model.layers]
#evaluate on training data set
valuesTrain=model.predict(xtrain)
#evaluate on test data set
valuesTest=model.predict(xtest)
#invert predictions
valuesTrain = scaler.inverse_transform(valuesTrain)
ytrain = scaler.inverse_transform(ytrain)
valuesTest = scaler.inverse_transform(valuesTest)
ytest = scaler.inverse_transform(ytest)

TL;DR:
When a model is learning well and quickly, the validation loss can be lower than the training loss, since validation happens on the updated model at the end of the epoch, while the reported training loss had none (for the first batches) or only some (for later batches) of that epoch's weight updates applied.
Okay I think I found out what's happening here. I used the following code to test this.
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
np.random.seed(7)
N_DATA = 6000
x = np.random.uniform(-10, 10, (3, N_DATA))
y = x[0] + x[1]**2 + x[2]**3
xtrain = x[:, 0:round(N_DATA*0.67)]
ytrain = y[0:round(N_DATA*0.67)]
xtest = x[:, round(N_DATA*0.67):N_DATA]
ytest = y[round(N_DATA*0.67):N_DATA]
xtrain = np.transpose(xtrain)
xtest = np.transpose(xtest)
model = Sequential()
model.add(Dense(10, activation='relu', input_dim=3))
model.add(Dense(5, activation='relu'))
model.add(Dense(1))
adam = keras.optimizers.Adam()
# configure learning process with .compile
model.compile(optimizer=adam, loss='mean_squared_error', metrics=['mse'])
# fit the model (iterate on the training data in batches)
history = model.fit(xtrain, ytrain, nb_epoch=50,
                    batch_size=round(N_DATA/100),
                    validation_data=(xtest, ytest), shuffle=False, verbose=2)
plt.plot(history.history['mean_squared_error'])
plt.plot(history.history['val_loss'])
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
This is essentially the same as your code and replicates the problem, which is not actually a problem. Simply change
history = model.fit(xtrain, ytrain, nb_epoch=50,
                    batch_size=round(N_DATA/100),
                    validation_data=(xtest, ytest), shuffle=False, verbose=2)
to
history = model.fit(xtrain, ytrain, nb_epoch=50,
                    batch_size=round(N_DATA/100),
                    validation_data=(xtrain, ytrain), shuffle=False, verbose=2)
So instead of validating with your validation data, you validate using the training data again, which leads to exactly the same behaviour. Weird, isn't it? Actually, no. What I think is happening is:
The mean_squared_error that Keras reports for each epoch is the training loss measured before the gradient updates have been applied, while validation happens after the gradients have been applied, which makes sense.
With the highly stochastic problems NNs are usually applied to, you do not see this, because the data varies so much that the freshly updated weights are simply not good enough to describe the validation data; the slight overfitting effect on the training data is still much stronger, so even after updating the weights the validation loss is higher than the training loss from before. That is only how I think it is, though; I might be completely wrong.
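One way to check this (a sketch, not part of the original experiment) is to re-evaluate the training set at the end of each epoch with a custom callback, so that both losses are computed on the same, fully updated weights; the post-update training loss should then drop to roughly the validation loss:
import keras

class TrainLossAfterEpoch(keras.callbacks.Callback):
    # re-evaluate the training set after the epoch's weight updates have been applied
    def __init__(self, xtrain, ytrain):
        super(TrainLossAfterEpoch, self).__init__()
        self.xtrain = xtrain
        self.ytrain = ytrain

    def on_epoch_end(self, epoch, logs=None):
        post_update_loss = self.model.evaluate(self.xtrain, self.ytrain, verbose=0)
        print('post-update training loss:', post_update_loss)

# usage (with the variables from the snippet above):
# model.fit(xtrain, ytrain, epochs=50, batch_size=round(N_DATA/100),
#           validation_data=(xtest, ytest),
#           callbacks=[TrainLossAfterEpoch(xtrain, ytrain)])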

One possible fix is to increase the size of the training data and lower the size of the validation data. Your model will then be trained on more samples, which may include some complex ones as well, and can be validated on the remaining samples. Try something like 80% training / 20% validation, or any other split with a somewhat larger training share than you used previously.
If you don't want to change the sizes of the training and validation sets, then you can try changing the random seed to some other value so that you get a training set with different samples, which might help train the model better.
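A minimal sketch of both suggestions, using scikit-learn's train_test_split (which your code already imports); the 80/20 ratio and the seed value are just examples:
from sklearn.model_selection import train_test_split

# 80% training / 20% validation, with a different random seed than before
# (x here is assumed to be (n_samples, n_features), i.e. before the transpose in the original code)
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2, random_state=42)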
Check this answer here to get a better understanding of the other possible reasons.
Check this link if you want a more detailed explanation with an example. @Michele

If the training loss is a little higher than, or close to, the validation loss, it means the model is not overfitting.
The goal is always to make the best use of the available features so as to reduce overfitting and improve validation and test accuracy.
A probable reason why your training loss is consistently higher is the features and data you are using for training.
Please refer to the following link and observe the training and validation loss when dropout is used:
http://danielnouri.org/notes/2014/12/17/using-convolutional-neural-nets-to-detect-facial-keypoints-tutorial/
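For reference, dropout itself is a common legitimate reason for the training loss being higher than the validation loss, because dropout is active when the training loss is computed but disabled at validation time. A minimal Keras sketch (layer sizes are illustrative, not taken from the question or the linked tutorial):
from keras.models import Sequential
from keras.layers import Dense, Dropout

model = Sequential()
model.add(Dense(300, activation='relu', input_dim=3))
model.add(Dropout(0.5))   # active while the training loss is computed
model.add(Dense(300, activation='relu'))
model.add(Dropout(0.5))   # disabled at validation/inference time
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')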

Related

Accuracy of same validation dataset differs between last epoch and after fit

The following code gives a log ending with
Epoch 19/20
1/1 [==============================] - 0s 473ms/step - loss: 1.4018 - accuracy: 0.8750 - val_loss: 1.8656 - val_accuracy: 0.8900
Epoch 20/20
1/1 [==============================] - 0s 444ms/step - loss: 0.5904 - accuracy: 0.8750 - val_loss: 2.1255 - val_accuracy: 0.8700
get_dataset: validation
Found 1000 files belonging to 2 classes.
Using 100 files for validation.
4/4 [==============================] - 1s 81ms/step
eval acc: 0.81
My question is:
Why is the val_accuracy after the last epoch (0.87) different from the eval acc (0.81) after the fit?
In my code, I try to use the same dataset for the validation of each epoch during fit and the additional validation afterwards.
[Update 1, 2022-07-19:
Obviously, the two accuracy calculations don't really use the same data. How can I debug which data is actually used?
[Update 3, 2022-07-20: I have followed the data into TensorFlow. The last thing I see is that in Model.evaluate (during fit) and Model.predict the x.filenames are equal. I did not manage to debug much further, because soon in quick_execute the __inference_test_function_248219 resp. the __inference_predict_function_231438 are evaluated outside Python, and the arguments are tensors with dtype=resource, whose contents I cannot see.]
I have deliberately removed my class balancing code to keep my example small. I know that this makes the accuracies less useful, but I don't care about that for now.
Note that get_dataset('validation') is only called once at the beginning of the fit, not at each epoch.
I have now also set max_queue_size=0, use_multiprocessing=False, workers=0 (as seen here, found via this related SO question about TensorFlow 1), but this did not make the accuracies equal.
]
Code:
import tensorflow as tf
from sklearn.metrics import accuracy_score
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.preprocessing import image_dataset_from_directory
inputs = tf.keras.Input(shape=(224, 224, 3))
base_model = tf.keras.applications.ResNet50(weights='imagenet', include_top=False)
base_output = base_model(inputs)
base_model.trainable = False
out = Flatten(name='flat')(base_output)
out = Dense(1, activation='sigmoid')(out)
model = Model(inputs=inputs, outputs=out)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
def get_dataset(subset):
    print('get_dataset:', subset)
    return image_dataset_from_directory(
        'data-nodup-1000',
        labels="inferred",
        label_mode='binary',
        color_mode="rgb",
        image_size=(224, 224),
        shuffle=True,
        seed=1,
        validation_split=0.1,
        subset=subset,
        crop_to_aspect_ratio=False,
    )

model.fit(
    get_dataset('training'),
    steps_per_epoch=1,
    epochs=20,
    validation_data=get_dataset('validation'),
    max_queue_size=0,
    use_multiprocessing=False,
    workers=0,
)
val_dataset = get_dataset('validation')
true_class = tf.concat([y for x, y in val_dataset], axis=0)
pred = model.predict(val_dataset)
pred_class = pred >= .5
print('eval acc:', accuracy_score(true_class, pred_class))
[Update 2, 2022-07-19:
I can also reproduce the behavior with the deprecated ImageDataGenerator, using
from tensorflow.keras.applications.resnet50 import preprocess_input
from keras_preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    validation_split=0.1,
)

def get_dataset(subset):
    print('get_dataset:', subset)
    return datagen.flow_from_directory(
        'data-nodup-1000',
        class_mode='binary',
        target_size=(224, 224),
        shuffle=True,
        seed=1,
        subset=subset,
    )
and
true_class = val_dataset.labels
]
[Update 4, 2022-07-21: Note that deactivating shuffling of validation data by setting shuffle=(subset == 'training') makes the two validation accuracies equal. This is not a workaround, however, because the validation set then consists only of class 1, since flow_from_directory doesn't do stratification.
]
My environment:
I am using all up-to-date libraries, like tensorflow 2.9.1 and sklearn 1.1.1 (via pip-compile -U).
The folder data-nodup-1000 contains one subfolder with 113 files of class 0, and one subfolder with 887 files of class 1.
I have now found out that in TensorFlow 2.9.1 model.predict uses the second iteration of the dataset, which is shuffled differently than the first iteration!
It even uses the second iteration when I directly call model.predict(get_dataset('validation'))!
Therefore, the entries of true_class and pred do not match.
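A quick way to see this effect (a diagnostic sketch, not part of the original code) is to collect the labels from two passes over the same dataset object and compare them:
# diagnostic sketch: gather the labels from two passes over the same dataset object
labels_pass1 = tf.concat([y for _, y in val_dataset], axis=0)
labels_pass2 = tf.concat([y for _, y in val_dataset], axis=0)
# with shuffle=True the two passes come back in different orders, so this prints False
print(tf.reduce_all(labels_pass1 == labels_pass2).numpy())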
Switching to TensorFlow 2.10.0-rc3 and its tf.keras.utils.split_dataset makes the accuracies equal.
Here's the updated code:
import tensorflow as tf
from sklearn.metrics import accuracy_score
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.preprocessing import image_dataset_from_directory
inputs = tf.keras.Input(shape=(224, 224, 3))
base_model = tf.keras.applications.ResNet50(weights='imagenet', include_top=False)
base_output = base_model(inputs)
base_model.trainable = False
out = Flatten(name='flat')(base_output)
out = Dense(1, activation='sigmoid')(out)
model = Model(inputs=inputs, outputs=out)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
dataset = image_dataset_from_directory(
    'data-synthetic',
    labels="inferred",
    label_mode='binary',
    color_mode="rgb",
    image_size=(224, 224),
    shuffle=True,
    seed=1,
    crop_to_aspect_ratio=False,
)
train_dataset, val_dataset = tf.keras.utils.split_dataset(dataset, right_size=0.1)
model.fit(
    train_dataset,
    steps_per_epoch=1,
    epochs=20,
    validation_data=val_dataset,
    max_queue_size=0,
    use_multiprocessing=False,
    workers=0,
)
true_class = tf.concat([y for x, y in val_dataset], axis=0)
pred = model.predict(val_dataset)
pred_class = pred >= .5
print('eval acc:', accuracy_score(true_class, pred_class))
which correctly yields:
Epoch 19/20
1/1 [==============================] - 0s 438ms/step - loss: 0.4426 - accuracy: 0.9062 - val_loss: 0.4658 - val_accuracy: 0.8800
Epoch 20/20
1/1 [==============================] - 0s 444ms/step - loss: 2.1619 - accuracy: 0.8438 - val_loss: 0.5886 - val_accuracy: 0.8900
4/4 [==============================] - 1s 87ms/step
eval acc: 0.89
There are a few points about your data that cause this:
First, your data is highly imbalanced (roughly an 8-to-1 label ratio), which makes the model prone to overfitting and the cross-validation estimate inaccurate.
Second, in the get_dataset function, shuffle is set to True, so every call to get_dataset() reshuffles your data. Because (1) your validation set is very small and (2) the train/val split is not stratified over the labels, the validation metrics vary a lot due to this shuffling.
Suggestions to solve this:
Call get_dataset() only once for the training and once for the validation dataset before fitting the model, and save them as variables; if there is no sequential order in your data, maybe set shuffle=False.
(Optional) If possible, make your dataset more balanced with techniques such as data augmentation, over-/under-sampling, etc.
def get_dataset(subset):
    return image_dataset_from_directory(
        'data-nodup-1000',
        labels="inferred",
        label_mode='binary',
        color_mode="rgb",
        image_size=(224, 224),
        shuffle=False,
        seed=0,
        validation_split=0.1,
        subset=subset,
        crop_to_aspect_ratio=False,
    )

train_dataset = get_dataset('training')
val_dataset = get_dataset('validation')

model.fit(
    train_dataset,
    steps_per_epoch=1,
    epochs=20,
    validation_data=val_dataset,
)
true_class = tf.concat([y for x, y in val_dataset], axis=0)
pred = model.predict(val_dataset)
pred_class = pred >= .5
print('eval acc:', accuracy_score(true_class, pred_class))
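For the imbalance point, one option that does not require resampling is the class_weight argument of Keras's model.fit; a sketch (the weights below are just the rough inverse class frequencies from the question, so they are illustrative):
# ~113 samples of class 0 vs ~887 of class 1 in the question's data
class_weight = {0: 887 / 113, 1: 1.0}

model.fit(
    train_dataset,
    epochs=20,
    validation_data=val_dataset,
    class_weight=class_weight,
)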

TensorFlow 2.0 tf.random.set_seed not working since I am getting different results

I am using tf.random.set_seed to ensure the reproducibility of my experiments, but I get different results in terms of loss after training my model multiple times. I am monitoring the learning curve of each experiment using TensorBoard, but I am getting different values of loss and accuracy.
Providing the solution here (in the answer section), even though it is present in the comments, for the benefit of the community.
To reproduce the same results, you can create a function as below and pass seeds directly to the layers, as Daniel mentioned in the comments.
def reset_random_seeds():
    os.environ['PYTHONHASHSEED'] = str(2)
    tf.random.set_seed(2)
    np.random.seed(2)
    random.seed(2)
Please refer to the complete code below, which reproduces the same results:
import os
####*IMPORTANT*: this line has to run *before* importing tensorflow
os.environ['PYTHONHASHSEED']=str(2)
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers
import random
import pandas as pd
import numpy as np
def reset_random_seeds():
    os.environ['PYTHONHASHSEED'] = str(2)
    tf.random.set_seed(2)
    np.random.seed(2)
    random.seed(2)

#make some random data
reset_random_seeds()
NUM_ROWS = 1000
NUM_FEATURES = 10
random_data = np.random.normal(size=(NUM_ROWS, NUM_FEATURES))
df = pd.DataFrame(data=random_data, columns=['x_' + str(ii) for ii in range(NUM_FEATURES)])
y = df.sum(axis=1) + np.random.normal(size=(NUM_ROWS))

def run(x, y):
    reset_random_seeds()
    model = keras.Sequential([
        keras.layers.Dense(40, input_dim=df.shape[1], activation='relu'),
        keras.layers.Dense(20, activation='relu'),
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='linear')
    ])
    NUM_EPOCHS = 100
    model.compile(optimizer='adam', loss='mean_squared_error')
    model.fit(x, y, epochs=NUM_EPOCHS, verbose=0)
    predictions = model.predict(x).flatten()
    loss = model.evaluate(x, y)  #This prints out the loss by side-effect
#With Tensorflow 2.0 this is now reproducible!
run(df, y)
run(df, y)
run(df, y)
Output:
32/32 [==============================] - 0s 2ms/step - loss: 0.5633
32/32 [==============================] - 0s 2ms/step - loss: 0.5633
32/32 [==============================] - 0s 2ms/step - loss: 0.5633
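If you also want to pin the initialization of individual layers, as Daniel's comment suggests, the initializers accept a seed as well; a small sketch (the layer size and seed value are illustrative):
keras.layers.Dense(40, input_dim=df.shape[1], activation='relu',
                   kernel_initializer=keras.initializers.glorot_uniform(seed=2))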
For TensorFlow 2, use tf.random.set_seed(seed) instead.
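A minimal check that the global seed takes effect (run it twice in fresh processes; the printed value should be identical):
import tensorflow as tf

tf.random.set_seed(7)
print(tf.random.uniform([1]))   # same value on every fresh run with the same seed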

Keras Binary Classifier Tutorial Example gives only 50% validation accuracy

Keras Binary Classifier Tutorial Example gives only 50% validation accuracy.
Near-50% accuracy can be obtained from an untrained classifier itself for binary classification.
This example is straight from https://keras.io/getting-started/sequential-model-guide/
import numpy as np
import tensorflow as tf
from tensorflow_core.python.keras.models import Sequential
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
np.random.seed(10)
# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((800, 20))
y_test = np.random.randint(2, size=(800, 1))
model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          epochs=50,
          batch_size=128,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, batch_size=128)
Accuracy output (I tried multiple trials and also increased the number of hidden layers):
Epoch 50/50 1000/1000 [==============================] - 0s 211us/sample - loss: 0.6905 - accuracy: 0.5410 - val_loss: 0.6959 - val_accuracy: 0.4812
Could someone help me understand if anything is wrong here?
How to increase the accuracy for this "example" problem presented in the tutorial?
If you train a classifier on random examples, you will always get approximately 50% accuracy on the validation data (here represented by x_test), because the training samples are assigned random classes and the validation/test set is labeled randomly as well. That is why the accuracy hovers around 50%.
The more epochs you train on the training set, the higher the training accuracy will climb, as an effect of overfitting.
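Because the labels are drawn independently of the features, no classifier can beat the ~50% base rate on this data; a quick sanity check (a sketch reusing the tutorial's random labels):
import numpy as np

np.random.seed(10)
y_test = np.random.randint(2, size=(800, 1))   # random labels, as in the tutorial snippet
always_one = np.ones_like(y_test)              # a trivial constant "classifier"
print('constant-prediction accuracy:', (always_one == y_test).mean())   # ~0.5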

tf.keras predictions are bad while evaluation is good

I'm programming a model in tf.keras, and running model.evaluate() on the training set usually yields ~96% accuracy. My evaluation on the test set is usually close, about 93%. However, when I predict manually, the model is usually inaccurate. This is my code:
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import pandas as pd
!git clone https://github.com/DanorRon/data
%cd data
!ls
batch_size = 100
epochs = 15
alpha = 0.001
lambda_ = 0.001
h1 = 50
train = pd.read_csv('/content/data/mnist_train.csv.zip')
test = pd.read_csv('/content/data/mnist_test.csv.zip')
train = train.loc['1':'5000', :]
test = test.loc['1':'2000', :]
train = train.sample(frac=1).reset_index(drop=True)
test = test.sample(frac=1).reset_index(drop=True)
x_train = train.loc[:, '1x1':'28x28']
y_train = train.loc[:, 'label']
x_test = test.loc[:, '1x1':'28x28']
y_test = test.loc[:, 'label']
x_train = x_train.values
y_train = y_train.values
x_test = x_test.values
y_test = y_test.values
nb_classes = 10
targets = y_train.reshape(-1)
y_train_onehot = np.eye(nb_classes)[targets]
nb_classes = 10
targets = y_test.reshape(-1)
y_test_onehot = np.eye(nb_classes)[targets]
model = tf.keras.Sequential()
model.add(layers.Dense(784, input_shape=(784,), kernel_initializer='random_uniform', bias_initializer='zeros'))
model.add(layers.Dense(h1, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(lambda_), kernel_initializer='random_uniform', bias_initializer='zeros'))
model.add(layers.Dense(10, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2(lambda_), kernel_initializer='random_uniform', bias_initializer='zeros'))
model.compile(optimizer='SGD',
              loss='mse',
              metrics=['categorical_accuracy'])
model.fit(x_train, y_train_onehot, epochs=epochs, batch_size=batch_size)
model.evaluate(x_test, y_test_onehot, batch_size=batch_size)
prediction = model.predict_classes(x_test)
print(prediction)
print(y_test[1:])
I've heard that a lot of the time when people have this problem, it's just a problem with data input. But I can't see any problem with that here since it almost always predicts wrongly (about as much as you would expect if it was random). How do I fix this problem?
Edit: Here are the specific results:
Last training step:
Epoch 15/15
49999/49999 [==============================] - 3s 70us/sample - loss: 0.0309 - categorical_accuracy: 0.9615
Evaluation output:
2000/2000 [==============================] - 0s 54us/sample - loss: 0.0352 - categorical_accuracy: 0.9310
[0.03524150168523192, 0.931]
Output from model.predict_classes:
[9 9 0 ... 5 0 5]
Output from print(y_test):
[9 0 0 7 6 8 5 1 3 2 4 1 4 5 8 4 9 2 4]
The first thing is that your loss function is wrong: you are in a multi-class classification setting, and you are using a loss function suitable for regression, not classification (MSE).
Change your model compilation to:
model.compile(loss='categorical_crossentropy',
              optimizer='SGD',
              metrics=['accuracy'])
See the Keras MNIST MLP example for corroboration, and my own answer to What function defines accuracy in Keras when the loss is mean squared error (MSE)? for more details (although here you actually have the inverse problem, i.e. a regression loss in a classification setting).
Moreover, it is not clear if the MNIST variant you are using is already normalized; if not, you should normalize it yourself:
x_train = x_train.values/255
x_test = x_test.values/255
It is also not clear why you ask for a 784-unit layer, since this is actually the second layer of your NN (the first is set implicitly by the input_shape argument - see Keras Sequential model input layer), and it certainly does not need to contain one unit for each of your 784 input features.
UPDATE (after comments):
But why is MSE meaningless for classification?
This is a theoretical issue, not exactly appropriate for SO; roughly speaking, it is for the same reason we don't use linear regression for classification - we use logistic regression, the actual difference between the two approaches being exactly the loss function. Andrew Ng, in his popular Machine Learning course at Coursera, explains this nicely - see his Lecture 6.1 - Logistic Regression | Classification at Youtube (explanation starts at ~ 3:00), as well as section 4.2 Why Not Linear Regression [for classification]? of the (highly recommended and freely available) textbook An Introduction to Statistical Learning by Hastie, Tibshirani and coworkers.
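A tiny numerical illustration of the difference, where p is the probability the model assigns to the true class:
import numpy as np

p = np.array([0.9, 0.5, 0.1])   # predicted probability for the true class
print((1 - p) ** 2)             # MSE:           [0.01  0.25  0.81 ]
print(-np.log(p))               # cross-entropy: [0.105 0.693 2.303] - confident mistakes are punished far harder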
And MSE does give a high accuracy, so why doesn't that matter?
Nowadays, almost anything you throw at MNIST will "work", which of course neither makes it correct nor a good approach for more demanding datasets...
UPDATE 2:
whenever I run with crossentropy, the accuracy just flutters around at ~10%
Sorry, cannot reproduce the behavior... Taking the Keras MNIST MLP example with a simplified version of your model, i.e.:
model = Sequential()
model.add(Dense(784, activation='linear', input_shape=(784,)))
model.add(Dense(50, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(),
              metrics=['accuracy'])
we easily end up with a ~ 92% validation accuracy after only 5 epochs:
history = model.fit(x_train, y_train,
                    batch_size=128,
                    epochs=5,
                    verbose=1,
                    validation_data=(x_test, y_test))
Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 4s - loss: 0.8974 - acc: 0.7801 - val_loss: 0.4650 - val_acc: 0.8823
Epoch 2/10
60000/60000 [==============================] - 4s - loss: 0.4236 - acc: 0.8868 - val_loss: 0.3582 - val_acc: 0.9034
Epoch 3/10
60000/60000 [==============================] - 4s - loss: 0.3572 - acc: 0.9009 - val_loss: 0.3228 - val_acc: 0.9099
Epoch 4/10
60000/60000 [==============================] - 4s - loss: 0.3263 - acc: 0.9082 - val_loss: 0.3024 - val_acc: 0.9156
Epoch 5/10
60000/60000 [==============================] - 4s - loss: 0.3061 - acc: 0.9132 - val_loss: 0.2845 - val_acc: 0.9196
Notice the activation='linear' of the first Dense layer, which is the equivalent of not specifying anything, like in your case (as I said, practically everything you throw to MNIST will "work")...
Final advice: Try modifying your model as:
model = tf.keras.Sequential()
model.add(layers.Dense(784, activation = 'relu',input_shape=(784,)))
model.add(layers.Dense(h1, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
in order to use the better (and default) 'glorot_uniform' initializer, and remove the kernel_regularizer args (they may be the cause of any issue - always start simple!)...

XOR not learned using keras v2.0

I have been getting pretty bad results with Keras for some time, and haven't been very suspicious of the tool itself. But I am beginning to be a bit concerned now.
I tried to see whether it could handle a simple XOR problem, and after 30000 epochs it still hasn't solved it...
code:
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
import numpy as np
np.random.seed(100)
model = Sequential()
model.add(Dense(2, input_dim=2))
model.add(Activation('tanh'))
model.add(Dense(1, input_dim=2))
model.add(Activation('sigmoid'))
X = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
y = np.array([[0],[1],[1],[0]], "float32")
model.compile(loss='binary_crossentropy', optimizer='adam')
model.fit(X, y, nb_epoch=30000, batch_size=1,verbose=1)
print(model.predict_classes(X))
Here is part of my result:
4/4 [==============================] - 0s - loss: 0.3481
Epoch 29998/30000
4/4 [==============================] - 0s - loss: 0.3481
Epoch 29999/30000
4/4 [==============================] - 0s - loss: 0.3481
Epoch 30000/30000
4/4 [==============================] - 0s - loss: 0.3481
4/4 [==============================] - 0s
[[0]
[1]
[0]
[0]]
Is there something wrong with the tool - or am I doing something wrong??
Version I am using:
MacBook-Pro:~ usr$ python -c "import keras; print keras.__version__"
Using TensorFlow backend.
2.0.3
MacBook-Pro:~ usr$ python -c "import tensorflow as tf; print tf.__version__"
1.0.1
MacBook-Pro:~ usr$ python -c "import numpy as np; print np.__version__"
1.12.0
Updated version:
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam, SGD
import numpy as np
#np.random.seed(100)
model = Sequential()
model.add(Dense(units = 2, input_dim=2, activation = 'relu'))
model.add(Dense(units = 1, activation = 'sigmoid'))
X = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
y = np.array([[0],[1],[1],[0]], "float32")
model.compile(loss='binary_crossentropy', optimizer='adam')
print(model.summary())
model.fit(X, y, nb_epoch=5000, batch_size=4,verbose=1)
print(model.predict_classes(X))
I cannot add a comment to Daniel's response as I don't have enough reputation, but I believe he's on the right track. While I have not personally tried running the XOR with Keras, here's an article that might be interesting - it analyzes the various regions of local minima for a 2-2-1 network, showing that higher numerical precision would lead to fewer instances of getting stuck on a gradient descent algorithm.
The Local Minima of the Error Surface of the 2-2-1 XOR Network (Ida G. Sprinkhuizen-Kuyper and Egbert J.W. Boers)
On a side note, I wouldn't consider using a 2-4-1 network to be over-fitting the problem. Having 4 linear cuts on the 0-1 plane (cutting it into a 2x2 grid) instead of 2 cuts (cutting the corners off diagonally) just separates the data in a different way; since we only have 4 data points and no noise in the data, the network that uses 4 linear cuts isn't describing "noise" instead of the XOR relationship.
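For comparison, a 2-4-1 variant is a one-line change in Keras (a sketch; as argued above it just separates the data with more cuts rather than over-fitting):
from keras.models import Sequential
from keras.layers.core import Dense

model = Sequential()
model.add(Dense(4, input_dim=2, activation='tanh'))   # 4 hidden units instead of 2
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')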
I think it's a "local minimum" in the loss function.
Why?
I have run this same code over and over for a few times, and sometimes it goes right, sometimes it gets stuck into a wrong result. Notice that this code "recreates" the model every time I run it. (If I insist on training a model that found the wrong results, it will simply be kept there forever).
from keras.models import Sequential
from keras.layers import *
import numpy as np
m = Sequential()
m.add(Dense(2,input_dim=2, activation='tanh'))
#m.add(Activation('tanh'))
m.add(Dense(1,activation='sigmoid'))
#m.add(Activation('sigmoid'))
X = np.array([[0,0],[0,1],[1,0],[1,1]],'float32')
Y = np.array([[0],[1],[1],[0]],'float32')
m.compile(optimizer='adam',loss='binary_crossentropy')
m.fit(X,Y,batch_size=1,epochs=20000,verbose=0)
print(m.predict(X))
Running this code, I have found some different outputs:
Wrong: [[ 0.00392423], [ 0.99576807], [ 0.50008368], [ 0.50008368]]
Right: [[ 0.08072935], [ 0.95266515], [ 0.95266813], [ 0.09427474]]
What conclusion can we take from it?
The optimizer is not dealing properly with this local minimum. If it gets lucky (with a proper weight initialization), it will settle in a good minimum and give the right results.
If it gets unlucky (with a bad weight initialization), it will fall into a local minimum without really knowing that there are better places in the loss function, and its learning rate is simply not big enough to escape it. The small gradient keeps it circling the same point.
If you take the time to study the gradients that appear in the wrong case, you will probably see they keep pointing towards that same point, and increasing the learning rate a little may let it escape the hole.
Intuition makes me think that such very small models have more prominent local minima.
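A rough sketch of that run-it-several-times experiment: restart training with fresh random weights and count how often the final loss stays on the ~0.35 plateau of the bad minimum (the epoch count is reduced here to keep the sketch quick, so the exact counts will vary):
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

X = np.array([[0,0],[0,1],[1,0],[1,1]], 'float32')
Y = np.array([[0],[1],[1],[0]], 'float32')

stuck = 0
for trial in range(10):                       # 10 independent restarts
    m = Sequential()
    m.add(Dense(2, input_dim=2, activation='tanh'))
    m.add(Dense(1, activation='sigmoid'))
    m.compile(optimizer='adam', loss='binary_crossentropy')
    m.fit(X, Y, batch_size=1, epochs=5000, verbose=0)
    if m.evaluate(X, Y, verbose=0) > 0.3:     # still near the bad-minimum plateau
        stuck += 1
print('stuck in a local minimum in', stuck, 'of 10 runs')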
Instead of just increasing the number of epochs, try using relu for the activation of your hidden layer instead of tanh. Making only that change to the code you provide, I am able to obtain the following result after only 2000 epochs (Theano backend):
import numpy as np
print(np.__version__)     # 1.11.3
import theano
print(theano.__version__) # 0.9.0
import keras
print(keras.__version__)  # 2.0.2
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam, SGD

np.random.seed(100)
model = Sequential()
model.add(Dense(units=2, input_dim=2, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
X = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
y = np.array([[0],[1],[1],[0]], "float32")
model.compile(loss='binary_crossentropy', optimizer='adam')
model.fit(X, y, epochs=2000, batch_size=1, verbose=0)
print(model.evaluate(X, y))
print(model.predict_classes(X))
4/4 [==============================] - 0s
0.118175707757
4/4 [==============================] - 0s
[[0]
[1]
[1]
[0]]
It would be easy to conclude that this is due to the vanishing gradient problem. However, the simplicity of this network suggests that this isn't the case. Indeed, if I change the optimizer from 'adam' to SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False) (the default values), I can see the following result after 5000 epochs with tanh activation in the hidden layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam, SGD
import numpy as np
np.random.seed(100)
model = Sequential()
model.add(Dense(units = 2, input_dim=2, activation = 'tanh'))
model.add(Dense(units = 1, activation = 'sigmoid'))
X = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
y = np.array([[0],[1],[1],[0]], "float32")
model.compile(loss='binary_crossentropy', optimizer=SGD())
model.fit(X, y, epochs=5000, batch_size=1,verbose=0)
print(model.evaluate(X,y))
print(model.predict_classes(X))
4/4 [==============================] - 0s
0.0314897596836
4/4 [==============================] - 0s
[[0]
[1]
[1]
[0]]
Edit: 5/17/17 - Included complete code to enable reproduction
The minimal neural network architecture required to learn XOR is a (2,2,1) network. However, while the maths shows that a (2,2,1) network can solve the XOR problem, the maths doesn't show that a (2,2,1) network is easy to train. It can sometimes take a lot of epochs (iterations) or fail to converge to the global minimum. That said, I easily got good results with (2,3,1) or (2,4,1) network architectures.
The problem seems to be related to the existence of many local minima. Look at this 1998 paper, «Learning XOR: exploring the space of a classic problem» by Richard Bland. Furthermore, initializing the weights with random numbers between 0.5 and 1.0 helps convergence.
It works fine with Keras or TensorFlow using the 'mean_squared_error' loss function, sigmoid activation, and the Adam optimizer. Even with pretty good hyperparameters, I observed that the learned XOR model is trapped in a local minimum about 15% of the time.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from tensorflow.keras import initializers
import numpy as np

X = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([[0],[1],[1],[0]])

def initialize_weights(shape, dtype=None):
    return np.random.normal(loc=0.75, scale=1e-2, size=shape)

model = Sequential()
model.add(Dense(2,
                activation='sigmoid',
                kernel_initializer=initialize_weights,
                input_dim=2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])

print("*** Training... ***")
model.fit(X, y, batch_size=4, epochs=10000, verbose=0)
print("*** Training done! ***")
print("*** Model prediction on [[0,0],[0,1],[1,0],[1,1]] ***")
print(model.predict_proba(X))
*** Training... ***
*** Training done! ***
*** Model prediction on [[0,0],[0,1],[1,0],[1,1]] ***
[[0.08662204]
[0.9235283 ]
[0.92356336]
[0.06672956]]
