Keras Sequential NaN values for loss and val_loss - Python

I'm having a problem which I cannot solve on my own. I'm pretty new to ML and to Keras Sequential models.
Problem:
I only get NaN for loss and accuracy during fit(), and when I try to predict, I get NaN for every prediction.
My data has shape datas(85802, 223), target column included.
*UPDATE:
The problem was that I created new NaN values after clearing the old ones: the target is built with shift(-4) after dropna() runs, so the last four rows of 'target' are NaN again. These two lines had to be swapped:
datas.dropna(inplace=True) <===== this line switched
labels = datas.columns
datas['target'] = datas['close'].shift(-4) <==== with this one
Pretty dumb issue... Thank you guys!
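For clarity, here is the corrected order as a minimal sketch: the shift runs first, and dropna() then removes the trailing NaN rows the shift creates.
datas['target'] = datas['close'].shift(-4)  # last 4 rows of 'target' become NaN
datas.dropna(inplace=True)                  # so drop NaNs only after the shift
labels = datas.columns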
import pandas as pd
import numpy as np
from keras.layers import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Conv1D, MaxPooling1D, LeakyReLU
from keras.utils import np_utils
from keras.layers import GRU, CuDNNGRU
from keras.callbacks import CSVLogger, ModelCheckpoint
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers.experimental import preprocessing
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.models import Sequential
import tensorflow as tf
import h5py
import os
datas = pd.read_csv("D:/freqtrade/user_data/ML/New_Approach_16_08_2021/csv/ADA_USDT.csv")
datas.drop(columns=["date"], inplace=True)
print(datas.isna().sum())
datas.dropna(inplace=True)
labels = datas.columns
datas['target'] = datas['close'].shift(-4)
target = datas['target']
datas = datas[:].values
X_train, X_test, y_train, y_test = train_test_split(datas, target, test_size=0.33)
X_train = tf.convert_to_tensor(X_train, dtype=tf.float32)
X_test = tf.convert_to_tensor(X_test, dtype=tf.float32)
def build_and_compile_model():
    model = Sequential()
    model.add(Dense(200, input_shape=(222,), activation="relu"))
    # model.add(Dropout(0.2))
    model.add(Dense(180, activation="tanh"))
    model.add(Dense(100, activation="relu"))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(12, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mse', metrics=['accuracy'], optimizer='adam')
    return model
dnn_model = build_and_compile_model()
history = dnn_model.fit(
    X_train, y_train,
    validation_split=0.2,
    verbose=1, epochs=2, batch_size=32)
dnn_model.summary()
history.model.save('dnn_model2')
X_test.columns = labels
test_predictions = history.model.predict(X_test)
print(test_predictions)
Epoch 1/2
1438/1438 [==============================] - 6s 4ms/step - loss: nan - accuracy: 0.0000e+00 - val_loss: nan - val_accuracy: 0.0000e+00
Epoch 2/2
1438/1438 [==============================] - 6s 4ms/step - loss: nan - accuracy: 0.0000e+00 -

Related

livelossplot keras plot per epoch

I'm trying to plot the cost function and accuracy in Keras.
However, when I train for many epochs, for instance 100, I get 100 separate plots in Python, and I have to close the plot for epoch n before the plot for epoch n + 1 is shown.
I would like to have one figure where I can see all 100 epochs at once.
My code:
import pandas as pd
forecast_demo = pd.read_csv('forcastdemo.csv')
forecast_demo.head()
X_train = forecast_demo[['index', 'quarter']]
y_train = forecast_demo.revenue
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras import models
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Input, Dense, BatchNormalization
from IPython.core.display import Image
inputs = Input(shape=(2,))
x = BatchNormalization()(inputs)
x = Dense(512, activation='relu')(x)
x = Dense(128, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(32, activation='relu')(x)
outputs = Dense(1)(x)
model = Model(inputs, outputs)
model.summary()
keras.utils.plot_model(model,to_file='images/oefening2.png', show_shapes=True)
Image('images/oefening2.png')
model.compile(
    loss=keras.losses.mean_squared_error,  # or keras.losses.mean_absolute_percentage_error, keras.losses.mean_absolute_error (for regression)
    optimizer=keras.optimizers.Adam(),
    metrics=[keras.metrics.mean_absolute_percentage_error]  # or keras.metrics.mean_absolute_error, keras.metrics.mean_squared_error
)
from livelossplot import PlotLossesKeras
# here I get 100 plots
history = model.fit(X_train, y_train,
                    batch_size=20,
                    epochs=100,
                    callbacks=[PlotLossesKeras()],
                    verbose=False)
It gives the result you are expecting (one figure with all 100 epochs plotted). I ran the above code with the abalone dataset in Google Colab using TensorFlow 2.7.
Please check here:
from livelossplot import PlotLossesKeras
history = model.fit(abalone_features, abalone_labels,
                    batch_size=20,
                    epochs=100,
                    verbose=2,
                    callbacks=[PlotLossesKeras()])
                    # verbose=False)
Output:
Loss
loss (min: 4.342, max: 16.475, cur: 4.358)
mean_absolute_percentage_error
mean_absolute_percentage_error (min: 15.522, max: 29.032, cur: 15.898)
166/166 - 1s - loss: 4.3578 - mean_absolute_percentage_error: 15.8985 - 694ms/epoch - 4ms/step
Please specify the TensorFlow version and environment details where you are getting this issue.
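As an aside, if the combined figure is only needed after training finishes (rather than updating live), plotting from the returned History object works without any extra library. A minimal sketch, assuming the model was compiled with the mean_absolute_percentage_error metric as above:
import matplotlib.pyplot as plt
# history.history maps each metric name to a list with one value per epoch
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['mean_absolute_percentage_error'], label='MAPE')
plt.xlabel('epoch')
plt.legend()
plt.show()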

Why doesn't my CNN's accuracy/loss change during training?

My goal is to train a convolutional neural network to recognise the images present in the mnist sign language dataset. Here is my attempt to process the data and train the model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import random
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
import keras
import sys
import tensorflow as tf
from keras import optimizers
import json
train_df = pd.read_csv("data/sign_mnist_train.csv")
test_df = pd.read_csv("data/sign_mnist_test.csv")
X = np.array(train_df.drop(["label"], axis=1))
y = np.array(train_df[["label"]])
X = X.reshape(-1, 28, 28, 1)
X = tf.cast(X, tf.float32)
model = Sequential()
model.add(Conv2D(28, (3,3), activation = 'relu'))
model.add(MaxPooling2D((2,2)))
model.add(Flatten())
model.add(Dense(24, activation = 'softmax'))
model.compile(optimizer='RMSprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(X, y, epochs=10, validation_split=0.2)
and after running this I get this result
Epoch 1/10
687/687 [==============================] - 4s 6ms/step - loss: 174.9729 - accuracy: 0.0438 - val_loss: 174.6281 - val_accuracy: 0.0382
Epoch 2/10
687/687 [==============================] - 2s 3ms/step - loss: 174.9779 - accuracy: 0.0433 - val_loss: 174.6281 - val_accuracy: 0.0382
Epoch 3/10
687/687 [==============================] - 2s 3ms/step - loss: 174.9777 - accuracy: 0.0433 - val_loss: 174.6281 - val_accuracy: 0.0382
and this continues for the remaining 7 epochs. My model is slightly different from what I have provided (for brevity) but this sequential model has the same issue, which makes me suspect that the issue must come before the model = Sequential() line. Furthermore, I have tried countless combinations of optimizers/loss and all those do is make the accuracy/loss converge to slightly different numbers, so I doubt that's the problem.
One potential issue is that you use loss='binary_crossentropy' rather than a categorical loss: this is a 24-class problem, not a binary one. Since your labels are integers rather than one-hot vectors, 'sparse_categorical_crossentropy' is the appropriate choice ('categorical_crossentropy' would require one-hot encoding first).
Besides, you already loaded separate training and test CSVs, but model.fit(X, y, epochs=10, validation_split=0.2) splits the training data yet again, holding out 20% of it for validation.
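A minimal sketch of the suggested fix, assuming the integer labels from the CSV are used as-is (so the sparse variant of the categorical loss applies):
# integer labels work directly with the sparse categorical loss, no
# one-hot encoding needed; widen the final Dense layer if any label
# value is >= its number of units
model.compile(optimizer='RMSprop',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# validate on the separate test CSV instead of re-splitting the training data
X_test = np.array(test_df.drop(["label"], axis=1)).reshape(-1, 28, 28, 1)
y_test = np.array(test_df[["label"]])
model.fit(X, y, epochs=10, validation_data=(X_test, y_test))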

Keras stops after 1 completed epoch

Trying to run classification on the CIFAR-10 dataset with a simple CNN. However, the model stops after completing the first epoch and doesn't go on to complete all five. Please help.
INPUT:
import os
import matplotlib.pyplot as plt
import numpy as np
import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras import optimizers
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
cifar10 = tf.keras.datasets.cifar10
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
model = models.Sequential()
# Convolutional base (feature extractor)
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
# Deep feed-forward classifier
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])
history = model.fit(
    x=train_images,
    y=train_labels,
    steps_per_epoch=100,
    epochs=5,
    verbose=1,
    validation_data=(test_images, test_labels),
    validation_steps=50)
OUTPUT:
Train on 50000 samples, validate on 10000 samples
Epoch 1/5
50000/50000 [==============================] - 28s 564us/sample - loss: 2.1455 - acc: 0.2945 - val_loss: 2.0011 - val_acc: 0.3038
You should remove steps_per_epoch and validation_steps and use the batch_size parameter instead. Mixing steps_per_epoch/validation_steps with plain NumPy inputs makes Keras treat the data as exhausted, which is why training stops after the first epoch instead of running all five.
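A minimal sketch of the corrected call; the batch size of 500 is illustrative (any reasonable value works, since Keras then derives the number of steps per epoch from the data size):
history = model.fit(
    x=train_images,
    y=train_labels,
    batch_size=500,  # steps per epoch are derived from the data size
    epochs=5,
    verbose=1,
    validation_data=(test_images, test_labels))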

Keras Binary Classifier Tutorial Example gives only 50% validation accuracy

Keras Binary Classifier Tutorial Example gives only 50% validation accuracy.
The near 50% accuracy can be gotten from an un-trained classifier itself for binary classification.
This example is straight from https://keras.io/getting-started/sequential-model-guide/
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
np.random.seed(10)
# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((800, 20))
y_test = np.random.randint(2, size=(800, 1))
model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          epochs=50,
          batch_size=128,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, batch_size=128)
Accuracy output (I tried multiple runs and also increased the number of hidden layers):
Epoch 50/50
1000/1000 [==============================] - 0s 211us/sample - loss: 0.6905 - accuracy: 0.5410 - val_loss: 0.6959 - val_accuracy: 0.4812
Could someone help me understand if anything is wrong here?
How to increase the accuracy for this "example" problem presented in the tutorial?
If you train a classifier on random examples, you will always get approximately 50% accuracy on the validation data, here represented by x_test. Your training samples are assigned random classes, and the validation/test set is likewise labeled at random, so there is no signal to learn; chance-level accuracy of roughly 50% is the expected outcome for binary classification.
The more epochs you train, the higher the training accuracy will climb as an effect of overfitting, while validation accuracy stays near 50%.
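To see that roughly 50% really is the ceiling here, compare against a trivial baseline that ignores the features entirely; DummyClassifier from scikit-learn is my own addition, not part of the tutorial:
import numpy as np
from sklearn.dummy import DummyClassifier
np.random.seed(10)
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=1000)
x_test = np.random.random((800, 20))
y_test = np.random.randint(2, size=800)
# always predicts the majority class of the training labels
baseline = DummyClassifier(strategy='most_frequent').fit(x_train, y_train)
print(baseline.score(x_test, y_test))  # ~0.5, the same as the trained network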

how to get loss of each output in multi output regression?

In order to analyze the data, I need the loss for each output dimension separately; instead I get only one loss, which I suspect is the mean of the losses over all output dimensions.
Any help understanding what the reported loss is, and how to get a separate loss for each output, would be appreciated:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from scipy import stats
from keras import models
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
siz=100000
inp0=np.random.randint(100, 1000000 , size=(siz,3))
rand0=np.random.randint(-100, 100 , size=(siz,2))
a1=0.2;a2=0.8;a3=2.5;a4=2.6;a5=1.2;a6=0.3
oup1=np.dot(inp0[:,0],a1)+np.dot(inp0[:,1],a2)+np.dot(inp0[:,2],a3)\
+rand0[:,0]
oup2=np.dot(inp0[:,0],a4)+np.dot(inp0[:,1],a5)+np.dot(inp0[:,2],a6)\
+rand0[:,1]
oup_tot=np.concatenate((oup1.reshape(siz,1), oup2.reshape(siz,1)),\
axis=1)
normzer_inp = MinMaxScaler()
inp_norm = normzer_inp.fit_transform(inp0)
normzer_oup = MinMaxScaler()
oup_norm = normzer_oup.fit_transform(oup_tot)
X=inp_norm
Y=oup_norm
kfold = KFold(n_splits=2, random_state=None, shuffle=False)
opti_SGD = SGD(lr=0.01, momentum=0.9)
model1 = Sequential()
for train, test in kfold.split(X, Y):
    model = Sequential()
    model.add(Dense(64, input_dim=X.shape[1], activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(Y.shape[1], activation='linear'))
    model.compile(loss='mean_squared_error', optimizer=opti_SGD)
    history = model.fit(X[train], Y[train],
                        validation_data=(X[test], Y[test]),
                        epochs=100, batch_size=2048, verbose=2)
I get:
Epoch 1/100
- 0s - loss: 0.0864 - val_loss: 0.0248
Epoch 2/100
- 0s - loss: 0.0218 - val_loss: 0.0160
Epoch 3/100
- 0s - loss: 0.0125 - val_loss: 0.0091
I would like to know what the reported loss is, and how to get a loss for each output dimension.
The single loss you see is the mean squared error averaged over both output dimensions (and over the batch). To get per-dimension values, pass a list of functions to the metrics argument in the compile call. See here: https://keras.io/metrics/#custom-metrics
import keras.backend as K
...
def loss_first_dim(y_true, y_pred):
    return K.mean(K.square(y_pred[:, 0] - y_true[:, 0]))
model.compile(optimizer=opti_SGD,
              loss='mean_squared_error',
              metrics=[loss_first_dim])
...
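Extending the same idea to the second output dimension (loss_second_dim is a name introduced here for illustration), compiling with both metrics makes Keras report one value per dimension each epoch alongside the combined loss:
def loss_second_dim(y_true, y_pred):
    # mean squared error restricted to the second output column
    return K.mean(K.square(y_pred[:, 1] - y_true[:, 1]))
model.compile(optimizer=opti_SGD,
              loss='mean_squared_error',
              metrics=[loss_first_dim, loss_second_dim])
# the training log then shows loss, loss_first_dim and loss_second_dim;
# the combined loss should be close to the mean of the two per-dimension values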
