I have already tuned my hyperparameters and would like to perform k-fold cross-validation for my model. I have been looking around at different methods, but nothing seems to work for me. The code is below:
tf.get_logger().setLevel(logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set random seeds for repeatable results
RANDOM_SEED = 3
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)
classes_values = [ "nearmiss", "normal" ]
classes = len(classes_values)
Y = tf.keras.utils.to_categorical(Y - 1, classes)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=1)
input_length = X_train[0].shape[0]
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
validation_dataset = tf.data.Dataset.from_tensor_slices((X_test, Y_test))
def get_reshape_function(reshape_to):
    def reshape(image, label):
        return tf.reshape(image, reshape_to), label
    return reshape
callbacks = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,mode="auto")
model = Sequential()
model.add(Dense(200, activation='tanh',
                activity_regularizer=tf.keras.regularizers.l1(0.0001)))
model.add(Dropout(0.3))
model.add(Dense(44, activation='tanh',
                activity_regularizer=tf.keras.regularizers.l1(0.0001)))
model.add(Dropout(0.3))
model.add(Dense(68, activation='tanh',
                activity_regularizer=tf.keras.regularizers.l1(0.0001)))
model.add(Dropout(0.3))
model.add(Dense(44, activation='tanh',
                activity_regularizer=tf.keras.regularizers.l1(0.0001)))
model.add(Dropout(0.3))
model.add(Dense(classes, activation='softmax', name='y_pred'))
# this controls the learning rate
opt = Adam(learning_rate=0.0002, beta_1=0.9, beta_2=0.999)
# this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
BATCH_SIZE = 32
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)
# train the neural network
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
history=model.fit(train_dataset, epochs=50, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
model.test_on_batch(X_test, Y_test)
model.metrics_names
# Use this flag to disable per-channel quantization for a model.
# This can reduce RAM usage for convolutional models, but may have
# an impact on accuracy.
disable_per_channel_quantization = False
I would appreciate it if someone could guide me on this, as I am very new to TensorFlow and neural networks.
I haven't tested it, but this should be roughly what you want. You use sklearn's KFold to split the dataset into folds, and then you simply fit the model on each fold.
tf.get_logger().setLevel(logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set random seeds for repeatable results
RANDOM_SEED = 3
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)
classes_values = [ "nearmiss", "normal" ]
classes = len(classes_values)
Y = tf.keras.utils.to_categorical(Y - 1, classes)
def get_reshape_function(reshape_to):
    def reshape(image, label):
        return tf.reshape(image, reshape_to), label
    return reshape
callbacks = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,mode="auto")
def create_model():
    model = Sequential()
    model.add(Dense(200, activation='tanh',
                    activity_regularizer=tf.keras.regularizers.l1(0.0001)))
    model.add(Dropout(0.3))
    model.add(Dense(44, activation='tanh',
                    activity_regularizer=tf.keras.regularizers.l1(0.0001)))
    model.add(Dropout(0.3))
    model.add(Dense(68, activation='tanh',
                    activity_regularizer=tf.keras.regularizers.l1(0.0001)))
    model.add(Dropout(0.3))
    model.add(Dense(44, activation='tanh',
                    activity_regularizer=tf.keras.regularizers.l1(0.0001)))
    model.add(Dropout(0.3))
    model.add(Dense(classes, activation='softmax', name='y_pred'))
    return model
# this controls the learning rate
opt = Adam(learning_rate=0.0002, beta_1=0.9, beta_2=0.999)
# this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
BATCH_SIZE = 32
kf = KFold(n_splits=5)
kf.get_n_splits(X)
# Loop over the dataset to create separate folds
for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = Y[train_index], Y[test_index]
    input_length = X_train[0].shape[0]
    train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    validation_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))
    train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
    validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)
    # Create a new model instance for each fold
    model = create_model()
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    # Train the model on the current fold
    history = model.fit(train_dataset, epochs=50, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
    model.test_on_batch(X_test, y_test)
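If you also want a single summary number out of the cross-validation, a small addition (my own sketch, not tested, not part of the original code) is to collect each fold's validation accuracy inside the loop and average it afterwards:
fold_accuracies = []   # create this once, before the k-fold loop
# inside the loop, after model.fit(...):
loss, acc = model.evaluate(validation_dataset, verbose=0)
fold_accuracies.append(acc)
# after the loop:
print("Mean cross-validation accuracy:", np.mean(fold_accuracies))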
Every time I change the dataset, I get a different accuracy: sometimes 97%, sometimes 50%, sometimes 92%. This is a text classification task. Why does this happen? The other result of about 95% comes from two datasets that are the same size and give almost the same result.
# Split data
X_train, X_test, label_train, label_test = train_test_split(X, Y, test_size=0.2,random_state=42)
#Size of train and test data:
print("Training:", len(X_train), len(label_train))
print("Testing: ", len(X_test), len(label_test))
#Function defined to test the models in the test set
def test_model(model, epoch_stop):
    model.fit(X_test
              , Y_test
              , epochs=epoch_stop
              , batch_size=batch_size
              , verbose=0)
    results = model.evaluate(X_test, Y_test)
    return results
#############
maxlen = 300
#Bidirectional LSTM model
embedding_dim = 100
dropout = 0.5
opt = 'adam'
####################
#embed_dim = 128 #dimension of the word embedding vector for each word in a sequence
lstm_out = 196  # number of LSTM units
lstm_model = Sequential()
#Adding dropout (earlier experiment):
#lstm_model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
lstm_model.add(layers.Embedding(input_dim=num_words,
                                output_dim=embedding_dim,
                                input_length=X_train.shape[1]))
#lstm_model.add(Bidirectional(LSTM(lstm_out, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)))
#lstm_model.add(Bidirectional(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2)))
#lstm_model.add(Bidirectional(LSTM(64, return_sequences=True)))
lstm_model.add(Bidirectional(LSTM(64, return_sequences=True)))
lstm_model.add(layers.GlobalMaxPool1D())
#Adding a regularized dense layer
lstm_model.add(layers.Dense(32,kernel_regularizer=regularizers.l2(0.001),activation='relu'))
lstm_model.add(layers.Dropout(0.25))
lstm_model.add(Dense(3,activation='softmax'))
lstm_model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print(lstm_model.summary())
# TRAINING
history = lstm_model.fit(X_train, label_train,
                         epochs=4,
                         verbose=True,
                         validation_data=(X_test, label_test),
                         batch_size=64)
loss, accuracy = lstm_model.evaluate(X_train, label_train, verbose=True)
print("Training Accuracy: {:.4f}".format(accuracy))
loss_val, accuracy_val = lstm_model.evaluate(X_test, label_test, verbose=True)
print("Testing Accuracy: {:.4f}".format(accuracy_val))
ML models base their predictions on the data they were previously trained on, so it is only natural that the outcome will differ if the training data is changed. It may also be the case that a different dataset performs better with different hyperparameters.
I am trying to train a model using Keras for multiclass classification. There are 5 classes to predict. This is an image classification problem; as mentioned, the five classes of images are bedroom, bathroom, living room, dining room, and kitchen. The problem is that the model doesn't seem to learn: it is always stuck at 20% accuracy, and the loss never changes from epoch 1. I'm using the convolutional base of the Xception model with my own classifier on top. The train, test, and validation datasets are set up using the tf.data API.
Can someone please point out what I am doing wrong?
This is the dataset generation
train_dir = "House_Dataset/Train"
valid_dir = "House_Dataset/Valid"
test_dir = "House_Dataset/Test"
train_ds = trainAug.flow_from_directory(
    train_dir,
    target_size=(224,224),
    shuffle=False,
    class_mode="sparse"
)
valid_ds = image_dataset_from_directory(
    valid_dir,
    image_size=(224,224),
    shuffle=False,
)
test_ds = image_dataset_from_directory(
    test_dir,
    image_size=(224,224),
    shuffle=False,
)
This is the import of the Xception convolutional base.
conv_base = keras.applications.Xception(include_top=False, weights="imagenet", input_shape=(224,224,3))
conv_base.trainable = False
This is the model building function.
def pre_trained():
    inputs = keras.Input(shape=(224,224,3))
    #x = data_augmentation(inputs)
    x = keras.applications.xception.preprocess_input(inputs)
    x = conv_base(x)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(5, activation="softmax")(x)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    return model
Training function call
history = pre_trained_model.fit(train_ds, epochs=25)
This is the picture of the epochs (screenshot not included here).
While the exact cause remains unclear to me, I found where the problem occurred and how to fix it.
I added some parameters in the dataset generation function.
train_dir = "House_Dataset/Train"
valid_dir = "House_Dataset/Valid"
test_dir = "House_Dataset/Test"
train_ds = image_dataset_from_directory(
    train_dir,
    image_size=(224,224),
    shuffle=True,
    seed=1,
    labels="inferred",
    label_mode="categorical"
)
valid_ds = image_dataset_from_directory(
    valid_dir,
    image_size=(224,224),
    shuffle=True,
    seed=1,
    labels="inferred",
    label_mode="categorical"
)
test_ds = image_dataset_from_directory(
    test_dir,
    image_size=(224,224),
    shuffle=True,
    seed=1,
    labels="inferred",
    label_mode="categorical"
)
I added the option to shuffle with a seed and changed the label mode to categorical, which produces one-hot encoded labels. I also changed the loss from sparse_categorical_crossentropy to categorical_crossentropy. These changes have allowed the model to train, and there have been significant improvements in both training and validation loss as well as accuracy.
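For reference, a minimal sketch of how the compile and fit calls look after this change (the model itself is built as in the question; passing valid_ds to fit is my own addition, not part of the original training call):
# label_mode="categorical" yields one-hot labels, so the matching loss is categorical_crossentropy
pre_trained_model.compile(optimizer="rmsprop",
                          loss="categorical_crossentropy",
                          metrics=["accuracy"])
history = pre_trained_model.fit(train_ds, validation_data=valid_ds, epochs=25)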
Try my CNN network and see if you get 87% accuracy. A CNN extracts features in each layer as filters; the filters then feed into the softmax classification layer.
model=Sequential()
model.add(Conv2D(32, (3,3),activation='relu',input_shape=(IMG_SIZE,IMG_SIZE,3)))
#model.add(Dropout(0.25))
model.add(MaxPooling2D(2))
#model.add(BatchNormalization())
model.add(Conv2D(64, (3,3), activation="relu"))
model.add(MaxPooling2D(2,2))
model.add(Conv2D(128, (3,3), activation="relu"))
model.add(MaxPooling2D(2,2))
model.add(Conv2D(128, (3,3), activation="relu"))
model.add(MaxPooling2D(2,2))
#model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(512,activation='relu'))
model.add(Dense(5, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics = ['accuracy'])
model.summary()
I use Python for multi-class text classification; my dataset contains 25,000 Arabic tweets divided into 10 classes [sport, politics, ...].
When I use:
training = pd.read_csv('E:\cluster data\One_File_nonnormalizenew2norm.txt', sep="*")
training.dropna(inplace=True)
training.columns = ["text", "class1"]
training['class1'] = training.class1.astype('category').cat.codes
training.dropna(inplace=True)
# create our training data from the tweets
text = training['text']
y = (training['class1'])
from sklearn.model_selection import train_test_split
sentences_train, sentences_test, y_train, y_test = train_test_split(text, y, test_size=0.25, random_state=1000)
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
X_train
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
print("Accuracy:", score)
Accuracy: 0.9525099601593625
When I use Keras:
model = Sequential()
max_words=5000
model.add(Dense(512, input_shape=(input_dim,), activation='softmax'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='softmax'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='softmax'))
model.add(Dense(10))
model.summary()
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=150, epochs=5, verbose=1, validation_split=0.3,shuffle=True)
predicted = model.predict(X_test)
predicted = np.argmax(predicted, axis=1)
accuracy_score(y_test, predicted)
0.28127490039840636
Where is the mistake?
Update
I changed the code to:
model = Sequential()
max_words=5000
model.add(Dense(512, input_shape=(input_dim,)))
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(Dropout(0.5))
#model.add(Dense(1,activation='sigmoid'))####
model.add(Dense(10))
model.summary()
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train,batch_size=150,epochs=10,verbose=1,validation_split=0.3,shuffle=True)
predicted = model.predict(X_test)
predicted = np.argmax(predicted, axis=1)
accuracy_score(y_test, predicted)
0.7201593625498008
Still bad accuracy!
Some ideas:
Remove all softmax activations (as @Matias said).
Remove the model.add(Dense(1,activation='softmax')) line; it's probably destroying your results.
Train for more than 5 epochs.
You are not using the same tweets for validation in the two approaches.
You should probably report the accuracy on both the training and the testing datasets to be sure what is going on. A sketch along these lines follows below.
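A minimal sketch putting these ideas together (not tested; I keep a softmax on the output layer so the default sparse_categorical_crossentropy receives probabilities, and I assume input_dim, X_train, and y_train are the same objects as in your code):
model = Sequential()
# relu in the hidden layers; softmax only on the output layer
model.add(Dense(512, input_shape=(input_dim,), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
# 10 classes -> 10 output units with a single softmax
model.add(Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# CountVectorizer returns a scipy sparse matrix; convert with .toarray() if your Keras version complains
model.fit(X_train, y_train, batch_size=150, epochs=20,
          verbose=1, validation_split=0.3, shuffle=True)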
I use a recurrent network (specifically a GRU) to predict a time series with a length of 90 observations. The data is multivariate, and I follow this example.
Multivariate Time Series
Option 1:
I use Keras to develop the RNN.
n_train_quarter = int(len(serie) * 0.75)
train = values[:n_train_quarter, :]
test = values[n_train_quarter:, :]
X_train, y_train = train[:, :-1], train[:, - 1]
X_test, y_test = test[:, :-1], test[:, - 1]
# All parameters can be changed: kernel, activation, optimizer, ...
model = Sequential()
model.add(GRU(64, input_shape=(X_train.shape[1], X_train.shape[2]),return_sequences=True))
model.add(Dropout(0.5))
# n is random
for i in range(n):
    model.add(GRU(64, kernel_initializer='uniform', return_sequences=True))
    model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('softmax'))
#Compile and fit
model.compile(loss='mean_squared_error', optimizer='SGD')
early_stopping = EarlyStopping(monitor='val_loss', patience=50)
checkpointer = ModelCheckpoint(filepath=Checkpoint_mode, verbose=0, save_weights_only=False, save_best_only=True)
model.fit(X_train, y_train,
batch_size=256,
epochs=64,
validation_split=0.25,
callbacks=[early_stopping, checkpointer],
verbose=0,
shuffle=False)
And the result with the lowest error looks like the image (several experiments give the same result).
Option 2:
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.33, random_state = 42)
# All parameters can be changed: kernel, activation, optimizer, ...
model = Sequential()
model.add(GRU(64, input_shape=(X_train.shape[1], X_train.shape[2]),return_sequences=True))
model.add(Dropout(0.5))
# n is random
for i in range(n):
    model.add(GRU(64, kernel_initializer='uniform', return_sequences=True))
    model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('softmax'))
#Compile and fit
model.compile(loss='mean_squared_error', optimizer='SGD')
early_stopping = EarlyStopping(monitor='val_loss', patience=50)
checkpointer = ModelCheckpoint(filepath=Checkpoint_mode, verbose=0, save_weights_only=False, save_best_only=True)
model.fit(X_train, y_train,
batch_size=256,
epochs=64,
validation_split=0.25,
callbacks=[early_stopping, checkpointer],
verbose=0,
shuffle=False)
And the result with the lowest error prints as shown.
Can I use sklearn's train_test_split, with random selection of the data?
Why is the result better with sequential data than with randomly selected data, if GRU works better with sequential data?
I am trying to run an LSTM using Keras on my custom feature set. I have train and test features in separate files. Each CSV file contains 11 columns, with the last column as the class label. There are 40 classes in total in my dataset. The problem is that I am not able to figure out the correct input_shape for the first layer. I have explored all over Stack Overflow and GitHub but am still not able to solve this.
Below is my complete code.
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
numpy.random.seed(7)
train_dataset = numpy.loadtxt("train.csv", delimiter=",")
X_train = train_dataset[:, 0:10]
y_train = train_dataset[:, 10]
test_dataset = numpy.loadtxt("test.csv", delimiter=",")
X_test = test_dataset[:, 0:10]
y_test = test_dataset[:, 10]
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=X_train.shape))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(32))
model.add(Dense(1, activation='softmax'))
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=10, epochs=1)
score, acc = model.evaluate(X_test, y_test, batch_size=10)
print('Test score:', score)
print('Test accuracy:', acc * 100)
Whatever I change the input_shape parameter to, I get an error either in the first LSTM layer or in the fit method.
You don't have a time dimension in your input.
The input to an RNN should have shape (batch_size, time_steps, features), while your input has shape (batch_size, features).
If you want to use your 10 columns one at a time, you should reshape the array with
numpy.reshape(train_dataset, (-1, train_dataset.shape[1], 1))
Try this code:
train_dataset = numpy.loadtxt("train.csv", delimiter=",")
train_dataset = numpy.reshape(train_dataset, (-1, train_dataset.shape[1], 1))
X_train = train_dataset[:, 0:10]
y_train = train_dataset[:, 10]
test_dataset = numpy.loadtxt("test.csv", delimiter=",")
test_dataset = numpy.reshape(test_dataset, (-1, test_dataset.shape[1], 1))
X_test = test_dataset[:, 0:10]
y_test = test_dataset[:, 10]
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(32))
model.add(Dense(1, activation='softmax'))
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=10, epochs=1)
score, acc = model.evaluate(X_test, y_test, batch_size=10)
print('Test score:', score)
print('Test accuracy:', acc * 100)