I want to evaluate the following deep learning model.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report

lst = []
for i in range(10):
    model = Sequential()
    model.add(Dense(64, kernel_initializer='he_normal', input_dim=X_train.shape[1], activation='relu'))
    model.add(Dropout(0.7))
    model.add(Dense(64, kernel_initializer='he_normal', activation='relu'))
    model.add(Dropout(0.7))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['Recall'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    history = model.fit(X_train, y_train, epochs=150,
                        batch_size=128, validation_data=(X_val, y_val), callbacks=[early_stopping])

    # Generate predictions on the test set
    y_pred = model.predict(X_test)
    y_pred = (y_pred > 0.5)
    clsf = classification_report(y_test, y_pred)
    lst.append(clsf)
    print(clsf)
With this, I run the model 10 times and then take the average of the recall metric.
I am wondering whether this procedure is correct, or whether there is a better way to do it.
Any suggestions? Thanks
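For context, classification_report returns a formatted string, so the recall values cannot be averaged directly from lst. Below is a minimal sketch, assuming scikit-learn's recall_score, of collecting numeric recall scores over the 10 runs; build_and_fit() is a hypothetical helper wrapping the model-building and fitting steps shown above.
import numpy as np
from sklearn.metrics import recall_score

recalls = []
for i in range(10):
    model = build_and_fit()  # hypothetical helper: the Sequential/compile/fit steps above
    y_pred = (model.predict(X_test) > 0.5)
    recalls.append(recall_score(y_test, y_pred))  # numeric recall, unlike the report string

print("mean recall over 10 runs:", np.mean(recalls), "+/- std:", np.std(recalls))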
The HAR dataset should be analyzed using an LSTM and a 1D CNN.
I need to plot how the loss changes during training and check the confusion matrix.
I don't know how to write the __init__ and forward functions in PyTorch.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import ConvLSTM2D, Dropout, Flatten, Dense

# define model
model = Sequential()
model.add(ConvLSTM2D(filters=64, kernel_size=(1,3), activation='relu', input_shape=(n_steps, 1, n_length, n_features)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit network
hist = model.fit(X_train, Y_train, epochs=epochs, validation_data=(X_test, Y_test), batch_size=batch_size, verbose=verbose)
# evaluate model
(loss, accuracy) = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss, accuracy * 100))
The above is an LSTM (ConvLSTM2D) model implemented in Keras.
from tensorflow.keras.layers import Conv1D, MaxPooling1D

model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, n_features)))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', padding='same'))
model.add(Dropout(0.3))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit network
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
                    epochs=epochs, batch_size=batch_size, callbacks=[checkpoint], verbose=verbose)
# evaluate model
(loss, accuracy) = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss, accuracy * 100))
The above is a 1D CNN model implemented in Keras.
I started deep learning only a few months ago, so I am not sure how to approach this. Please help.
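In case it helps, here is a minimal PyTorch sketch of how the 1D CNN above could be written with __init__ and forward. This is an assumption mirroring the Keras layer sizes, not code from the question; the class name HARConv1D and the input layout (batch, n_features, n_timesteps) are hypothetical choices.
import torch
import torch.nn as nn

class HARConv1D(nn.Module):  # hypothetical class name
    def __init__(self, n_features, n_timesteps, n_outputs):
        super().__init__()
        self.conv1 = nn.Conv1d(n_features, 64, kernel_size=3)      # 'valid' padding, like the Keras default
        self.conv2 = nn.Conv1d(64, 64, kernel_size=3, padding=1)   # 'same' padding for kernel_size=3
        self.drop = nn.Dropout(0.3)
        self.pool = nn.MaxPool1d(2)
        # sequence length after conv1 ('valid') is n_timesteps - 2, then halved by pooling
        self.fc1 = nn.Linear(64 * ((n_timesteps - 2) // 2), 100)
        self.fc2 = nn.Linear(100, n_outputs)

    def forward(self, x):
        # PyTorch Conv1d expects input shaped (batch, channels, length), i.e. features first
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        x = self.drop(x)
        x = self.pool(x)
        x = x.flatten(1)
        x = torch.relu(self.fc1(x))
        return self.fc2(x)  # raw logits; nn.CrossEntropyLoss applies softmax internally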
I am running a plain Sequential API MLP and get a huge loss in my output. I am not sure what is wrong or how to fix it. I am using Batch Normalization layers because I read they would help, and they do, but my output is still unreasonably large. Any other advice on the architecture is more than welcome.
The RMSE loss is about 56,000.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, BatchNormalization
model = Sequential()
model.add(Dense(3, input_dim=X_train_enc.shape[1], activation='relu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Dense(10, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(20, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(30, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(20, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(10, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(3, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(1, activation = 'linear'))
# summarize layers
print(model.summary())
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.losses import mean_squared_error
from tensorflow.keras.callbacks import ModelCheckpoint

# Creating/defining our own metrics
def mean_absolute_percentage_error(y_true, y_pred):
    diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true),
                                            K.epsilon(),
                                            None))
    return 100. * K.mean(diff, axis=-1)

tf.keras.backend.set_epsilon(1)  # https://stackoverflow.com/questions/49729522/why-is-the-mean-average-percentage-errormape-extremely-high

def rmse(y_true, y_pred):
    return K.sqrt(mean_squared_error(y_true, y_pred))

# def rmse(y_true, y_pred):
#     return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

Adamax = tf.keras.optimizers.Adamax(lr=0.02, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

# Compiling the model (note: the Adamax instance defined above is not used here, since optimizer='adam').
# Metric functions are similar to loss functions, except that metric results are not used to train the model.
model.compile(optimizer='adam', loss=rmse, metrics=['accuracy', rmse, 'mae', 'mape'])

# Creating a checkpoint
filepath = "weights-improvement-{epoch:02d}-{val_rmse:.9f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_rmse', verbose=1, save_best_only=True, mode='min')
# monitor="val_loss", save_best_only=False
callbacks_list = [checkpoint]
history = model.fit(X_train_enc, y_train, epochs=200, batch_size=16, validation_split=0.2681867, callbacks=callbacks_list, verbose=1)
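One quick way to judge whether an RMSE of about 56,000 is really "huge" is to compare it with the RMSE of a trivial model that always predicts the training mean, since RMSE is expressed in the target's own units. A minimal sketch, assuming y_train is a 1-D NumPy array:
import numpy as np

baseline_rmse = np.sqrt(np.mean((y_train - y_train.mean()) ** 2))  # equals the standard deviation of y_train
print("baseline RMSE (always predicting the mean):", baseline_rmse)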
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from tensorflow.keras.callbacks import ModelCheckpoint

model = Sequential()
model.add(Conv2D(50, (5,5), activation='relu', input_shape =(5,5,1), kernel_initializer='he_normal'))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.summary()
# compile the model
model.compile(loss='binary_crossentropy', optimizer= 'adam', metrics=['accuracy'])
model_checkpoint = ModelCheckpoint(r'C:\Users\globo\Desktop\Test_CNN\Results\Kernel5x5\Weights' + '\\' + test + r'\model_test{epoch:02d}.h5',
                                   save_freq=1, save_weights_only=True)  # save_freq=1 saves after every batch; use save_freq='epoch' to save once per epoch
# fit the model
history = model.fit(X_train, Y_train, epochs=10, batch_size=32, verbose=1, callbacks=[model_checkpoint], shuffle=True, validation_split=0.5)
I'm already extracting the weights for each epoch with ModelCheckpoint, but how can I extract the flatten layer's output for each epoch and save it?
Doing this with Sequential models is not really feasible; you should use the functional API instead.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input((5, 5, 1))
x = Conv2D(50, (5, 5), activation='relu', kernel_initializer='he_normal')(inp)
xflatten = Flatten()(x)
out = Dense(1, activation='sigmoid')(xflatten)

main_model = Model(inp, out)          # this works the same as your model
flatten_model = Model(inp, xflatten)  # this one only outputs the flatten layer; no need to compile it, since it is never trained

main_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = main_model.fit(X_train, Y_train, epochs=10, batch_size=32, verbose=1, callbacks=[model_checkpoint], shuffle=True, validation_split=0.5)
To see the flatten layer's output:
flatten_model.predict(X)
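To actually save the flatten output at every epoch, as the question asks, one option is a small custom callback on top of the two-model setup above. This is a sketch under the assumption that X_train fits in memory and that .npy files are an acceptable output format; SaveFlattenOutput is a hypothetical helper, not part of Keras.
import numpy as np
from tensorflow.keras.callbacks import Callback

class SaveFlattenOutput(Callback):  # hypothetical helper
    def __init__(self, flatten_model, data, prefix="flatten_epoch"):
        super().__init__()
        self.flatten_model = flatten_model
        self.data = data
        self.prefix = prefix

    def on_epoch_end(self, epoch, logs=None):
        # flatten_model shares its layers (and weights) with main_model,
        # so this reflects the weights at the end of the current epoch
        out = self.flatten_model.predict(self.data, verbose=0)
        np.save(f"{self.prefix}_{epoch:02d}.npy", out)

history = main_model.fit(X_train, Y_train, epochs=10, batch_size=32, verbose=1,
                         callbacks=[model_checkpoint, SaveFlattenOutput(flatten_model, X_train)],
                         shuffle=True, validation_split=0.5)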
I'm coding a TensorFlow model in this way:
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout

model = Sequential()
model.add(Dense(512, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1)
I have this method that returns the prediction class of input:
def predict(input):
    try:
        q = model.predict(np.array([x_train[int(dict[input])], ]))
        predicted_label = text_labels[np.argmax(q)]
        print("Predicted class: " + predicted_label + "\n")
        return predicted_label
    except:
        return "error"
Right now I use the softmax output to gauge the confidence of a prediction, but this is not the best way. I know that I could use a probabilistic model with the new TensorFlow Probability.
I'd like to see code showing how to build such a model and get its accuracy.
Thanks
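For reference, a minimal sketch (an assumption about the intent, not code from the question) of returning the softmax probability alongside the predicted class with the existing model; predict_with_confidence is a hypothetical helper:
import numpy as np

def predict_with_confidence(sample):
    # returns the predicted label and the softmax probability assigned to it
    q = model.predict(np.array([sample]))[0]
    idx = int(np.argmax(q))
    return text_labels[idx], float(q[idx])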
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense

model = Sequential()
model.add(LSTM(512, input_shape=(None, 1), return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(512, input_shape=(None, 1)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
model.summary()
hist = model.fit(x_train, y_train, epochs=10, batch_size=16, verbose=2)
p = model.predict(x_test)
I have this code, and p holds the model's predictions. I want to convert the time series p into a linear regression model (time series p -> linear regression model).
How can I accomplish this?
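One possible reading of this, stated here as an assumption since the intended inputs are not given, is to fit a simple linear trend to the predicted series using scikit-learn, with the time index as the single feature:
import numpy as np
from sklearn.linear_model import LinearRegression

p = p.ravel()                          # predictions from model.predict(x_test), flattened to 1-D
t = np.arange(len(p)).reshape(-1, 1)   # time index as the regression feature

lin = LinearRegression().fit(t, p)
trend = lin.predict(t)                 # linear approximation of the predicted series
print("slope:", lin.coef_[0], "intercept:", lin.intercept_)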