I'm working on multilabel emotion analysis. I built a sequential multilayer model, wrapped it in KerasClassifier, and tuned it with GridSearchCV, using the score function to evaluate the system. I then tried to compare that score with the result of calling predict_proba, rounding the probabilities, and computing jaccard_score with average='micro' (to suit the multilabel setting). I expected the two results to be the same, but they aren't. Could anyone help me interpret this?
My code :
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Flatten, LSTM
from tensorflow.keras.layers import Activation, Dropout, Dense
from tensorflow.keras.layers import Input
# Use scikit-learn to grid search
import sklearn
import sklearn.metrics as metrics
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.metrics import SCORERS, jaccard_score
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# Function to create model, required for KerasClassifier
def create_model_multiclass(learn_rate, dropout_rate, activation):
    # Create the model
    model = tf.keras.Sequential()
    model.add(Dense(300, kernel_initializer='normal', activation=activation))
    model.add(Dropout(dropout_rate))
    model.add(Dense(150, kernel_initializer='normal', activation=activation))
    model.add(Dropout(dropout_rate))
    model.add(Dense(50, kernel_initializer='normal', activation=activation))
    model.add(Dropout(dropout_rate))
    model.add(Dense(11, activation=activation))
    # Compile the model
    adam = Adam(learning_rate=learn_rate)
    model.compile(loss='mse', optimizer=adam, metrics=['accuracy'])
    return model
### Loading train data
###################
# # create model
# # Declare parameter values
model = KerasClassifier(build_fn=create_model_multiclass, verbose=10)
# Define the parameters that you wish to use in your Grid Search along
# with the list of values that you wish to try out
learn_rate = [0.001]
dropout_rate = [0.5]
batch_size = [ 60 ]
epochs = [20,60]
activation = [ 'relu', 'sigmoid' ]
print(sorted(sklearn.metrics.SCORERS.keys()))
scoring={'jaccard':'jaccard_micro','F1_score': 'f1_micro'}
# Make a dictionary of the grid search parameters
param_grid =dict(learn_rate=learn_rate, dropout_rate=dropout_rate, batch_size=batch_size, epochs=epochs,activation=activation )
# Build and fit the GridSearchCV
print("Tuning hyperparameters for accuracy")
grid = GridSearchCV(estimator=model, param_grid=param_grid, return_train_score=True, refit='jaccard', cv=KFold(5), verbose=15)
grid_results = grid.fit(X_all, Y_all)
# Summarize the results in a readable format
print("Best: {0}, using {1}".format(grid_results.best_score_, grid_results.best_params_))
print(grid.best_params_)
print(grid.best_estimator_)
means = grid.cv_results_['mean_test_score']
stds = grid.cv_results_['std_test_score']
params = grid.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print('{0} ({1}) with: {2}'.format(mean, stdev, param))
print("The scores are computed on the full evaluation set.")
print("score",grid.score(X_dev,Y_dev))
y_pred=grid.predict_proba(X_dev)
y_pred_int=y_pred.round()
micro_multilabel_accuracy=jaccard_score(Y_dev, y_pred_int,average='micro')
print('micro_multilabel_accuracy',micro_multilabel_accuracy)
and the output I got :
Best: 0.5518947958946228, using {'activation': 'relu', 'batch_size': 60, 'dropout_rate': 0.5, 'epochs': 20, 'learn_rate': 0.001}
{'activation': 'relu', 'batch_size': 60, 'dropout_rate': 0.5, 'epochs': 20, 'learn_rate': 0.001}
<tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier object at 0x0000023885BBED30>
score 0.5350427627563477
micro_multilabel_accuracy 0.3984088127294982
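For intuition, here is a small toy illustration (my own made-up labels, not the asker's data) of why the two numbers need not match: whatever accuracy-style quantity score reports, it counts matching label cells (or exact-match rows), while micro-averaged Jaccard pools true positives against true positives plus every mistake and ignores correctly predicted zeros, so the metrics measure different things even on the same rounded predictions.
import numpy as np
from sklearn.metrics import jaccard_score, accuracy_score

y_true = np.array([[1, 0, 1, 0],
                   [0, 1, 0, 0],
                   [1, 1, 0, 1]])
y_pred = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 1],
                   [1, 1, 0, 0]])

# Element-wise (binary) accuracy: fraction of label cells that match
print((y_true == y_pred).mean())                       # 9/12 = 0.75
# Micro Jaccard: TP / (TP + FP + FN) pooled over all cells, zeros ignored
print(jaccard_score(y_true, y_pred, average='micro'))  # 4/7 ~ 0.571
# sklearn's accuracy_score on multilabel data: exact-match (subset) accuracy
print(accuracy_score(y_true, y_pred))                  # 0/3 = 0.0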
Related
I have a question on how to convert a PyTorch neural network to a TensorFlow neural network. The TF class as written below does not work, and I assume it's due to the difference between nn.Sequential and tf.keras.Sequential.
class FullyConnected(nn.Sequential):
    """
    Fully connected multi-layer network with ELU activations.
    """
    def __init__(self, sizes, final_activation=None):
        layers = []
        for in_size, out_size in zip(sizes, sizes[1:]):
            layers.append(nn.Linear(in_size, out_size))
            layers.append(nn.ELU())
        layers.pop(-1)
        if final_activation is not None:
            layers.append(final_activation)
        super().__init__(*layers)
class FullyConnected(tf.keras.Sequential):
    """
    Fully connected multi-layer network with ELU activations.
    """
    def __init__(self, sizes, final_activation=None):
        layers = []
        for out_size in sizes[1:-1]:
            layers.append(Dense(units=out_size, activation='elu'))
        if final_activation is not None:
            layers.append(Dense(units=sizes[-1], activation='elu'))
        else:
            layers.append(Dense(units=sizes[-1]))
        super().__init__(*layers)
If I try to initialize the network, say with self.fc = FullyConnected(sizes=(sizes[:-1] + [self.dim * 2])) and sizes = [1, 128, 128, 128, 1], I get the error TypeError: object.__init__() takes exactly one argument (the instance to initialize) when using the TF network.
Can somebody help?
Many thanks in advance!!
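One possible explanation, offered as an assumption rather than a confirmed answer: nn.Sequential.__init__ takes the layers as separate positional arguments, whereas tf.keras.Sequential.__init__ expects a single list of layers, so calling super().__init__(*layers) does not match its signature. A minimal sketch of the subclass with the list passed as one argument (the final_activation handling here is my own simplification):
import tensorflow as tf
from tensorflow.keras.layers import Dense

class FullyConnected(tf.keras.Sequential):
    """Fully connected multi-layer network with ELU activations (sketch)."""
    def __init__(self, sizes, final_activation=None):
        layers = [Dense(units=out_size, activation='elu') for out_size in sizes[1:-1]]
        # final layer: apply the requested final activation, or none
        layers.append(Dense(units=sizes[-1], activation=final_activation))
        super().__init__(layers)  # pass the list itself, not *layers

fc = FullyConnected(sizes=[1, 128, 128, 128, 1])  # hypothetical usage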
I built a 4-layer Keras network that outputs a binary classification; tanh seemed to perform better.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
batch_size=673
data = np.random.rand(batch_size, 25)
#for item in data:
# print(item)
label = np.random.randint(0,2,(batch_size,1))
#print(data)
df=pd.DataFrame(data)
df2=pd.DataFrame(label,columns=["Target"])
df=pd.concat([df,df2],axis=1)
#print(df)
columns=[x for x in df.columns if x!="Target"]
X=df[columns]
y=df["Target"]
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3, random_state=42)
scaler = StandardScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_test=scaler.transform(X_test)
model= Sequential()
model.add(Dense(25, input_shape=(25,),activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
#model.compile(optimizer=Adam(0.01),loss='binary_crossentropy')
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
model.summary()
history=model.fit(X_train, y_train,epochs = 100,verbose=0)
model.evaluate(X_test, y_test)
plt.plot(history.history['accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper left')
plt.show()
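classification_report and confusion_matrix are imported above but never used; as a small follow-up sketch (my addition, thresholding the sigmoid output at 0.5), they can be applied to the held-out split:
y_pred = (model.predict(X_test) > 0.5).astype(int).ravel()
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))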
I have a folder called train, and in it I have three separate folders for "Covid", "Pneumonia", and "Healthy". I have created a VGG19 model using transfer learning, and I have split my train data 75%/25%. I want help with creating a confusion matrix for my test data; I have tried everything to create the confusion matrix and calculate the F1 score, without success. It would be really great if someone could provide the code, since I am new to this.
#import needed packages
import numpy as np
import tensorflow as tf
import keras
from keras import backend as K
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, SeparableConv2D, BatchNormalization, Activation
from keras.applications.vgg19 import VGG19
from keras.applications.resnet import ResNet101
from keras.layers import Input, Lambda, Flatten
import matplotlib.pyplot as plt
# dataset has 3 classes
num_class = 3
# Base model without Fully connected Layers
base_model = VGG19(include_top=False, weights='imagenet', input_shape=(224,224,3))
# don't train existing weights
for layer in base_model.layers:
    layer.trainable = False
x = Flatten()(base_model.output)
##3 classes: COVID, PNEUMONIA, NORMAL
preds=Dense(num_class, activation='softmax')(x) #final layer with softmax activation
model=Model(inputs=base_model.input,outputs=preds)
##check the model
model.summary()
# tell the model what cost and optimization method to use
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
##Preparing Data
train_datagen = ImageDataGenerator(preprocessing_function=keras.applications.vgg19.preprocess_input,
                                   validation_split=0.25)
train_generator=train_datagen.flow_from_directory('C:/Users/prani/OneDrive/Desktop/Masters project/chest_xray/Train-250/',
target_size=(224,224),
batch_size=64,
class_mode='categorical',
subset='training')
validation_generator = train_datagen.flow_from_directory(
'C:/Users/prani/OneDrive/Desktop/Masters project/chest_xray/Train-250/', # same directory as training data
target_size=(224,224),
batch_size=64,
class_mode='categorical',
subset='validation') # set as validation data
##Training
## Setting hyperparameters
epochs = 50
learning_rate = 0.0005
decay_rate = learning_rate / epochs
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, decay=decay_rate, amsgrad=False)
model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['accuracy'])
##Train
step_size_train = train_generator.n // train_generator.batch_size
step_size_val = validation_generator.samples // validation_generator.batch_size
r = model.fit_generator(generator=train_generator,
steps_per_epoch=step_size_train,
validation_data = validation_generator,
validation_steps =step_size_val,
epochs=50)
##Confusion matrix (sample code below, but it is not working: it produces neither an error nor results)
from sklearn.metrics import classification_report, confusion_matrix
batch_size=64
Y_pred = model.predict_generator(validation_generator,186)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(validation_generator.classes, y_pred))
print('Classification Report')
target_names = ['COVID','NORMAL','PNEUMONIA']
print(classification_report(validation_generator.classes, y_pred, target_names=target_names))
You could look at this page for information on how to code a confusion matrix and other metrics:
https://www.tensorflow.org/tutorials/structured_data/imbalanced_data
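Two things in the snippet above are worth checking (this is my reading of the code, not a confirmed diagnosis): predict_generator is asked for a fixed 186 steps regardless of how many validation batches actually exist, and the generator shuffles by default, so the prediction order may not line up with validation_generator.classes. A rough sketch of one way it could be done, with a non-shuffled generator and the step count derived from the generator itself:
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix

# re-create the validation split without shuffling so predictions align with .classes
eval_generator = train_datagen.flow_from_directory(
    'C:/Users/prani/OneDrive/Desktop/Masters project/chest_xray/Train-250/',
    target_size=(224, 224),
    batch_size=64,
    class_mode='categorical',
    subset='validation',
    shuffle=False)

steps = int(np.ceil(eval_generator.samples / eval_generator.batch_size))
Y_pred = model.predict_generator(eval_generator, steps=steps)
y_pred = np.argmax(Y_pred, axis=1)

print('Confusion Matrix')
print(confusion_matrix(eval_generator.classes, y_pred))
print('Classification Report')
target_names = list(eval_generator.class_indices.keys())
print(classification_report(eval_generator.classes, y_pred, target_names=target_names))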
I'm just trying to explore Keras and TensorFlow with the famous MNIST dataset.
I have already applied some basic neural networks, but when it comes to tuning some hyperparameters with the sklearn wrapper GridSearchCV, especially the number of layers, I get the error below:
Parameter values for parameter (hidden_layers) need to be a sequence(but not a string) or np.ndarray.
So you can have a better view, I post the main parts of my code below.
Data preparation
# Extract label
X_train=train.drop(labels = ["label"],axis = 1,inplace=False)
Y_train=train['label']
del train
# Reshape to fit MLP
X_train = X_train.values.reshape(X_train.shape[0],784).astype('float32')
X_train = X_train / 255
# Label format
from keras.utils import to_categorical
Y_train = to_categorical(Y_train, num_classes=10)
num_classes = Y_train.shape[1]
Keras part
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
# Function with hyperparameters to optimize
def create_model(optimizer='adam', activation='sigmoid', hidden_layers=2):
    # Initialize the constructor
    model = Sequential()
    # Add an input layer
    model.add(Dense(32, activation=activation, input_shape=(784,)))
    for i in range(hidden_layers):
        # Add one hidden layer
        model.add(Dense(16, activation=activation))
    # Add an output layer
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
# Model which will be the input for the GridSearchCV function
modelCV = KerasClassifier(build_fn=create_model, verbose=0)
GridSearchCV
from keras.activations import relu, sigmoid
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Dropout
from keras.utils import np_utils
activations = [sigmoid, relu]
param_grid = dict(hidden_layers=3,activation=activations, batch_size = [256], epochs=[30])
grid = GridSearchCV(estimator=modelCV, param_grid=param_grid, scoring='accuracy')
grid_result = grid.fit(X_train, Y_train)
I just want to let you know that the same kind of question has already been asked here: Grid Search the number of hidden layers with keras. However, the answer is not complete at all, and I can't add a comment to reply to the answerer.
Thank you!
You should add:
for i in range(int(hidden_layers)):
    # Add one hidden layer
    model.add(Dense(16, activation=activation))
Try adding the values of param_grid as lists:
params_grid = {"hidden_layers": [3]}
When you set the hidden_layers parameter to a bare scalar like 2, it is not passed as a sequence, and that is why the error is thrown.
GridSearchCV expects a sequence of candidate values for each parameter, which is exactly what the error message says.
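Putting the two answers together, a sketch of a grid definition that avoids the error (the names modelCV, X_train, and Y_train come from the question; the specific values to try are just an example):
param_grid = dict(hidden_layers=[1, 2, 3],         # a list of values, not a bare int
                  activation=['sigmoid', 'relu'],  # string names also work as Keras activations
                  batch_size=[256],
                  epochs=[30])
grid = GridSearchCV(estimator=modelCV, param_grid=param_grid, scoring='accuracy')
grid_result = grid.fit(X_train, Y_train)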
This is a regression problem. Below is my code
import os
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
os.chdir(r'C:\Users\Swapnil\Desktop\RP TD\first\Changes')
## Load the dataset
dataset1 = pd.read_csv("Main Lane Plaza 1.csv")
X_train = dataset1.iloc[:,0:11].values
Y_train = dataset1.iloc[:,11].values
dataset2 = pd.read_csv("Main Lane Plaza 1_070416010117.csv")
X_test = dataset2.iloc[:,0:11].values
Y_test = dataset2.iloc[:,11].values
##Define base model
def base_model():
    model = Sequential()
    model.add(Dense(11, input_dim=11, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(7, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
seed = 7
np.random.seed(seed)
clf = KerasRegressor(build_fn=base_model, epochs=100, batch_size=5, verbose=0)
clf.fit(X_train, Y_train)
res = clf.predict(X_train)
##Result
clf.score(X_test, Y_test)
Not sure if the score should be negative?
Kindly advise if I am doing something wrong.
Thanks in advance.
I am not able to figure out whether this could be a problem due to feature scaling, as I did the feature scaling in R and saved the CSV files to use in Python.
When you get a negative score for a regression problem, it usually means that the model you chose can't fit your data well.
You have a sigmoid activation in layer 1, another sigmoid in layer 2, and then a final layer with one output.
Change the activations to relu: sigmoid squashes the values between 0 and 1, making the numbers really small and causing a vanishing gradient problem across the two hidden layers.
def base_model():
    model = Sequential()
    model.add(Dense(11, input_dim=11, kernel_initializer='normal', activation='relu'))
    model.add(Dense(7, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
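The question imports StandardScaler, Pipeline, cross_val_score, and KFold without using them; if a scaled, cross-validated evaluation was the intent, a sketch of that setup (reusing base_model, the data names, and seed from the question) could look like this:
estimators = [('standardize', StandardScaler()),
              ('mlp', KerasRegressor(build_fn=base_model, epochs=100, batch_size=5, verbose=0))]
pipeline = Pipeline(estimators)
kfold = KFold(n_splits=5, shuffle=True, random_state=seed)
results = cross_val_score(pipeline, X_train, Y_train, cv=kfold)
print("Cross-validation score: %.3f (+/- %.3f)" % (results.mean(), results.std()))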
I want to build a non-linear regression model using Keras to predict a positive continuous variable.
For the model below, how do I select the following hyperparameters?
Number of Hidden layers and Neurons
Dropout ratio
Use BatchNormalization or not
Activation function out of linear, relu, tanh, sigmoid
Best optimizer to use among adam, rmsprop, sgd
Code
def dnn_reg():
    model = Sequential()
    # layer 1
    model.add(Dense(40, input_dim=13, kernel_initializer='normal'))
    model.add(Activation('tanh'))
    model.add(Dropout(0.2))
    # layer 2
    model.add(Dense(30, kernel_initializer='normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    # layer 3
    model.add(Dense(5, kernel_initializer='normal'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(1, kernel_initializer='normal'))
    model.add(Activation('relu'))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
I have considered random grid search, but I want to use hyperopt instead, which I believe will be faster. I initially implemented the tuning using https://github.com/maxpumperla/hyperas, but Hyperas is not working with the latest version of Keras. I suspect that Keras is evolving fast and it's difficult for the maintainer to keep the package compatible, so I think using hyperopt directly will be a better option.
PS: I am new to Bayesian optimization for hyperparameter tuning and to hyperopt.
I've had a lot of success with Hyperas. The following are the things I've learned to make it work.
1) Run it as a Python script from the terminal (not from an IPython notebook)
2) Make sure that you do not have any comments in your code (Hyperas doesn't like comments!)
3) Encapsulate your data and model in a function as described in the hyperas readme.
Below is an example of a Hyperas script that worked for me (following the instructions above).
from __future__ import print_function
import numpy as np
import tensorflow as tf
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
from keras import backend as K
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.utils import np_utils
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adam
__author__ = 'JOnathan Hilgart'
def data():
    """
    Data providing function:
    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    import numpy as np
    x = np.load('training_x.npy')
    y = np.load('training_y.npy')
    x_train = x[:15000, :]
    y_train = y[:15000, :]
    x_test = x[15000:, :]
    y_test = y[15000:, :]
    return x_train, y_train, x_test, y_test
def model(x_train, y_train, x_test, y_test):
    """
    Model providing function:
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
    - loss: Specify a numeric evaluation metric to be minimized
    - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
    - model: specify the model just created so that we can later use it again.
    """
    model_mlp = Sequential()
    model_mlp.add(Dense({{choice([32, 64, 126, 256, 512, 1024])}},
                        activation='relu', input_shape=(2,)))
    model_mlp.add(Dropout({{uniform(0, .5)}}))
    model_mlp.add(Dense({{choice([32, 64, 126, 256, 512, 1024])}}))
    model_mlp.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model_mlp.add(Dropout({{uniform(0, .5)}}))
    model_mlp.add(Dense({{choice([32, 64, 126, 256, 512, 1024])}}))
    model_mlp.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model_mlp.add(Dropout({{uniform(0, .5)}}))
    model_mlp.add(Dense({{choice([32, 64, 126, 256, 512, 1024])}}))
    model_mlp.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model_mlp.add(Dropout({{uniform(0, .5)}}))
    model_mlp.add(Dense(9))
    model_mlp.add(Activation({{choice(['softmax', 'linear'])}}))
    model_mlp.compile(loss={{choice(['categorical_crossentropy', 'mse'])}}, metrics=['accuracy'],
                      optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})
    model_mlp.fit(x_train, y_train,
                  batch_size={{choice([16, 32, 64, 128])}},
                  epochs=50,
                  verbose=2,
                  validation_data=(x_test, y_test))
    score, acc = model_mlp.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model_mlp}
if __name__ == '__main__':
    import gc; gc.collect()
    with K.get_session():  ## TF session
        best_run, best_model = optim.minimize(model=model,
                                              data=data,
                                              algo=tpe.suggest,
                                              max_evals=2,
                                              trials=Trials())
        X_train, Y_train, X_test, Y_test = data()
        print("Evaluation of best performing model:")
        print(best_model.evaluate(X_test, Y_test))
        print("Best performing model chosen hyper-parameters:")
        print(best_run)
This is induced by a different gc sequence at interpreter exit: if Python collects the session first, the program exits successfully; if Python collects the swig memory (tf_session) first, the program exits with a failure.
You can force Python to delete the session with:
del session
Or, if you are using Keras and can't get the session instance, you can run the following code at the end of your script:
import gc; gc.collect()
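As a placement sketch only (reusing the Hyperas-style __main__ block from the earlier answer; this is an illustration, not part of the original note):
if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model, data=data,
                                          algo=tpe.suggest, max_evals=2,
                                          trials=Trials())
    X_train, Y_train, X_test, Y_test = data()
    print(best_model.evaluate(X_test, Y_test))
    # collect at the very end, per the note above, so the session is cleaned
    # up before interpreter shutdown
    import gc; gc.collect()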
This can also be another approach:
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.metrics import roc_auc_score
import sys
X = []
y = []
X_val = []
y_val = []
space = {'choice': hp.choice('num_layers',
[ {'layers':'two', },
{'layers':'three',
'units3': hp.uniform('units3', 64,1024),
'dropout3': hp.uniform('dropout3', .25,.75)}
]),
'units1': hp.uniform('units1', 64,1024),
'units2': hp.uniform('units2', 64,1024),
'dropout1': hp.uniform('dropout1', .25,.75),
'dropout2': hp.uniform('dropout2', .25,.75),
'batch_size' : hp.uniform('batch_size', 28,128),
'nb_epochs' : 100,
'optimizer': hp.choice('optimizer',['adadelta','adam','rmsprop']),
'activation': 'relu'
}
def f_nn(params):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import Adadelta, Adam, RMSprop
    print('Params testing: ', params)
    model = Sequential()
    # hp.uniform returns floats, so layer widths and batch size are cast to int
    model.add(Dense(output_dim=int(params['units1']), input_dim=X.shape[1]))
    model.add(Activation(params['activation']))
    model.add(Dropout(params['dropout1']))
    model.add(Dense(output_dim=int(params['units2']), init="glorot_uniform"))
    model.add(Activation(params['activation']))
    model.add(Dropout(params['dropout2']))
    if params['choice']['layers'] == 'three':
        model.add(Dense(output_dim=int(params['choice']['units3']), init="glorot_uniform"))
        model.add(Activation(params['activation']))
        model.add(Dropout(params['choice']['dropout3']))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=params['optimizer'])
    model.fit(X, y, nb_epoch=params['nb_epochs'], batch_size=int(params['batch_size']), verbose=0)
    pred_auc = model.predict_proba(X_val, batch_size=128, verbose=0)
    acc = roc_auc_score(y_val, pred_auc)
    print('AUC:', acc)
    sys.stdout.flush()
    return {'loss': -acc, 'status': STATUS_OK}
trials = Trials()
best = fmin(f_nn, space, algo=tpe.suggest, max_evals=50, trials=trials)
print('best: ', best)
Source