After running hyperparameter tuning with GridSearchCV using the code below:
## Tuning the ANN
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
def build_regressor(hidden_nodes, hidden_layers, optimizer):
    regressor = Sequential()
    regressor.add(Dense(units = hidden_nodes, kernel_initializer = 'uniform', activation = 'relu', input_dim = 7))
    for layer_size in range(hidden_layers):
        regressor.add(Dense(hidden_nodes, kernel_initializer = 'uniform', activation = 'relu'))
    regressor.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'linear'))
    regressor.compile(optimizer = optimizer, loss = 'mse', metrics = ['mse'])
    return regressor
regressor = KerasRegressor(build_fn = build_regressor, epochs = 100)
# Create a dictionary of tuning parameters
parameters = {'hidden_nodes': list(range(2,101)), 'hidden_layers': [1,2,3], 'batch_size': [25,32], 'optimizer' : ['adam', 'nadam','RMSprop', 'adamax']}
grid_search = GridSearchCV(estimator = regressor, param_grid = parameters, scoring = 'neg_mean_squared_error', cv = 10, n_jobs = 4)
grid_search = grid_search.fit(X_train, y_train)
best_parameters = grid_search.best_params_
best_score = grid_search.best_score_
best_model = grid_search.best_estimator_
Is there any way to extract the weights of the best model from GridSearchCV?
Thank you so much in advance,
Since you want the model weights saved in a CSV file, you can do the following:
import numpy as np
# best_model is the refitted KerasRegressor wrapper; the underlying Keras model is on .model
weight = best_model.model.layers[0].get_weights()[0]
np.savetxt('weight.csv', weight, fmt='%s', delimiter=',')
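If you want the weights of every layer rather than just the first, a minimal sketch along the same lines (assuming, as above, the old keras.wrappers.scikit_learn wrapper, where the refitted Keras model is exposed as best_model.model):
import numpy as np

# best_model is grid_search.best_estimator_; its .model attribute is the refitted Keras model
for i, layer in enumerate(best_model.model.layers):
    params = layer.get_weights()
    if len(params) == 2:           # Dense layers return [kernel, bias]
        kernel, bias = params
        np.savetxt('layer_%d_weights.csv' % i, kernel, delimiter = ',')
        np.savetxt('layer_%d_biases.csv' % i, bias, delimiter = ',')
Alternatively, best_model.model.save('best_model.h5') stores the full architecture and weights in a single file.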
It gives another error:
The first argument to Layer.call must always be passed.
I cannot solve the problem. input_dim cannot be hard-coded as a constant, because PCA and SelectKBest reduce the number of input features. And if you can also help with getting the results out of the pipeline, I would be very grateful.
Here is a link to the data: https://1drv.ms/u/s!AlHgQsqCKEIPiIxzdyWE0BfBHNocTQ?e=cxuSuo
def modelReg(inpt, opt = 'adam', kInitializer = 'glorot_uniform', dropout = 0.05):
    model = Sequential()
    model.add(Dense(1024, activation='relu', input_dim = inpt, kernel_initializer=kInitializer))
    model.add(Dense(1024, activation='relu', kernel_initializer=kInitializer))
    model.add(Dense(512, activation='relu', kernel_initializer=kInitializer))
    model.add(layers.Dropout(dropout))
    model.add(Dense(1, activation='sigmoid', kernel_initializer=kInitializer))
    model.compile(loss='mse', optimizer=opt, metrics=["mse", "mae"])
    return model
features = []
features.append(('pca', PCA(n_components=10)))
features.append(('select_best', SelectKBest(k=10)))
feature_union = FeatureUnion(features)
regressor = KerasRegressor(build_fn = modelReg(inpt), epochs = 3, batch_size = 500, verbose = 1)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('feature_union', feature_union))
estimators.append(('regressor', regressor))
model = Pipeline(estimators)
model.fit(allData.drop(['VancouverH'], axis = 1), allData['VancouverH'])
To pass arguments to the model-building function through KerasRegressor, write them as keyword arguments of KerasRegressor itself:
kearsEstimator = ('kR', KerasRegressor(createModel, inpt = trainDataX.shape[1],
epochs = 5, batch_size = 180, verbose = 1))
like this, not like this:
kearsEstimator = ('kR', KerasRegressor(createModel(inpt),
epochs = 5, batch_size = 180, verbose = 1))
Then hand the pipeline over to the grid search. The parameter names for the grid are written with the step-name prefix:
estimators = []
estimators.append((kearsEstimator))
param_grid = {
'kR__optimizer':['adam'] #'RMSprop', 'Adam', 'Adamax', 'sgd'
}
grid = GridSearchCV(Pipeline(estimators), param_grid, cv = 5)
grid.fit(trainDataX, trainDataY)
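Putting the pieces together, here is a minimal sketch of the whole pattern. The small createModel below is hypothetical (the point is that its keyword names, inpt and optimizer, match the KerasRegressor arguments and the kR__ grid keys), trainDataX/trainDataY are the arrays from the answer above, and f_regression is swapped in for SelectKBest's default because the target is continuous:
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.model_selection import GridSearchCV

def createModel(inpt, optimizer = 'adam'):
    # 'inpt' is the number of columns that reach the network after the FeatureUnion
    model = Sequential()
    model.add(Dense(64, activation = 'relu', input_dim = inpt))
    model.add(Dense(1, activation = 'linear'))
    model.compile(loss = 'mse', optimizer = optimizer, metrics = ['mse'])
    return model

feature_union = FeatureUnion([
    ('pca', PCA(n_components = 10)),
    ('select_best', SelectKBest(score_func = f_regression, k = 10)),
])

estimators = [
    ('standardize', StandardScaler()),
    ('feature_union', feature_union),
    # 10 PCA components + 10 selected features = 20 inputs to the network
    ('kR', KerasRegressor(createModel, inpt = 20, epochs = 5, batch_size = 180, verbose = 1)),
]

param_grid = {'kR__optimizer': ['adam', 'RMSprop']}  # step name 'kR' as the prefix
grid = GridSearchCV(Pipeline(estimators), param_grid, cv = 5)
grid.fit(trainDataX, trainDataY)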
I am not able to get my code to run when using n_jobs = -1 (on the last line).
I keep getting the same message:
"BrokenProcessPool: A task has failed to un-serialize. Please ensure that the arguments of the function are all picklable."
The code works with n_jobs = 1, but I need all processors as the code will take very long to execute.
I have tried using if __name__ == '__main__': , but I am not sure how to use it and cannot get the code to run.
I have tried for ages but to no avail. Any help is highly appreciated. Here is the relevant code:
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
    classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
    classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    return classifier
classifier = KerasClassifier(build_fn = build_classifier,batch_size = 10, epochs = 100)
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = -1)
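Regarding the __main__ guard you mention: the idea is that the code launching the parallel workers must not run again when those workers import your script, so it goes under the guard. A minimal sketch, assuming build_classifier, X_train and y_train are defined in the same file as in your code above:
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score

if __name__ == '__main__':
    # Runs only when the file is executed directly, not when a worker imports it
    classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 100)
    accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train,
                                 cv = 10, n_jobs = -1)
    print(accuracies.mean(), accuracies.std())
Mixing keras and tensorflow.keras imports, as in the snippet above, can also produce objects that fail to pickle, so it may help to take Sequential and Dense from tensorflow.keras as well.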
I have built an ANN regression model relating 8 input parameters to 1 output parameter.
Code:
X = data.iloc[:,:-1]
y = data.iloc[:,8:9]*100
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train_us, X_test_us, y_train_us, y_test_us = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_Y = StandardScaler()
X_train = sc_X.fit_transform(X_train_us)
X_test = sc_X.transform(X_test_us)
y_train = sc_Y.fit_transform(y_train_us)
y_test = sc_Y.transform(y_test_us)
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
def base_model():
    # Initialising the ANN
    regressor = Sequential()
    # Adding the input layer and the first hidden layer
    regressor.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 8))
    # Adding the second hidden layer
    regressor.add(Dense(units = 4, kernel_initializer = 'uniform', activation = 'relu'))
    # Adding the output layer
    regressor.add(Dense(units = 1, kernel_initializer = 'uniform'))
    # Compiling the ANN
    regressor.compile(optimizer = 'adam', loss = 'mse', metrics = ['mae'])
    return regressor
# Fitting the ANN to the Training set
regressor = KerasRegressor(build_fn=base_model, epochs=500, batch_size=32)
regressor.fit(X_train,y_train)
# Predicting the Test set with the regressor built
y_pred = regressor.predict(X_test)
y_pred = sc_Y.inverse_transform(y_pred)
y_test = sc_Y.inverse_transform(y_test)
#calculate r2_score
from sklearn.metrics import r2_score
score_test = r2_score(y_test,y_pred)
I get an r2_score of 98%. The unit of my output variable is currently metres. If I multiply it by 100 to convert it to centimetres, then train the model and calculate the r2_score, it is 91%.
Why is my r2_score changing with the unit of the dependent variable? Shouldn't scaling take care of this?
Thanks!!
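For what it is worth, r2_score itself is unchanged when both the targets and the predictions are multiplied by the same constant, as a quick sketch with made-up numbers shows, so any difference between the metre and centimetre runs would have to come from the fitted model rather than from the metric:
import numpy as np
from sklearn.metrics import r2_score

# Hypothetical values in metres and the same values converted to centimetres
y_true = np.array([1.2, 0.8, 1.5, 2.0])
y_pred = np.array([1.1, 0.9, 1.6, 1.8])
print(r2_score(y_true, y_pred))              # R^2 in metres
print(r2_score(100 * y_true, 100 * y_pred))  # identical R^2 in centimetres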
When using this code, which I got from a tutorial, I got an error saying the model is not configured to compute accuracy and that I should pass accuracy. The weird part is that I am already passing metrics = ['accuracy'].
I have searched a lot, and all the code I have seen works fine except mine.
# Evaluating the ANN
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from tensorflow.python.keras.models import Sequential #Used to initialize the NN
from tensorflow.python.keras.layers import Dense #Used to create the layers in the ANN
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
    classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
    classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    return classifier
# Needs to be revised from the evaluating video in the course if needed
classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, nb_epoch = 100)
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = -1)
I expect the output to be the accuracies vector; instead I got:
ValueError: The model is not configured to compute accuracy. You should pass metrics=["accuracy"] to the model.compile() method.
Changing the parameter from metrics=['accuracy'] to metrics=['acc'] works for me.
Regards,
Joseph
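For reference, a minimal sketch of that change applied to the build_classifier from the question (everything else, including the imports, stays the same):
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
    classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
    classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
    # Only the metrics argument changes: 'acc' instead of 'accuracy'
    classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['acc'])
    return classifier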
I am new to keras and Neural networks. I am trying to tune the hyperparameters of a simple Neural network using GridSearchCV from scikit-learn with keras in python. Below is an example code for reference.
def base_model(input_layer_nodes = 150, optimizer = 'adam', kernel_initializer = 'normal', dropout_rate = 0.2):
    model = Sequential()
    model.add(Dense(units = input_layer_nodes, input_dim = 107, kernel_initializer = kernel_initializer, activation = 'relu'))
    # The Dropout layer must be added to the model, not just instantiated
    model.add(Dropout(dropout_rate))
    model.add(Dense(units = 1, kernel_initializer = kernel_initializer, activation = 'sigmoid'))
    # Compile model
    model.compile(loss = 'binary_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
    return model
# Defining parameters for performing GridSearch
# optimizer = ['sgd', 'rmsprop', 'adam']
# dropout_rate = [0.1, 0.2, 0.3, 0.4, 0.5]
# input_layer_nodes = [50, 107, 150, 200]
kernel_initializer = ['uniform', 'normal']
param_grid = dict(kernel_initializer = kernel_initializer)
model = KerasClassifier(build_fn = base_model, epochs = 10, batch_size = 128, verbose = 2)
grid = GridSearchCV(estimator = model, param_grid=param_grid, n_jobs = 1, cv = 5)
grid.fit(X_train, y_train)
# View hyperparameters of best neural network
print("\nBest Training Parameters: ", grid.best_params_)
print("Best Training Accuracy: ", grid.best_score_)
When I execute the above code, I get the below error.
ValueError: ('Some keys in session_kwargs are not supported at this time: %s', dict_keys(['kernel_initializer']))
I am able to tune some of the other parameters of the network, like dropout_rate, optimizer and epochs. If the same code works for those parameters, why does the kernel_initializer part not work? I am using Keras 2.2.2, TensorFlow 1.9.0-gpu and Python 3.6.6. My OS is Windows 10 x64. Any help on this would be appreciated.