Add SVM to last layer - python

What I did:
I implemented the following model using Keras:
# note: random_state expects an int or RandomState; np.random.seed(7) returns None
train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.2, random_state=7, shuffle=True)
train_X = np.reshape(train_X, (train_X.shape[0], 1, train_X.shape[1]))
test_X = np.reshape(test_X, (test_X.shape[0], 1, test_X.shape[1]))
inp = Input((train_X.shape[1], train_X.shape[2]))
lstm = LSTM(1, return_sequences=False)(inp)
output = Dense(train_Y.shape[1], activation='softmax')(lstm)
model = Model(inputs=inp, outputs=output)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.fit(train_X, train_Y, validation_split=.20, epochs=2, batch_size=50)
What I want:
I want to add an SVM as the last layer of my model, but I don't know how. Any ideas?

This should work for adding an SVM as the last layer.
inp = Input((train_X.shape[1], train_X.shape[2]))
lstm = LSTM(1, return_sequences=False)(inp)
output = Dense(train_Y.shape[1], activation='softmax', W_regularizer=l2(0.01))(lstm)
model = Model(inputs=inp, outputs=output)
model.compile(loss='hinge', optimizer='adam', metrics=['accuracy'])
model.fit(train_X, train_Y, validation_split=.20, epochs=2, batch_size=50)
Here I have used hinge as the loss, assuming a binary target. If you have more than two classes, consider using categorical_hinge instead.

Change softmax to linear and use kernel_regularizer=l2(1e-4) instead of W_regularizer=l2(0.01) with Keras 2.2.4. Use loss='categorical_hinge'.
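Putting the two answers together, a minimal sketch of the SVM-style head (assuming Keras 2.x imports and the train_X/train_Y from the question):
from keras.layers import Input, LSTM, Dense
from keras.models import Model
from keras.regularizers import l2

inp = Input((train_X.shape[1], train_X.shape[2]))
lstm = LSTM(1, return_sequences=False)(inp)
# Linear output + L2 weight penalty + hinge loss behaves like a linear SVM
output = Dense(train_Y.shape[1], activation='linear',
               kernel_regularizer=l2(1e-4))(lstm)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_hinge', optimizer='adam', metrics=['accuracy'])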

Related

pandas dataframe to tensorflow input

I want to use a pandas DataFrame as input to a neural net.
My neural net model is:
def build_model():
    model = Sequential()
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(64, activation="relu"))
    model.add(Dropout(0.1))
    model.add(Dense(32, activation="softmax"))
    model.compile(
        optimizer='adam',
        loss=['binary_crossentropy'],
        metrics=['accuracy']
    )
    return model
tensorboard = TensorBoard(log_dir=f"logs/{time.time()}", histogram_freq=1)
model = build_model()
history = model.fit(
    x_train,
    y_train,
    epochs=5,
    batch_size=32,
    validation_data=(x_val, y_val),
    callbacks=[tensorboard]
)
and I pass my dataframe as input like this:
y_val, x_val, y_train, x_train = (
    test_data.drop(['gender', 'comorbidities_count',
                    'comorbidities_significant_count',
                    'medication_count'], axis=1),
    test_data.drop(['fried'], axis=1),
    training_data.drop(['gender', 'comorbidities_count',
                        'comorbidities_significant_count',
                        'medication_count'], axis=1),
    training_data.drop(['fried'], axis=1))
but I get this error:
ValueError: Please provide as model inputs either a single array or a list of arrays.
Does anyone know how to turn this dataframe into an array so I can feed it in? Or is there some other issue I'm not aware of?
Use
y_val, x_val, y_train, x_train = (
    test_data.drop(['gender', 'comorbidities_count',
                    'comorbidities_significant_count',
                    'medication_count'], axis=1).to_numpy().astype(np.float32),
    test_data.drop(['fried'], axis=1).to_numpy().astype(np.float32),
    training_data.drop(['gender', 'comorbidities_count',
                        'comorbidities_significant_count',
                        'medication_count'], axis=1).to_numpy().astype(np.float32),
    training_data.drop(['fried'], axis=1).to_numpy().astype(np.float32))
The .to_numpy() method of a pandas DataFrame turns it into a NumPy array.
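As a tiny standalone illustration (hypothetical data, not from the question):
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
arr = df.to_numpy().astype(np.float32)
print(arr.shape, arr.dtype)  # (2, 2) float32 -- ready to pass to model.fit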

Why am I getting "Supported target types are: ('binary', 'multiclass'). Got 'continuous' instead." error?

I am writing this code and keep getting the "Supported target types are: ('binary', 'multiclass'). Got 'continuous' instead." error no matter what I try. Do you see the problem in my code?
df = pd.read_csv('drain.csv')
values = df.values
seed = 7
numpy.random.seed(seed)
X = df.iloc[:,:2]
Y = df.iloc[:,2:]
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(12, input_dim=8, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = KerasClassifier(build_fn=create_model, epochs=10, batch_size=10, verbose=0)
# evaluate using 10-fold cross validation
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
You need to convert your Y variables to binary, as shown here:
https://github.com/keras-team/keras/blob/master/examples/mnist_mlp.py
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
and then
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
Seems like you forgot the conversion to categorical step.
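For reference, a minimal worked example of what to_categorical produces (hypothetical labels):
import numpy as np
from keras.utils import to_categorical

y = np.array([0, 2, 1, 2])  # integer class labels
print(to_categorical(y, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]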

Using Keras with Ensemble Voting Classifier

I am trying to use EnsembleVoteClassifier from mlxtend library, where my classifiers are ANN, SVM, Logistic Regression. I am pre-fitting the models and calling EnsembleVoteClassifier just for prediction:
ensemble=EnsembleVoteClassifier(clfs=[model_nn, model_logreg],voting='hard',refit=False)
ensemble.fit(X_train,y_train)
y_pred_ensemble = ensemble.predict(X_test)
The problem is with Keras. My code is below:
model_nn = Sequential()
model_nn.add(Dense(20, input_shape=(X_train.shape[1],),
                   kernel_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42),
                   bias_initializer=RandomNormal(mean=0.0, stddev=0.05, seed=42)))
model_nn.add(Activation('relu'))
model_nn.add(BatchNormalization())
model_nn.add(Dropout(0.5))
model_nn.add(Dense(2, activation='softmax'))
model_nn.compile(loss='sparse_categorical_crossentropy', optimizer=k.optimizers.Adam(lr=1e-4))
early_stopping_monitor = EarlyStopping(monitor='val_loss', mode='min', patience=20)
lr_reduce= ReduceLROnPlateau(monitor='val_loss', verbose=1, mode='min', patience=20)
history = model_nn.fit(X_train, y_train, epochs=1000,
                       class_weight=class_weights,
                       batch_size=32,
                       validation_data=(X_val, y_val), verbose=1,
                       callbacks=[early_stopping_monitor, lr_reduce])
y_pred_nn = model_nn.predict(X_test)
y_pred_nn = y_pred_nn.argmax(axis=1)
The problem is that the shape of the predicted classes is (n_samples, 2), which produces an error in EnsembleVoteClassifier:
raise ValueError("bad input shape {0}".format(shape))
ValueError: bad input shape (, 2)
Is there any way to build a pipeline that will take care of the shape problem and output the Keras predictions in the same shape as sklearn does?
Thank you.
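One direction worth trying (a hypothetical wrapper sketch, not from the original thread): give the Keras model a predict that returns 1-D class labels, which is the shape EnsembleVoteClassifier expects from its estimators.
class KerasVoteWrapper:
    """Sketch: adapt a pre-fitted Keras model to sklearn-style 1-D predictions."""
    def __init__(self, model):
        self.model = model

    def fit(self, X, y):
        # The model is already fitted; with refit=False this is not called anyway
        return self

    def predict(self, X):
        # Collapse the (n_samples, 2) softmax output to (n_samples,) labels
        return self.model.predict(X).argmax(axis=1)

ensemble = EnsembleVoteClassifier(clfs=[KerasVoteWrapper(model_nn), model_logreg],
                                  voting='hard', refit=False)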

Keras earlystopping: print selected epoch

Simple question. I am using Keras earlystopping in the following form:
Earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')
How can I get Keras to print the selected epoch once the model has been fit? I think you have to use logs but don't quite know how.
Thanks.
Edit:
The full code is very long! Let me add a bit more than I gave. Hopefully it will help.
# Define model
def design_flexiNN(m_type, neurons, shape_timestep, shape_feature, activation, kernel_ini):
    model = Sequential()
    model.add(Dense(neurons, input_dim=shape_feature, activation=activation,
                    use_bias=True, kernel_initializer=kernel_ini))
    model.add(Dense(1, use_bias=True))
    model.compile(loss='mae', optimizer='Adam')
    return model
# fit model
def fit_flexiNN(m_type, train_X, train_y, epochs, batch_size, test_X, test_y):
    history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size,
                        callbacks=callbacks_list, validation_data=(test_X, test_y),
                        verbose=0, shuffle=False)
    return history  # added so the caller's history variable is actually populated
Earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')
callbacks_list = [Earlystop]
model = design_flexiNN(m_type, neurons, neurons_step, train_X_feature_shape, activation, kernel_ini)
history = fit_flexiNN(m_type, train_X, train_y, ini_epochs, batch_size, test_X, test_y)
I've been able to infer the selected epoch by doing len(history.history['val_loss']) minus 1, but that doesn't work if you have a patience above zero.
Been trying to solve this myself and realised that the len(history.history['val_loss']) method is almost correct. All you need to add is:
len(history.history['val_loss']) - patience
which should give you the epoch number of the selected model (assuming the model didn't run for the full number of epochs).
A slightly more thorough method would be:
model_loss = history.history["val_loss"]
epoch_chosen = model_loss.index(min(model_loss)) + 1
print(epoch_chosen)
Hope this helps!
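For completeness, a short sketch combining both ideas; note that recent Keras versions also record where the callback fired (worth verifying on your version):
import numpy as np

# Epoch with the lowest val_loss (1-based), same idea as the answer above
model_loss = history.history['val_loss']
print('Best epoch:', int(np.argmin(model_loss)) + 1)

# The EarlyStopping instance also records where training stopped
# (0-based; it stays 0 if early stopping never triggered)
print('Stopped at epoch:', Earlystop.stopped_epoch)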

Shape mismatch in LSTM in keras

I am trying to run an LSTM using Keras on my custom feature set. I have train and test features in separate files. Each CSV file contains 11 columns, with the last column being the class label. There are 40 classes in total in my dataset. The problem is that I cannot figure out the correct input_shape for the first layer. I have explored all of Stack Overflow and GitHub but still could not solve this.
Below is my complete code.
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
numpy.random.seed(7)
train_dataset = numpy.loadtxt("train.csv", delimiter=",")
X_train = train_dataset[:, 0:10]
y_train = train_dataset[:, 10]
test_dataset = numpy.loadtxt("test.csv", delimiter=",")
X_test = test_dataset[:, 0:10]
y_test = test_dataset[:, 10]
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=X_train.shape))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(32))
model.add(Dense(1, activation='softmax'))
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=10, epochs=1)
score, acc = model.evaluate(X_test, y_test, batch_size=10)
print('Test score:', score)
print('Test accuracy:', acc * 100)
Whatever I change the input_shape parameter to, I get an error either in the first LSTM layer or in the fit method.
You don't have a time dimension in your input.
Input for RNN should be (batch_size, time_step, features) while your input has dimension (batch_size, features).
If you want to use your 10 columns one at a time you should reshape the array with
numpy.reshape(train_dataset, (-1, train_dataset.shape[1], 1))
Try this code:
train_dataset = numpy.loadtxt("train.csv", delimiter=",")
train_dataset = numpy.reshape(train_dataset, (-1, train_dataset.shape[1], 1))
X_train = train_dataset[:, 0:10]
y_train = train_dataset[:, 10]
test_dataset = numpy.loadtxt("test.csv", delimiter=",")
test_dataset = numpy.reshape(test_dataset, (-1, test_dataset.shape[1], 1))
X_test = test_dataset[:, 0:10]
y_test = test_dataset[:, 10]
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(32))
model.add(Dense(1, activation='softmax'))
model.compile(loss='mean_squared_error', optimizer='sgd', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=10, epochs=1)
score, acc = model.evaluate(X_test, y_test, batch_size=10)
print('Test score:', score)
print('Test accuracy:', acc * 100)
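As a quick sanity check of what the reshape does (hypothetical 100-row file with 11 columns):
import numpy
data = numpy.zeros((100, 11))                        # 10 features + 1 label per row
data3d = numpy.reshape(data, (-1, data.shape[1], 1))
print(data3d.shape)             # (100, 11, 1): 11 time steps of 1 feature
print(data3d[:, 0:10].shape)    # X: (100, 10, 1)
print(data3d[:, 10].shape)      # y: (100, 1)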
