Keras autoencoder is not reconstructing 1D signals - python

I would like to train an autoencoder neural network.
Assume I have a 1D signal:
Then, in order to create a dataset, I split this signal into several thousand (overlapping) segments, each with 1024 samples. Each segment is saved into a separate .txt file.
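(For concreteness, the windowing step is equivalent to the array-based sketch below; the signal, window length and step size here are placeholder assumptions, and the per-segment .txt files are just this matrix written out row by row.)
import numpy as np

def make_windows(signal, window_length=1024, step=256):
    # Split a 1D signal into overlapping windows of a fixed length;
    # any step < window_length produces overlapping segments.
    starts = range(0, len(signal) - window_length + 1, step)
    return np.stack([signal[s:s + window_length] for s in starts])

signal = np.sin(np.linspace(0, 100, 100000))  # placeholder signal
windows = make_windows(signal)                # shape: (num_windows, 1024)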
My code is the following:
import tensorflow as tf
import tensorflow.keras as keras
import matplotlib.pyplot as plt
import glob
import numpy as np

def load_dataset(dataset_path, train_val_ratio=0.8):
    filenames = glob.glob(dataset_path + "\\*.txt")
    windows_amount = len(filenames)
    lines = [line.rstrip('\n') for line in open(filenames[0])]
    window_length = len(lines)
    train_windows_amount = int(np.ceil(windows_amount * train_val_ratio))
    val_windows_amount = int(np.ceil(windows_amount * (1 - train_val_ratio)))
    # note: np.float is deprecated in recent NumPy; plain float is equivalent here
    train_windows = np.zeros((train_windows_amount, window_length), dtype=float)
    val_windows = np.zeros((val_windows_amount, window_length), dtype=float)
    for j in range(len(filenames)):
        if j % 100 == 0:
            print(str(j / windows_amount * 100) + "%")
        lines = [line.rstrip('\n') for line in open(filenames[j])]
        if j < train_windows_amount:
            for i in range(len(lines)):
                train_windows[j, i] = float(lines[i])
        else:
            for i in range(len(lines)):
                val_windows[j - train_windows_amount, i] = float(lines[i])
    return train_windows, val_windows
train_path = ".\\dataset\\dataset_1\\train"
train_windows, val_windows = load_dataset(train_path)
train_windows = train_windows/1000000
val_windows = val_windows/1000000
treshold_path = ".\\dataset\\dataset_1\\treshold"
treshold_windows, _ = load_dataset(treshold_path)
treshold_windows = treshold_windows/1000000
model = keras.Sequential([
    keras.layers.Dense(1024, activation='relu', name="encoder"),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dense(256, activation='relu', name="bottleneck"),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dense(1024, activation='sigmoid', name="decoder")
])
model.compile(optimizer="adam",
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_windows, train_windows,
                    batch_size=128,
                    epochs=200,
                    verbose=1,
                    shuffle=True,
                    validation_data=(val_windows, val_windows))
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='val')
plt.grid()
plt.legend()
plt.show()
test_loss, test_acc = model.evaluate(treshold_windows, treshold_windows, verbose=2)
print('Test loss:', test_loss)
model.save_weights('model.h5')
reconstructed = model.predict(treshold_windows)
plt.plot(treshold_windows[10])
plt.plot(reconstructed[0])
plt.plot(reconstructed[10])
plt.plot(reconstructed[20])
plt.plot(reconstructed[30])
plt.plot(reconstructed[40])
plt.grid()
plt.legend()
plt.show()
After the autoencoder training I get the following history plot:
And when I try to use the trained autoencoder on a segment of the previously unseen signal, I get the following plot:
where the upper signal is the input segment,
the yellow signal is the network's output for that segment,
and the signals in other colors are the network's outputs for other input segments.
I would expect the autoencoder to produce output similar to its input, but that is not the case here. What am I doing wrong?
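(A diagnostic sketch, not part of the original question: the decoder ends in a sigmoid, which can only emit values in [0, 1], and binary_crossentropy likewise assumes targets in that range, so checking the data range is a reasonable first step.)
# If these fall outside [0, 1], a sigmoid decoder cannot reproduce the
# signal and binary_crossentropy is not an appropriate reconstruction loss.
print("train range:", train_windows.min(), train_windows.max())
print("threshold range:", treshold_windows.min(), treshold_windows.max())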

Related

How to perform Data Augmentation on Custom Generator with Siamese Networks

I have the following custom generator for building pairs of images for siamese networks, since unfortunately all of my training data does not fit in my GPU's memory:
import numpy as np
np.random.seed(42)  # for reproducibility
import random
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.optimizers import SGD, RMSprop
from keras import backend as K
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split

class DataGenerator(object):
    def __init__(self, batch_sz):
        print("[INFO] loading dataset...")
        trainX = np.load("features_and_labels/energy/train1-images.npy")
        trainY = np.load("features_and_labels/energy/train1-labels.npy")
        trainX, trainY = shuffle(trainX, trainY)
        trainX, testX, trainY, testY = train_test_split(trainX, trainY, test_size=0.30, stratify=trainY, random_state=42)
        trainX = trainX / 255.0
        testX = testX / 255.0
        print("READY")
        # create training+test positive and negative pairs
        digit_indices = [np.where(trainY == i)[0] for i in range(8)]
        self.tr_pairs, self.tr_y = self.create_pairs(trainX, digit_indices)
        digit_indices = [np.where(testY == i)[0] for i in range(8)]
        self.te_pairs, self.te_y = self.create_pairs(testX, digit_indices)
        self.tr_pairs_0 = self.tr_pairs[:, 0]
        self.tr_pairs_1 = self.tr_pairs[:, 1]
        self.te_pairs_0 = self.te_pairs[:, 0]
        self.te_pairs_1 = self.te_pairs[:, 1]
        self.batch_sz = batch_sz
        self.samples_per_train = (self.tr_pairs.shape[0] / self.batch_sz) * self.batch_sz
        self.samples_per_val = (self.te_pairs.shape[0] / self.batch_sz) * self.batch_sz
        self.cur_train_index = 0
        self.cur_val_index = 0
        del trainX, trainY
        # (pairTest, labelTest) = utils.make_pairs(testX, testY)
        print("DONE")
        del testX, testY

    def create_pairs(self, x, digit_indices):
        pairs = []
        labels = []
        n = min([len(digit_indices[d]) for d in range(8)]) - 1
        for d in range(8):
            for i in range(n):
                z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
                pairs += [[x[z1], x[z2]]]
                inc = random.randrange(0, 8)
                dn = (d + inc) % 7
                z1, z2 = digit_indices[d][i], digit_indices[dn][i]
                pairs += [[x[z1], x[z2]]]
                labels += [1, 0]
        return np.array(pairs), np.array(labels)

    def next_train(self):
        while 1:
            self.cur_train_index += self.batch_sz
            if self.cur_train_index >= self.samples_per_train:
                self.cur_train_index = 0
            yield ([self.tr_pairs_0[self.cur_train_index:self.cur_train_index + self.batch_sz],
                    self.tr_pairs_1[self.cur_train_index:self.cur_train_index + self.batch_sz]],
                   self.tr_y[self.cur_train_index:self.cur_train_index + self.batch_sz])

    def next_val(self):
        while 1:
            self.cur_val_index += self.batch_sz
            if self.cur_val_index >= self.samples_per_val:
                self.cur_val_index = 0
            yield ([self.te_pairs_0[self.cur_val_index:self.cur_val_index + self.batch_sz],
                    self.te_pairs_1[self.cur_val_index:self.cur_val_index + self.batch_sz]],
                   self.te_y[self.cur_val_index:self.cur_val_index + self.batch_sz])
I would like to use the following data augmentation techniques in these pairs of images:
augmentator = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
However, I get the following problem when calling it. Here is how I do it:
convnet = resnet50.ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# Add the final fully connected layers
x = convnet.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.3)(x)
preds = Dense(18, activation='sigmoid')(x)  # Apply sigmoid
convnet = Model(inputs=convnet.input, outputs=preds)

# Applying the above model to both the left and right images
encoded_l = convnet(left_input)
encoded_r = convnet(right_input)

# Euclidean distance between the two images or encodings through the ResNet-50 architecture
Euc_layer = Lambda(lambda tensor: K.abs(tensor[0] - tensor[1]))
# use and add the distance function
Euc_distance = Euc_layer([encoded_l, encoded_r])
# identify the prediction
prediction = Dense(1, activation='sigmoid')(Euc_distance)
# Define the network with the left and right inputs and the output prediction
siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)

# Calling the generator
datagen = mycustomgenerator_v2.DataGenerator(config.BATCH_SIZE)
print("[INFO] compiling model...")
siamese_net.compile(loss="binary_crossentropy", optimizer="sgd", metrics="accuracy")

# train the model
print("[INFO] training model...")
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1), cooldown=0, patience=3, min_lr=0.5e-6)
early_stopper = EarlyStopping(monitor='val_accuracy', min_delta=0.1, patience=250, restore_best_weights=True, verbose=1)
model_checkpoint = ModelCheckpoint("best_weight.h5", monitor="val_accuracy", save_best_only=True, save_weights_only=True, mode='auto')
callbacks = [lr_reducer, early_stopper, model_checkpoint]
augmentator = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
history = siamese_net.fit(augmentator.flow(datagen.next_train()), steps_per_epoch=datagen.samples_per_train, epochs=config.EPOCHS, validation_data=datagen.next_val(), validation_steps=datagen.samples_per_val, callbacks=callbacks)
Then I get the following error:
File "train_siamese_network.py", line 163, in <module>
history=siamese_net.fit(augmentator.flow(datagen.next_train()), steps_per_epoch=datagen.samples_per_train, epochs=config.EPOCHS, validation_data=datagen.next_val(), validation_steps=datagen.samples_per_val, callbacks=callbacks)
File "/home/me/.local/lib/python3.8/site-packages/tensorflow/python/keras/preprocessing/image.py", line 854, in flow
return NumpyArrayIterator(
File "/home/me/.local/lib/python3.8/site-packages/tensorflow/python/keras/preprocessing/image.py", line 450, in __init__
super(NumpyArrayIterator, self).__init__(
File "/home/me/.local/lib/python3.8/site-packages/keras_preprocessing/image/numpy_array_iterator.py", line 121, in __init__
self.x = np.asarray(x, dtype=self.dtype)
File "/home/me/.local/lib/python3.8/site-packages/numpy/core/_asarray.py", line 83, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: float() argument must be a string or a number, not 'generator'
Clearly I am not calling the data augmentator on the generator correctly. I also checked other similar posts like this, this and this, but they did not help me. So how am I supposed to apply data augmentation on the custom generator for my siamese network?
I don't know how to apply ImageDataGenerator augmentation to a dataset of pairs.
But I can tell you that one possibility is to generate the augmented images on disk. That is, you generate images from the original dataset (while there are no pairs yet), and in a next step you transform them into pairs.
As a disadvantage, this method will put very similar images in the test and train data, which could mean overfitting.
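(A possible in-memory alternative, offered as a sketch rather than a tested fix: ImageDataGenerator exposes random_transform, which augments a single image, so the custom generator can augment each image of a pair itself before yielding the batch. The method name next_train_augmented is hypothetical, and it assumes each image is a rank-3 array (H, W, C), which random_transform expects.)
def next_train_augmented(self, augmentator):
    # Wraps next_train(): applies an independent random transform to each
    # image of every pair; the pair labels are unchanged by augmentation.
    for (left, right), y in self.next_train():
        left_aug = np.array([augmentator.random_transform(img) for img in left])
        right_aug = np.array([augmentator.random_transform(img) for img in right])
        yield [left_aug, right_aug], y
siamese_net.fit would then consume this generator directly, without augmentator.flow, since flow expects in-memory arrays rather than a generator (which is exactly what the TypeError above complains about).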

Why does my TensorFlow model lose its accuracy after loading

So I am training on the Fashion-MNIST dataset, and the code is below.
The issue is that on the first run, it calculates everything and gives me a fair accuracy.
But on the second run (when it is supposed to load from the saved file) the accuracy drops considerably.
Is there something wrong with my code, or any practice I'm not following here?
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from os import environ, sep

environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
MODELFILENAME = 'TF_ZTH_02_model' + sep

labels = {
    0: 'T-shirt/Top',
    1: 'Trouser',
    2: 'Pullover',
    3: 'Dress',
    4: 'Coat',
    5: 'Sandal',
    6: 'Shirt',
    7: 'Sneaker',
    8: 'Bag',
    9: 'Ankle Boot'
}

def main():
    fashionmnist = keras.datasets.fashion_mnist
    (trainimages, trainlabels), (testimages, testlabels) = fashionmnist.load_data()
    trainimages, testimages = trainimages / 255., testimages / 255.
    try:
        # try to load model
        model = keras.models.load_model(MODELFILENAME)
    except:
        # file doesn't exist, train model
        # activation functions
        # relu - rectified linear unit - return value if it's greater than 0, or 0
        # softmax - picks biggest number in set
        model = keras.Sequential([
            keras.layers.Flatten(input_shape=(28, 28)),  # size of image
            keras.layers.Dense(128, activation=tf.nn.relu),
            keras.layers.Dense(10, activation=tf.nn.softmax)  # ten clothing classes
        ])
        model.compile(
            optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics='accuracy'
        )
        model.fit(trainimages, trainlabels, epochs=5)
        # save to file
        model.save(MODELFILENAME)
    testloss, testacc = model.evaluate(testimages, testlabels)
    print('\nEvaluation, loss and accuracy : ', testloss, testacc)
    predictions = model.predict(testimages)
    # predictions = model.predict(np.asarray([testimages[0]]))
    while True:
        x = int(input('\nEnter image number (<%d) : ' % len(testimages)))
        print('\nPredictions : ',
              predictions[x],
              predictions[x].argmax(),
              labels[predictions[x].argmax()])
        print('Actual : ', testlabels[x], labels[testlabels[x]])
        plt.ioff()
        plt.imshow(testimages[x])
        plt.title(labels[predictions[x].argmax()])
        plt.show()
    # but this ds has objects centered
    # in the case of an unprocessed ds, you'd need to SPOT FEATURES
    # with the help of convolutional networks

try:
    main()
except Exception as e:
    print(e)
finally:
    input()
Output on First Run
Output on Second Run
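(A minimal round-trip check, not from the original post and assuming the variables in the code above: if the weight arrays match after save/load but the measured accuracy still differs, the discrepancy is in the evaluation pipeline rather than in the saved file.)
reloaded = keras.models.load_model(MODELFILENAME)
for w_old, w_new in zip(model.get_weights(), reloaded.get_weights()):
    assert np.allclose(w_old, w_new), "weights changed across save/load"
print('Reloaded accuracy:', reloaded.evaluate(testimages, testlabels, verbose=0)[1])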

Error when checking target: expected dense_2 to have shape (6,) but got array with shape (3,)

I am trying to train a model that detects the native language of a speaker from speech data that is already in English.
I get this error when I try to run the code below, and the model fails to train.
What is the mistake? Can anyone explain?
Below is the code snippet of the training model:
import pandas as pd
from collections import Counter
import sys
sys.path.append('../dialectdetect-master/src')
import getsplit
from keras import utils
import accuracy
import multiprocessing
import librosa
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import MaxPooling2D, Conv2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, TensorBoard

DEBUG = True
SILENCE_THRESHOLD = .01
RATE = 24000
N_MFCC = 13
COL_SIZE = 30
EPOCHS = 10  # 35 # 250

def to_categorical(y):
    '''
    Converts list of languages into a binary class matrix
    :param y (list): list of languages
    :return (numpy array): binary class matrix
    '''
    lang_dict = {}
    for index, language in enumerate(set(y)):
        lang_dict[language] = index
    y = list(map(lambda x: lang_dict[x], y))
    return utils.to_categorical(y, len(lang_dict))
def get_wav(language_num):
    '''
    Load wav file from disk and down-sample to RATE
    :param language_num (list): list of file names
    :return (numpy array): down-sampled wav file
    '''
    y, sr = librosa.load('../audio/{}.wav'.format(language_num))
    return librosa.core.resample(y=y, orig_sr=sr, target_sr=RATE, scale=True)

def to_mfcc(wav):
    '''
    Converts wav file to Mel Frequency Cepstral Coefficients
    :param wav (numpy array): wav form
    :return (2d numpy array): MFCC
    '''
    return librosa.feature.mfcc(y=wav, sr=RATE, n_mfcc=N_MFCC)

def remove_silence(wav, thresh=0.04, chunk=5000):
    '''
    Searches wav form for segments of silence. If wav form values are lower than 'thresh' for 'chunk' samples, the values will be removed
    :param wav (np array): wav array to be filtered
    :return (np array): wav array with silence removed
    '''
    tf_list = []
    # integer division: in Python 3, len(wav) / chunk is a float and would make range() fail
    for x in range(len(wav) // chunk):
        if (np.any(wav[chunk * x:chunk * (x + 1)] >= thresh) or
                np.any(wav[chunk * x:chunk * (x + 1)] <= -thresh)):
            tf_list.extend([True] * chunk)
        else:
            tf_list.extend([False] * chunk)
    tf_list.extend((len(wav) - len(tf_list)) * [False])
    return wav[tf_list]
def normalize_mfcc(mfcc):
    '''
    Normalize MFCC
    :param mfcc:
    :return:
    '''
    mms = MinMaxScaler()
    return mms.fit_transform(np.abs(mfcc))

def make_segments(mfccs, labels):
    '''
    Makes segments of MFCCs and attaches them to the labels
    :param mfccs: list of MFCCs
    :param labels: list of labels
    :return (tuple): segments with labels
    '''
    segments = []
    seg_labels = []
    for mfcc, label in zip(mfccs, labels):
        for start in range(0, int(mfcc.shape[1] / COL_SIZE)):
            segments.append(mfcc[:, start * COL_SIZE:(start + 1) * COL_SIZE])
            seg_labels.append(label)
    return segments, seg_labels

def segment_one(mfcc):
    '''
    Creates segments from one MFCC image. If the last segment is not COL_SIZE columns long, it is dropped
    :param mfcc (numpy array): MFCC array
    :return (numpy array): segmented MFCC array
    '''
    segments = []
    for start in range(0, int(mfcc.shape[1] / COL_SIZE)):
        segments.append(mfcc[:, start * COL_SIZE:(start + 1) * COL_SIZE])
    return np.array(segments)

def create_segmented_mfccs(X_train):
    '''
    Creates segmented MFCCs from X_train
    :param X_train: list of MFCCs
    :return: segmented MFCCs
    '''
    segmented_mfccs = []
    for mfcc in X_train:
        segmented_mfccs.append(segment_one(mfcc))
    return segmented_mfccs
def train_model(X_train, y_train, X_validation, y_validation, batch_size=128):  # 64
    '''
    Trains 2D convolutional neural network
    :param X_train: Numpy array of MFCCs
    :param y_train: binary matrix based on labels
    :return: trained model
    '''
    # Get row, column, and class sizes
    rows = X_train[0].shape[0]
    cols = X_train[0].shape[1]
    val_rows = X_validation[0].shape[0]
    val_cols = X_validation[0].shape[1]
    num_classes = len(y_train[0])

    # input image dimensions to feed into 2D ConvNet Input layer
    input_shape = (rows, cols, 1)
    X_train = X_train.reshape(X_train.shape[0], rows, cols, 1)
    X_validation = X_validation.reshape(X_validation.shape[0], val_rows, val_cols, 1)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'training samples')

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     data_format="channels_last",
                     input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    # Stops training if accuracy does not change at least 0.005 over 10 epochs
    es = EarlyStopping(monitor='acc', min_delta=.005, patience=10, verbose=1, mode='auto')
    # Creates log file for graphical interpretation using TensorBoard
    tb = TensorBoard(log_dir='../logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True,
                     write_images=True, embeddings_freq=0, embeddings_layer_names=None,
                     embeddings_metadata=None)
    # Image shifting
    datagen = ImageDataGenerator(width_shift_range=0.05)
    # Fit model using ImageDataGenerator
    model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                        steps_per_epoch=len(X_train) / 32,
                        epochs=EPOCHS,
                        callbacks=[es, tb], validation_data=(X_validation, y_validation))
    return model

def save_model(model, model_filename):
    '''
    Save model to file
    :param model: trained model to be saved
    :param model_filename: filename
    :return: None
    '''
    model.save('../models/{}.h5'.format(model_filename))  # creates an HDF5 file
############################################################
if __name__ == '__main__':
    '''
    Console command example:
    python trainmodel.py bio_metadata.csv model50
    '''
    # Load arguments
    # print(sys.argv)
    file_name = sys.argv[1]
    model_filename = sys.argv[2]

    # Load metadata
    df = pd.read_csv(file_name)

    # Filter metadata to retrieve only files desired
    filtered_df = getsplit.filter_df(df)
    # filtered_df = filter_df(df)
    # print(filtered_df)
    # print("filtered df is empty {}".format(filtered_df))

    # Train/test split
    X_train, X_test, y_train, y_test = getsplit.split_people(filtered_df)

    # Get statistics
    train_count = Counter(y_train)
    test_count = Counter(y_test)
    print("Entering main")

    # import ipdb; ipdb.set_trace()
    acc_to_beat = test_count.most_common(1)[0][1] / float(np.sum(list(test_count.values())))

    # To categorical
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Get resampled wav files using multiprocessing
    if DEBUG:
        print('Loading wav files....')
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    X_train = pool.map(get_wav, X_train)
    X_test = pool.map(get_wav, X_test)

    # Convert to MFCC
    if DEBUG:
        print('Converting to MFCC....')
    X_train = pool.map(to_mfcc, X_train)
    X_test = pool.map(to_mfcc, X_test)

    # Create segments from MFCCs
    X_train, y_train = make_segments(X_train, y_train)
    X_validation, y_validation = make_segments(X_test, y_test)

    # Randomize training segments
    X_train, _, y_train, _ = train_test_split(X_train, y_train, test_size=50)

    # Train model
    model = train_model(np.array(X_train), np.array(y_train), np.array(X_validation), np.array(y_validation))

    # Make predictions on full X_test MFCCs
    y_predicted = accuracy.predict_class_all(create_segmented_mfccs(X_test), model)

    # Print statistics
    print('Training samples:', train_count)
    print('Testing samples:', test_count)
    print('Accuracy to beat:', acc_to_beat)
    print('Confusion matrix of total samples:\n', np.sum(accuracy.confusion_matrix(y_predicted, y_test), axis=1))
    print('Confusion matrix:\n', accuracy.confusion_matrix(y_predicted, y_test))
    print('Accuracy:', accuracy.get_accuracy(y_predicted, y_test))

    # Save model
    save_model(model, model_filename)
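(A quick check, not from the original post, that targets the likely cause of this particular error: the to_categorical above builds a fresh lang_dict from whatever labels it is given, so y_train and y_test can end up one-hot encoded to different widths, e.g. 6 vs 3, which is exactly the shape mismatch reported. Placed right after the two to_categorical calls:)
# Widths must agree with each other and with the final Dense layer.
print('y_train one-hot width:', np.array(y_train).shape[-1])
print('y_test one-hot width:', np.array(y_test).shape[-1])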

Problem with KerasRegressor & multiple output

I have 3 inputs and 3 outputs. I am trying to use KerasRegressor and cross_val_score to get my prediction score.
My code is:
# Function to create model, required for KerasRegressor
def create_model():
    # create model
    # start by defining the input tensor:
    input_data = layers.Input(shape=(3,))

    # create the layers and pass them the input tensor to get the output tensor:
    layer = [2, 2]
    hidden1Out = Dense(units=layer[0], activation='relu')(input_data)
    finalOut = Dense(units=layer[1], activation='relu')(hidden1Out)
    u_out = Dense(1, activation='linear', name='u')(finalOut)
    v_out = Dense(1, activation='linear', name='v')(finalOut)
    p_out = Dense(1, activation='linear', name='p')(finalOut)

    # define the model's start and end points
    model = Model(input_data, outputs=[u_out, v_out, p_out])
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model

# load data
...
input_var = np.vstack((AOA, x, y)).T
output_var = np.vstack((u, v, p)).T

# evaluate model
estimator = KerasRegressor(build_fn=create_model, epochs=num_epochs, batch_size=batch_size, verbose=0)
kfold = KFold(n_splits=10)
I tried:
results = cross_val_score(estimator, input_var, [output_var[:,0], output_var[:,1], output_var[:,2]], cv=kfold)
and
results = cross_val_score(estimator, input_var, [output_var[:,0:1], output_var[:,1:2], output_var[:,2:3]], cv=kfold)
and
results = cross_val_score(estimator, input_var, output_var, cv=kfold)
I got error messages like:
ValueError: Error when checking model target: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 3 array(s), but instead got the following list of 1 arrays: [array([[ 0.69945297, 0.13296847, 0.06292328],
or
ValueError: Found input variables with inconsistent numbers of samples: [72963, 3]
So how do I solve this problem?
Thanks.
The problem is that the input dimension of the Input layer is not 3, but 3*feature_dim. Below is a working example:
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Concatenate
from sklearn.model_selection import cross_val_score, KFold
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor

def create_model():
    feature_dim = 10
    input_data = Input(shape=(3 * feature_dim,))

    # create the layers and pass them the input tensor to get the output tensor:
    layer = [2, 2]
    hidden1Out = Dense(units=layer[0], activation='relu')(input_data)
    finalOut = Dense(units=layer[1], activation='relu')(hidden1Out)
    u_out = Dense(1, activation='linear', name='u')(finalOut)
    v_out = Dense(1, activation='linear', name='v')(finalOut)
    p_out = Dense(1, activation='linear', name='p')(finalOut)
    output = Concatenate()([u_out, v_out, p_out])

    # define the model's start and end points
    model = Model(inputs=input_data, outputs=output)
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model

x_0 = np.random.rand(100, 10)
x_1 = np.random.rand(100, 10)
x_2 = np.random.rand(100, 10)
input_val = np.hstack([x_0, x_1, x_2])

u = np.random.rand(100, 1)
v = np.random.rand(100, 1)
p = np.random.rand(100, 1)
output_val = np.hstack([u, v, p])

estimator = KerasRegressor(build_fn=create_model, nb_epoch=3, batch_size=8, verbose=False)
kfold = KFold(n_splits=3, random_state=0)
results = cross_val_score(estimator=estimator, X=input_val, y=output_val, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
As you can see, since the input dimension is 10, I specify feature_dim inside create_model.
I don't know what your data look like, but I think the key is how you stack them together.
I have also tried the following procedure:
input_var = np.random.randint(0, 1, size=(100, 3))
x = np.sum(np.sin(input_var), axis=1, keepdims=True)  # (100,1)
y = np.sum(np.cos(input_var), axis=1, keepdims=True)  # (100,1)
z = np.sum(np.sin(input_var) + np.cos(input_var), axis=1, keepdims=True)  # (100,1)
output_var = np.hstack((x, y, z))

# evaluate model
estimator = KerasRegressor(build_fn=create_model, epochs=10, batch_size=8, verbose=0)
kfold = KFold(n_splits=10)
results = cross_val_score(estimator, input_var, output_var, cv=kfold)
The only issue I get is TensorFlow complaining about not using tensors.
I hope this helps; if not, let me know what the dimensions of your data look like.
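(One clarifying note beyond the answer above, offered under the assumption that the question's original three-output create_model is in scope: plain Keras fit does accept one target array per output, so the multi-output model itself is fine; it is cross_val_score that needs a single 2D target, hence the Concatenate trick. A sketch:)
model = create_model()  # the original version with outputs=[u_out, v_out, p_out]
# Keras multi-output training: one target array per named output.
model.fit(input_var, [output_var[:, 0], output_var[:, 1], output_var[:, 2]],
          epochs=10, batch_size=8, verbose=0)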

tf.keras.Sequential binary classification model predicting [0.5, 0.5] or close to

I am currently trying to build a model to classify whether the total goals in a given football match will be above or below 2.5, based on the home team, away team and league, using a tf.keras.Sequential model in TensorFlow 2.0RC.
The problem I am encountering is that my softmax results converge on [0.5,0.5] when using the model.predict method. What makes this odd is that my validation & test accuracy and losses are about 0.94 & 0.12 respectively after 1000 epochs of training, otherwise I would have put this down to an overfitting problem. I am aware that 1000 epochs is extremely likely to overfit, however, I want to understand why my accuracy increases until about 800 epochs in. My loss flattens at about 300 epochs.
I have tried to alter the number of layers, number of units in each layer, the activation functions, optimizers and loss functions, number of epochs and learning rates, but can only seem to increase the losses.
The results still seem to converge toward [0.5,0.5] regardless.
The full code can be viewed at https://github.com/AhmUgEk/tensorflow_football_predictions, but below is an extract showing model composition.
# Create Keras Sequential model:
model = keras.Sequential()
model.add(feature_layer)  # Input processing layer.
model.add(Dense(units=32, activation='relu'))  # Hidden layer 1.
model.add(Dropout(rate=0.4))
model.add(BatchNormalization())
model.add(Dense(units=32, activation='relu'))  # Hidden layer 2.
model.add(Dropout(rate=0.4))
model.add(BatchNormalization())
model.add(Dense(units=2, activation='softmax'))  # Output layer.

# Compile the model:
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss=keras.losses.MeanSquaredLogarithmicError(),
    metrics=['accuracy']
)
# Fit the model to the training dataset and validate against the
# validation dataset between epochs:
model.fit(
    train_dataset,
    validation_data=val_dataset,
    epochs=1000,
    callbacks=[tensorboard_callback]
)
I would expect to receive a result of [0.282, 0.718] for example for an input of:
model.predict_classes([np.array(['E0'], dtype='object'),
                       np.array(['Liverpool'], dtype='object'),
                       np.array(['Newcastle'], dtype='object')])[0]
but as per the above, receive a result of say [0.5, 0.5].
Am I missing something obvious here?
I made some minor changes to the model. Now I am not getting exactly [0.5, 0.5].
Result:
[[0.61482537 0.3851746 ]
[0.5121426 0.48785746]
[0.48058605 0.51941395]
[0.48913187 0.51086813]
[0.45480043 0.5451996 ]
[0.48933673 0.5106633 ]
[0.43431875 0.5656812 ]
[0.55314165 0.4468583 ]
[0.5365097 0.4634903 ]
[0.54371756 0.45628244]]
Implementation:
import datetime
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from gpu_limiter import limit_gpu
from pipe_functions import csv_to_df, dataframe_to_dataset
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import BatchNormalization, Dense, DenseFeatures, Dropout, Input
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import tensorflow.keras.backend as K
from tensorflow.data import Dataset

# Test GPU availability and instantiate memory growth limitation if True:
if tf.test.is_gpu_available():
    print('GPU Available\n')
    limit_gpu()
else:
    print('Running on CPU')

df = csv_to_df("./csv_files")

# Format & organise imported data, making the "Date" column the new index:
df['Date'] = pd.to_datetime(df['Date'])
df = df[['Date', 'Div', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG']].dropna().set_index('Date').sort_index()
df['Over_2.5'] = (df['FTHG'] + df['FTAG'] > 2.5).astype(int)
df = df.drop(['FTHG', 'FTAG'], axis=1)

# Split data into training, validation and testing data:
# Note: random_state variable set to ensure reproducibility.
train, test = train_test_split(df, test_size=0.05, random_state=42)
train, val = train_test_split(train, test_size=0.05, random_state=42)
# print(df['Over_2.5'].value_counts())  # Check that data is balanced.

# Create datasets from train, val & test dataframes:
target_col = 'Over_2.5'
batch_size = 32

def df_to_dataset(features: np.ndarray, labels: np.ndarray, shuffle=True, batch_size=8) -> Dataset:
    ds = Dataset.from_tensor_slices(({"feature": features}, {"target": labels}))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(features))
    ds = ds.batch(batch_size)
    return ds

def get_feature_transform() -> DenseFeatures:
    # Format features into feature columns to ensure data is in the correct format for feeding into the model:
    feature_cols = []
    for column in filter(lambda x: x != target_col, df.columns):
        feature_cols.append(tf.feature_column.embedding_column(tf.feature_column.categorical_column_with_vocabulary_list(
            key=column, vocabulary_list=df[column].unique()), dimension=5))
    return DenseFeatures(feature_cols)

# Transforms all features into dense tensors.
feature_transform = get_feature_transform()
train_features = feature_transform(dict(train)).numpy()
val_features = feature_transform(dict(val)).numpy()
test_features = feature_transform(dict(test)).numpy()

train_dataset = df_to_dataset(train_features, train[target_col].values, shuffle=True, batch_size=batch_size)
val_dataset = df_to_dataset(val_features, val[target_col].values, shuffle=True, batch_size=batch_size)  # Shuffle not required for validation data.
test_dataset = df_to_dataset(test_features, test[target_col].values, shuffle=True, batch_size=batch_size)  # Shuffle not required for test data.

# Create Keras functional API model:
# Create a feature layer from the feature columns, to be placed at the input layer of the model:
def build_model(input_shape: tuple) -> keras.Model:
    input_layer = keras.Input(shape=input_shape, name='feature')
    model = Dense(units=1028, activation='relu', kernel_initializer='normal', name='dense0')(input_layer)  # Hidden layer 1.
    model = BatchNormalization(name='bc0')(model)
    model = Dense(units=1028, activation='relu', kernel_initializer='normal', name='dense1')(model)  # Hidden layer 2.
    model = Dropout(rate=0.1)(model)
    model = BatchNormalization(name='bc1')(model)
    model = Dense(units=100, activation='relu', kernel_initializer='normal', name='dense2')(model)  # Hidden layer 3.
    model = Dropout(rate=0.25)(model)
    model = BatchNormalization(name='bc2')(model)
    model = Dense(units=50, activation='relu', kernel_initializer='normal', name='dense3')(model)  # Hidden layer 4.
    model = Dropout(rate=0.4)(model)
    model = BatchNormalization(name='bc3')(model)
    output_layer = Dense(units=2, activation='softmax', kernel_initializer='normal', name='target')(model)  # Output layer.
    model = keras.Model(inputs=input_layer, outputs=output_layer, name='better-than-chance')

    # Compile the model:
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.001),
        loss='mse',
        metrics=['accuracy']
    )
    return model

# # Create a TensorBoard log file (time appended) directory for every run of the model:
# directory = ".\\logs\\" + str(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
# os.mkdir(directory)
# # Create a TensorBoard callback to log a record of model performance for every 1 epoch:
# tensorboard_callback = TensorBoard(log_dir=directory, histogram_freq=1, write_graph=True, write_images=True)
# Run "tensorboard --logdir .\logs" in an Anaconda prompt to review & compare logged results.
# Note: make sure that the correct environment is activated before running.

model = build_model((train_features.shape[1],))
model.summary()

# checkpoint = ModelCheckpoint('model-{epoch:03d}.h5', verbose=1, monitor='val_loss', save_best_only=True, mode='auto')

# Fit the model to the training dataset and validate against the validation dataset between epochs:
model.fit(
    train_dataset,
    validation_data=val_dataset,
    epochs=10)
# callbacks=[checkpoint]

# Saves and reloads model.
# model.save("./model.h5")
# model_from_saved = keras.models.load_model("./model.h5")

# Evaluate model accuracy against test dataset:
# scores, accuracy = model.evaluate(train_dataset)
# print('Accuracy:', accuracy)

##############
## OPTIONAL ##
##############
# DEBUGGING
# inp = model.input  # input placeholder
# outputs = [layer.output for layer in model.layers]  # all layer outputs
# functors = [K.function([inp], [out]) for out in outputs]  # evaluation functions
# # Testing
# layer_outs = [func([test_features]) for func in functors]
# print(layer_outs)

# Form a prediction based on inputs:
prediction = model.predict({"feature": test_features[:10]})
print(prediction)
One thing you can do is try some ensemble learning methods, like RandomForest and XGBoost, and compare the results.
You should also try adding other key performance indicators (KPIs) to your data and then try to fit the model.
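(A minimal sketch of the RandomForest comparison suggested above, assuming the dense train_features/test_features arrays from the implementation; it is an illustration, not a tuned baseline.)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Fit on the same dense feature encoding used by the Keras model.
rf = RandomForestClassifier(n_estimators=200, random_state=42)
rf.fit(train_features, train[target_col].values)
rf_pred = rf.predict(test_features)
print('RandomForest accuracy:', accuracy_score(test[target_col].values, rf_pred))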
