I am using an LSTM RNN to detect whether a heartbeat is arrhythmic or not. The output classes are [0, 1] and n_classes = 2, but when this code is executed:
# Fit training using batch data
_, loss, acc = sess.run(
[optimizer, cost, accuracy],
feed_dict={
x: batch_xs,
y: batch_ys
}
)
It gives the following error:
ValueError: Cannot feed value of shape (1, 1) for Tensor 'Placeholder_1:0', which has shape '(?, 2)'
Here is the whole code:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf # Version 1.0.0 (some previous versions are used in past commits)
from sklearn import metrics
import _pickle as cPickle
import os
import pandas as pd
import functions as f
[ml2_train_input,ml2_train_output,ml2_train_peaks,ml2_test_input,ml2_test_output,ml2_test_peaks]=f.get_ml2(0.5)
ml2_train_output=f.get_binary_output(ml2_train_output[:52500])
ml2_test_output=f.get_binary_output(ml2_test_output[:52500])
# Output classes to learn how to classify
LABELS = [0,1 ]
training_data_count = len(ml2_train_input[:52500]) # training series
test_data_count = len(ml2_test_input[:52500]) # testing series
n_input = 360 # 360 input parameters per timestep
# LSTM Neural Network's internal structure
n_hidden = 8 # Hidden layer num of features
n_classes = 2 # Total classes
# Training
learning_rate = 0.005
lambda_loss_amount = 0.0015
training_iters = training_data_count * 10 # Loop 10 times on the dataset
batch_size = 500
display_iter = 1000 # To show test set accuracy during training
X_test=np.array(ml2_test_input[:52500])
y_test=np.array(ml2_test_output[:52500])
# Some debugging info
print("Some useful info to get an insight on dataset's shape and normalisation:")
print("(X shape, y shape, every X's mean, every X's standard deviation)")
print(X_test.shape, y_test.shape, np.mean(X_test), np.std(X_test))
print("The dataset is therefore properly normalised, as expected, but not yet one-hot encoded.")
def LSTM_RNN(_X, _weights, _biases):
# Function returns a tensorflow LSTM (RNN) artificial neural network from given parameters.
# Moreover, two LSTM cells are stacked which adds deepness to the neural network.
# Note: some code in this notebook is inspired by a slightly different
# RNN architecture used on another dataset; some of the credit goes to
# "aymericdamien" under the MIT license.
# (NOTE: This step could be greatly optimised by shaping the dataset once
# input shape: (batch_size, n_steps, n_input)
# permute n_steps and batch_size
# Reshape to prepare input to hidden activation
#_X = tf.reshape(_X, [-1, n_input])
# new shape: (n_steps*batch_size, n_input)
# Linear activation
_X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
# Split data because rnn cell needs a list of inputs for the RNN inner loop
_X = tf.split(_X, 500,0)
# new shape: n_steps * (batch_size, n_hidden)
# Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True,reuse=None)
lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True,reuse=None)
lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
# Get LSTM cell output
outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1]
# Linear activation
return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, step, batch_size):
# Function to fetch a "batch_size" amount of data from "(X|y)_train" data.
shape = list(_train.shape)
shape[0] = batch_size
batch_s = np.empty(shape)
for i in range(batch_size):
# Loop index
index = ((step-1)*batch_size + i) % len(_train)
batch_s[i] = _train[index]
return batch_s
def one_hot(y_):
# Function to encode output labels from number indexes
# e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
y_ = y_.reshape(len(y_))
n_values = int(np.max(y_)) + 1
return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Graph weights
weights = {
'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
'hidden': tf.Variable(tf.random_normal([n_hidden])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM_RNN(x, weights, biases)
# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
) # L2 loss prevents this overkill neural network from overfitting the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# To keep track of training's performance
test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []
# Launch the graph
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
X_train=np.array(ml2_train_input[:52500])
y_train=np.array(ml2_train_output[:52500])
step = 1
while step * batch_size <= training_iters:
batch_xs = extract_batch_size(X_train, step, batch_size)
batch_ys = one_hot(extract_batch_size(y_train, step, batch_size))
# Fit training using batch data
_, loss, acc = sess.run(
[optimizer, cost, accuracy],
feed_dict={
x: batch_xs,
y: batch_ys
}
)
train_losses.append(loss)
train_accuracies.append(acc)
# Evaluate network only at some steps for faster training:
if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):
# To not spam console, show training accuracy/loss in this "if"
print("Training iter #" + str(step*batch_size) + \
": Batch Loss = " + "{:.6f}".format(loss) + \
", Accuracy = {}".format(acc))
# Evaluation on the test set (no learning made here - just evaluation for diagnosis)
loss, acc = sess.run(
[cost, accuracy],
feed_dict={
x: X_test,
y: one_hot(y_test)
}
)
test_losses.append(loss)
test_accuracies.append(acc)
print("PERFORMANCE ON TEST SET: " + \
"Batch Loss = {}".format(loss) + \
", Accuracy = {}".format(acc))
step += 1
print("Optimization Finished!")
Please help!
I feel you should convert your Y values to categorical (one-hot encoded); then it should work. The y placeholder expects shape (?, 2), but a (1, 1) array is being fed, so try converting your Y values to categorical with exactly two columns.
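A minimal sketch of one way to do that, reusing the names from the code above: the one_hot() helper in the question infers the number of classes from np.max of the batch, so a batch containing only one label is encoded with too few columns to match the (?, 2) placeholder. Encoding with a fixed width avoids that:
import numpy as np

def one_hot_fixed(y_, n_classes=2):
    # Always produce n_classes columns, regardless of which labels appear in the batch.
    y_ = np.asarray(y_, dtype=np.int32).reshape(-1)
    return np.eye(n_classes)[y_]

# In the training loop:
batch_ys = one_hot_fixed(extract_batch_size(y_train, step, batch_size), n_classes)
# And for the test feed:
# y: one_hot_fixed(y_test, n_classes)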
I'm trying to create an RNN to guess what notes are being played on a piano, given a sound file of piano notes (WAV format). I'm currently cutting the WAV clips into ten-second chunks (2D), padding shorter sections to 10 seconds with zeroes so the input is all regular. However, when I pass the clips to the RNN, it gives an output with one fewer dimension (1D) (when taking the last state; should I be taking the state series?).
I've created a simpler RNN to analyze single notes files (2D) and produce one output (1D), which has been successful. However, when trying to apply this same technique to full clips with multiple notes and notes starting/stopping it seems to break down, as I can't seem to change the output shape.
import tensorflow as tf
from tensorflow.contrib import rnn

def weight_variable(shape):
initer = tf.truncated_normal_initializer(stddev=0.01)
return tf.get_variable('W', dtype=tf.float32, shape=shape, initializer=initer)
def bias_variable(shape):
initial = tf.constant(0., shape=shape, dtype=tf.float32)
return tf.get_variable('b', dtype=tf.float32,initializer=initial)
def RNN(x, weights, biases, timesteps, num_hidden):
x = tf.unstack(x, timesteps, 1)
# Define a rnn cell with tensorflow
lstm_cell = rnn.LSTMCell(num_hidden)
states_series, current_state = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
return tf.matmul(current_state[1], weights) + biases
# return [tf.matmul(temp,weights) + biases for temp in states_series]
# does this even make sense
# x is for data, y is for targets, shapes are [index, time, frequency], [index, time, output note (s)] respectively
x_train, x_valid, y_train, y_valid = load_data() # removed test
print("Size of:")
print("- Training-set:\t\t{}".format(y_train.shape[0]))
print("- Validation-set:\t{}".format(y_valid.shape[0]))
# print("- Test-set\t{}".format(len(y_test)))
learning_rate = 0.001 # The optimization initial learning rate
epochs = 1000 # Total number of training epochs
batch_size = 100 # Training batch size
display_freq = 100 # Frequency of displaying the training results
threshold = 0.7 # Threshold for determining a "note"
num_hidden_units = 15 # Number of hidden units of the RNN
# Placeholders for inputs (x) and outputs(y)
x = tf.placeholder(tf.float32, shape=(None, stepCount, num_input))
y = tf.placeholder(tf.float32, shape=(None, stepCount, n_classes))
# create weight matrix initialized randomly from N~(0, 0.01)
W = weight_variable(shape=[num_hidden_units, n_classes])
# create bias vector initialized as zero
b = bias_variable(shape=[n_classes])
output_logits = RNN(x, W, b, stepCount, num_hidden_units)
y_pred = tf.nn.softmax(output_logits)
# Define the loss function, optimizer, and accuracy, etc.
# (code removed, irrelevant)
# Creating the op for initializing all variables
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
global_step = 0
# Number of training iterations in each epoch
num_tr_iter = int(y_train.shape[0] / batch_size)
for epoch in range(epochs):
print('Training epoch: {}'.format(epoch + 1))
x_train, y_train = randomize(x_train, y_train)
for iteration in range(num_tr_iter):
global_step += 1
start = iteration * batch_size
end = (iteration + 1) * batch_size
x_batch, y_batch = get_next_batch(x_train, y_train, start, end)
# Run optimization op (backprop)
feed_dict_batch = {x: x_batch, y: y_batch}
sess.run(optimizer, feed_dict=feed_dict_batch)
if iteration % display_freq == 0:
# Calculate and display the batch loss and accuracy
loss_batch, acc_batch = sess.run([loss, accuracy],
feed_dict=feed_dict_batch)
print("iter {0:3d}:\t Loss={1:.2f},\tTraining Accuracy={2:.01%}".
format(iteration, loss_batch, acc_batch))
testLoss.append(loss_batch)
testAcc.append(acc_batch)
# Run validation after every epoch
feed_dict_valid = {x: x_valid[:1000].reshape((-1, stepCount, num_input)), y: y_valid[:1000]}
loss_valid, acc_valid = sess.run([loss, accuracy], feed_dict=feed_dict_valid)
print('---------------------------------------------------------')
print("Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}".
format(epoch + 1, loss_valid, acc_valid))
print('---------------------------------------------------------')
validLoss.append(loss_valid)
validAcc.append(acc_valid)
Currently, this is outputting a 1D array of predictions, which really does not make sense in my scenario, but I'm not sure how to change it (it should be outputting predictions for each timestep - i.e. predictions of what notes are playing at each moment in time).
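For reference, a minimal sketch of the per-timestep variant hinted at by the commented-out return above (an assumption, not a tested fix): apply the same output projection to every element of states_series and stack the results on the time axis, so the logits have shape (batch, stepCount, n_classes) and match the y placeholder.
import tensorflow as tf
from tensorflow.contrib import rnn

def RNN_per_step(x, weights, biases, timesteps, num_hidden):
    # Unstack (batch, time, frequency) into a list of `timesteps` tensors of shape (batch, frequency).
    x = tf.unstack(x, timesteps, 1)
    lstm_cell = rnn.LSTMCell(num_hidden)
    states_series, current_state = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Project every timestep's output, then re-stack on the time axis.
    logits_per_step = [tf.matmul(output, weights) + biases for output in states_series]
    return tf.stack(logits_per_step, axis=1)  # shape: (batch, timesteps, n_classes)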
This is an example from the TFLearn documentation. It shows how to combine TFLearn and TensorFlow, using a TFLearn trainer with a regular TensorFlow graph. However, the current training, test and validation accuracy calculations are not accessible.
import tensorflow as tf
import tflearn
...
# User defined placeholders
with tf.Graph().as_default():
# Placeholders for data and labels
X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)
net = tf.reshape(X, [-1, 28, 28, 1])
# Using TFLearn wrappers for network building
net = tflearn.conv_2d(net, 32, 3, activation='relu')
.
.
.
net = tflearn.fully_connected(net, 10, activation='linear')
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
logits=net,
labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
# Initializing the variables
...
# Launch the graph
with tf.Session() as sess:
sess.run(init)
...
for epoch in range(2): # 2 epochs
...
for i in range(total_batch):
batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
How do I access the calculated training and validation accuracy at each step in the nested FOR loop?
UPDATE FOR CLARITY:
A solution might be as follows: Using the fit_batch method of the Trainer class, I believe I am calculating the training and validation accuracy during the nested loop.
Does this code calculate the running accuracies as the model trains?
Is there a better way of doing this with TFLearn?
I understand that tensorboard uses these values. Could I retrieve the values from the eventlogs?
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
...
network = input_data(shape=[None, image_size, image_size, num_channels],
data_preprocessing=feature_normalization,
data_augmentation=None,
name='input_d')
.
.
.
network = regression(network, optimizer='SGD',
loss='categorical_crossentropy',
learning_rate=0.05, name='targets')
model_dnn_tr = tflearn.DNN(network, tensorboard_verbose=0)
...
with tf.Session(graph=graph) as session:
...
for step in range(num_steps):
...
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
loss = model_dnn_tr.fit_batch({'input_d' : batch_data}, {'targets':
batch_labels})
if (step % 50 == 0):
trainAccr = accuracy(model_dnn_tr.predict({'input_d' :
batch_data}), batch_labels)
validAccr = accuracy(model_dnn_tr.predict({'input_d' :
valid_dataset}), valid_labels)
testAccr = accuracy(model_dnn_tr.predict({'input_d' : test_dataset}),
test_labels)
UPDATE with the correct answer
Could I retrieve the values from the eventlogs?
Tensorboard does have a means to download the accuracy datasets, but making use of it during training is problematic.
Does this code calculate the running accuracies as the model trains?
In a word. Yes.
The fit_batch method works as one might expect; as does the initial solution I posted below.
However, neither is the prescribed method.
Is there a better way of doing this within TFLearn?
Yes!
In order to track and interact with the metrics of the training, a training Callback should be implemented.
from tflearn import callbacks as cb
class BiasVarianceStrategyCallback(cb.Callback):
def __init__(self, train_acc_thresh,run_id,rel_err=.1):
""" Note: We are free to define our init function however we please. """
def errThrshld(Tran_accuracy=train_acc_thresh,relative_err=rel_err):
Tran_err = round(1-Tran_accuracy,2)
Test_err = ...
Vald_err = ...
Diff_err = ...
return {'Tr':Tran_err,'Vl':Vald_err,'Ts':Test_err,'Df':Diff_err}
return
def update_acc_df(self,training_state,state):
...
return
def on_epoch_begin(self, training_state):
""" """
...
variance_found = ...
if trn_acc_stall or vld_acc_stall:
print("accuracy increase stalled. training epoch:"...
if trn_lss_mvNup or vld_lss_mvNup:
print("loss began increase training:"...
raise StopIteration
return
if variance_found or bias_found:
print("bias:",bias_found,"variance:",variance_found)
raise StopIteration
return
return
def on_batch_end(self, training_state, snapshot=False):
self.update_acc_df(training_state,"batch")
return
def on_epoch_end(self, training_state):
self.update_acc_df(training_state,"epoch")
return
def on_train_end(self, training_state):
self.update_acc_df(training_state,"train")
self.df = self.df.iloc[0:0]
return
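A minimal usage sketch (the epoch count, threshold value and validation arrays are illustrative assumptions, not from the original post): instantiate the callback and hand it to DNN.fit via the callbacks argument.
# Hypothetical threshold and run id
monitor_cb = BiasVarianceStrategyCallback(train_acc_thresh=0.85, run_id='run_0')
model_dnn_tr.fit(train_dataset, train_labels,
                 n_epoch=10,
                 validation_set=(valid_dataset, valid_labels),
                 show_metric=True,
                 callbacks=monitor_cb)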
Initial solution
The most satisfying solution I found thus far:
Uses the dataset object and iterators to feed data.
Not much different from the fit_batch method in the OP.
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
...
graph = tf.Graph()
with graph.as_default():
...
# create a placeholder to dynamically switch between
# validation and training batch sizes
batch_size_x = tf.placeholder(tf.int64)
data_placeholder = tf.placeholder(tf.float32,
shape=(None, image_size, image_size, num_channels))
labels_placeholder = tf.placeholder(tf.float32, shape=(None, num_labels))
# create dataset: one for training and one for test etc
dataset = tf.data.Dataset.from_tensor_slices((data_placeholder,labels_placeholder)).batch(batch_size_x).repeat()
# create an iterator
iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
# get the tensor that will contain data
feature, label = iterator.get_next()
# create the initialisation operations
init_op = iterator.make_initializer(dataset)
valid_data_x = tf.constant(valid_data)
test_data_x = tf.constant(test_data)
# Model.
network = input_data(shape=[None, image_size, image_size, num_channels],
placeholder=data_placeholder,
data_preprocessing=feature_normalization,
data_augmentation=None,
name='input_d')
.
.
.
logits = fully_connected(network,...
# Training computation.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_placeholder,logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
prediction = tf.nn.softmax(logits)
...
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
# initialise iterator with train data
feed_dict = {data_placeholder: train_data,
labels_placeholder: train_data_labels,
batch_size_x: batch_size}
session.run(init_op, feed_dict = feed_dict)
for step in range(num_steps):
batch_data,batch_labels = session.run( [feature, label], feed_dict =
feed_dict )
feed_dict2 = {data_placeholder: batch_data, labels_placeholder: batch_labels}
_, l, predictions = session.run([optimizer, loss, prediction],
feed_dict=feed_dict2)
if (step % 50 == 0):
trainAccrMb = accuracy(predictions, batch_labels)
feed_dict = {data_placeholder: valid_data_x.eval(), labels_placeholder: valid_data_labels }
valid_prediction = session.run(prediction,
feed_dict=feed_dict)
validAccr= accuracy(valid_prediction, valid_data_labels)
feed_dict = {data_placeholder: test_data_x.eval(), labels_placeholder:
test_data_labels }#, batch_size_x: len(valid_data)}
test_prediction = session.run(prediction,
feed_dict=feed_dict)
testAccr = accuracy(test_prediction, test_data_labels)
I'm currently trying to monitor my TensorFlow model with tf.Summaries, tf.FileWriter and TensorBoard.
I succeeded in plotting training metrics, as well as validation (and/or testing) metrics. However, my problem is that I did not succeed in plotting both datasets' metrics in the same graph: as my validation dataset is too large, I have to batch it, and I cannot settle for the standard solutions that currently work for MNIST and other canonical datasets (see e.g. this GitHub example of MNIST with summaries, or some Stack Overflow threads here, here and here).
As my validation dataset is multi-batched, I'm forced to use value and update ops as described e.g. by this answer or this one.
Here is a minimal working example corresponding to what I am trying to do:
import os
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
dataset = mnist.read_data_sets("data", one_hot=True, reshape=False, validation_size=0)
X = tf.placeholder(tf.float32, name='X', shape=[None, 28, 28, 1])
Y = tf.placeholder(tf.float32, name='Y', shape=[None, 10])
# Conv layer
w1 = tf.Variable(tf.truncated_normal([5, 5, 1, 8]), name="weights_c1", trainable=True)
b1 = tf.Variable(tf.ones([8])/10, name="biases_c1", trainable=True)
conv1 = tf.nn.conv2d(X, w1, strides=[1, 1, 1, 1], padding="SAME", name="conv1")
conv_bias1 = tf.add(conv1, b1, name="convbias1")
relu1 = tf.nn.relu(conv_bias1, name="relu1")
# Max pooling layer
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
# Fully-connected layer
reshaped = tf.reshape(pool1, [-1, 14 * 14 * 8])
wfc = tf.Variable(tf.truncated_normal([14 * 14 * 8, 500]),
name="weights_fc", trainable=True)
bfc = tf.Variable(tf.ones([500])/10, name="biases_fc", trainable=True)
fc = tf.add(tf.matmul(reshaped, wfc), bfc, name="raw_fc")
relu_fc = tf.nn.relu(fc, name="relu_fc")
# Output layer
wo = tf.Variable(tf.truncated_normal([500, 10]), name="weights_output", trainable=True)
bo = tf.Variable(tf.ones([10])/10, name="biases_output", trainable=True)
logits = tf.add(tf.matmul(relu_fc, wo), bo, name="logits")
Y_raw_predict = tf.nn.softmax(logits, name="y_pred_raw")
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
# Optimization
loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
correct_prediction = tf.equal(tf.argmax(Y_raw_predict, 1), tf.argmax(Y, 1))
# Accuracy computing (definition of a summary for training,
# and value/update ops for batched testing)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
acc_sum = tf.summary.scalar("accuracy", accuracy)
mean_acc_value, mean_acc_update = tf.metrics.mean(accuracy, name="mean_accuracy_op")
tf.summary.scalar("mean_accuracy", mean_acc_value, collections = ["test"])
# tf.Summary and tf.FileWriter settings
train_summary = tf.summary.merge_all()
test_summary = tf.summary.merge_all("test")
graph_path = "./logs/mnist/graph/mnist1"
train_writer = tf.summary.FileWriter(os.path.join(graph_path, "training"))
test_writer = tf.summary.FileWriter(os.path.join(graph_path, "testing"))
# tf.Session opening and graph filling
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer()) # for value/update ops
for step in range(301):
xbatch, ybatch = dataset.train.next_batch(100)
sess.run(optimizer, feed_dict={X: xbatch, Y:ybatch})
# Monitor training each 10 steps
if step % 10 == 0:
s, l, acc, accsum = sess.run([train_summary, loss, accuracy, acc_sum],
feed_dict={X: xbatch, Y: ybatch})
train_writer.add_summary(s, step)
print("step: {}, loss={:5.4f}, acc={:0.3f}".format(step, l, acc))
# Monitor testing data each 100 steps
if step % 100 == 0:
# Consider 10000 testing images by batch of 100 images
for test_step in range(101):
xtest, ytest = dataset.test.next_batch(100)
sess.run([mean_acc_update], feed_dict={X: xtest, Y: ytest})
tacc, testsum = sess.run([mean_acc_value, test_summary])
test_writer.add_summary(testsum, step)
print("Validation OK: acc={:0.3f}".format(tacc))
I get the following results on TensorBoard (two different graphs, when I want two curves on the same graph):
TensorBoard result: the training and validation accuracy curves appear in two separate charts (the expected result would show both curves in the same chart, as in the linked example).
Here comes the question: how can I combine training and validation metrics in the same graph when the validation dataset has to be split into batches?
Thank you all!
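One possible approach, sketched here as an assumption rather than a tested answer: TensorBoard groups curves by tag, so the batched validation accuracy can be written under the same "accuracy" tag as the training summary by building a tf.Summary protobuf by hand and sending it to test_writer.
# Inside the `if step % 100 == 0:` block, after the validation batches have been accumulated:
tacc = sess.run(mean_acc_value)
manual_summary = tf.Summary(value=[
    tf.Summary.Value(tag="accuracy", simple_value=tacc)])
test_writer.add_summary(manual_summary, step)
# Both writers now emit the "accuracy" tag, so TensorBoard draws the two curves in one chart.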
I'm trying to adapt Aymeric Damien's code to visualize the dimensionality reduction performed by an autoencoder implemented in TensorFlow. All of the examples I have seen work on the MNIST digits dataset, but I wanted to use this method to visualize the iris dataset in 2 dimensions as a toy example, so I can figure out how to tweak it for my real-world datasets.
My question is: How can one get the sample-specific 2 dimensional embeddings to visualize?
For example, the iris dataset has 150 samples with 4 attributes. I added 4 noise attributes to get a total of 8 attributes. The encoding/decoding follows: [8, 4, 2, 4, 8] but I'm not sure how to extract an array of shape (150, 2) to visualize the embeddings. I haven't found any tutorials on how to visualize the dimensionality reduction using TensorFlow.
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
# Set random seeds
np.random.seed(0)
tf.set_random_seed(0)
# Load data
iris = load_iris()
# Original Iris : (150,4)
X_iris = iris.data
# Iris with noise : (150,8)
X_iris_with_noise = np.concatenate([X_iris, np.random.random(size=X_iris.shape)], axis=1).astype(np.float32)
y_iris = iris.target
# PCA
pca_xy = PCA(n_components=2).fit_transform(X_iris_with_noise)
with plt.style.context("seaborn-white"):
fig, ax = plt.subplots()
ax.scatter(pca_xy[:,0], pca_xy[:,1], c=y_iris, cmap=plt.cm.Set2)
ax.set_title("PCA | Iris with noise")
# Training Parameters
learning_rate = 0.01
num_steps = 1000
batch_size = 10
display_step = 250
examples_to_show = 10
# Network Parameters
num_hidden_1 = 4 # 1st layer num features
num_hidden_2 = 2 # 2nd layer num features (the latent dim)
num_input = 8 # Iris data input
# tf Graph input
X = tf.placeholder(tf.float32, [None, num_input], name="input")
weights = {
'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1]), dtype=tf.float32, name="encoder_h1"),
'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2]), dtype=tf.float32, name="encoder_h2"),
'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1]), dtype=tf.float32, name="decoder_h1"),
'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input]), dtype=tf.float32, name="decoder_h2"),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1]), dtype=tf.float32, name="encoder_b1"),
'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2]), dtype=tf.float32, name="encoder_b2"),
'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1]), dtype=tf.float32, name="decoder_b1"),
'decoder_b2': tf.Variable(tf.random_normal([num_input]), dtype=tf.float32, name="decoder_b2"),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
# Encoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
# Decoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start Training
# Start a new TF session
with tf.Session() as sess:
# Run the initializer
sess.run(init)
# Training
for i in range(1, num_steps+1):
# Prepare Data
# Get the next batch of Iris data
idx_train = np.random.RandomState(i).choice(np.arange(X_iris_with_noise.shape[0]), size=batch_size)
batch_x = X_iris_with_noise[idx_train,:]
# Run optimization op (backprop) and cost op (to get loss value)
_, l = sess.run([optimizer, loss], feed_dict={X: batch_x})
# Display logs per step
if i % display_step == 0 or i == 1:
print('Step %i: Minibatch Loss: %f' % (i, l))
Your embedding is accessible with h = encoder(X). Then, for each batch you can get the value as follows:
_, l, embedding = sess.run([optimizer, loss, h], feed_dict={X: batch_x})
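To get the full (150, 2) array asked about in the question, one option (a sketch using the names already defined in the question's code; encoder_op is the same tensor the answer calls h) is to evaluate the encoder on the whole noisy dataset at once, inside the same tf.Session after the training loop:
# Still inside `with tf.Session() as sess:`, after the training loop.
embedding_2d = sess.run(encoder_op, feed_dict={X: X_iris_with_noise})  # shape (150, 2)
with plt.style.context("seaborn-white"):
    fig, ax = plt.subplots()
    ax.scatter(embedding_2d[:, 0], embedding_2d[:, 1], c=y_iris, cmap=plt.cm.Set2)
    ax.set_title("Autoencoder embedding | Iris with noise")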
There is an even nicer solution with TensorBoard using Embeddings Visualization (https://www.tensorflow.org/programmers_guide/embedding):
from tensorflow.contrib.tensorboard.plugins import projector
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = h.name
# Use the same LOG_DIR where you stored your checkpoint.
summary_writer = tf.summary.FileWriter(LOG_DIR)
projector.visualize_embeddings(summary_writer, config)
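Since the projector reads embedding values from a checkpoint rather than from a live tensor, a fuller sketch (LOG_DIR, the variable name and the Saver call are assumptions, not part of the original answer) stores the 2-D codes from the sketch above in a dedicated variable and saves it alongside the projector config:
import os
LOG_DIR = "./logs/iris_embedding"  # hypothetical log directory
embedding_var = tf.Variable(embedding_2d, name="iris_embedding")  # the (150, 2) array computed above
sess.run(embedding_var.initializer)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
summary_writer = tf.summary.FileWriter(LOG_DIR)
projector.visualize_embeddings(summary_writer, config)
tf.train.Saver([embedding_var]).save(sess, os.path.join(LOG_DIR, "model.ckpt"))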
I use TensorFlow to implement a simple multi-layer perceptron for regression. The code is modified from a standard MNIST classifier: I only changed the output cost to MSE (using tf.reduce_mean(tf.square(pred-y))) and some input/output size settings. However, when I train the network for regression, after several epochs the outputs in a batch are all the same, for example:
target: 48.129, estimated: 42.634
target: 46.590, estimated: 42.634
target: 34.209, estimated: 42.634
target: 69.677, estimated: 42.634
......
I have tried different batch sizes, different initializations, and input normalization using sklearn.preprocessing.scale (my input ranges are quite different). However, none of them worked. I have also tried one of the sklearn examples from TensorFlow (Deep Neural Network Regression with Boston Data), but I got another error at line 40:
'module' object has no attribute 'infer_real_valued_columns_from_input'
Does anyone have a clue where the problem is? Thank you.
My code is listed below; it may be a little bit long, but it is very straightforward:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import learn
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn import datasets, linear_model
from sklearn import cross_validation
import numpy as np
boston = learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
total_len = X_train.shape[0]
# Parameters
learning_rate = 0.001
training_epochs = 500
batch_size = 10
display_step = 1
dropout_rate = 0.9
# Network Parameters
n_hidden_1 = 32 # 1st layer number of features
n_hidden_2 = 200 # 2nd layer number of features
n_hidden_3 = 200
n_hidden_4 = 256
n_input = X_train.shape[1]
n_classes = 1
# tf Graph input
x = tf.placeholder("float", [None, 13])
y = tf.placeholder("float", [None])
# Create model
def multilayer_perceptron(x, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with RELU activation
layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
layer_4 = tf.nn.relu(layer_4)
# Output layer with linear activation
out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], 0, 0.1)),
'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_hidden_4, n_classes], 0, 0.1))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
'b3': tf.Variable(tf.random_normal([n_hidden_3], 0, 0.1)),
'b4': tf.Variable(tf.random_normal([n_hidden_4], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(total_len/batch_size)
# Loop over all batches
for i in range(total_batch-1):
batch_x = X_train[i*batch_size:(i+1)*batch_size]
batch_y = Y_train[i*batch_size:(i+1)*batch_size]
# Run optimization op (backprop) and cost op (to get loss value)
_, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x,
y: batch_y})
# Compute average loss
avg_cost += c / total_batch
# sample prediction
label_value = batch_y
estimate = p
err = label_value-estimate
print ("num batch:", total_batch)
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
print ("[*]----------------------------")
for i in xrange(3):
print ("label value:", label_value[i], \
"estimated value:", estimate[i])
print ("[*]============================")
print ("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Accuracy:", accuracy.eval({x: X_test, y: Y_test}))
Short answer:
Transpose your pred vector using tf.transpose(pred).
Longer answer:
The problem is that pred (the predictions) and y (the labels) are not of the same shape: one is a row vector and the other a column vector. Because of broadcasting, applying an element-wise operation to them yields a (batch_size, batch_size) matrix, which is not what you want.
The solution is to transpose the prediction vector using tf.transpose() to get a proper vector and thus a proper loss function. Actually, if you set the batch size to 1 in your example you'll see that it works even without the fix, because transposing a 1x1 vector is a no-op.
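Concretely, the only line that needs to change in the question's code is the cost (a sketch; tf.squeeze(pred) would work just as well as the transpose here):
# pred has shape (batch_size, 1) while y has shape (batch_size,); without the fix,
# broadcasting turns pred - y into a (batch_size, batch_size) matrix before the mean.
cost = tf.reduce_mean(tf.square(tf.transpose(pred) - y))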
I applied this fix to your example code and observed the following behaviour. Before the fix:
Epoch: 0245 cost= 84.743440580
[*]----------------------------
label value: 23 estimated value: [ 27.47437096]
label value: 50 estimated value: [ 24.71126747]
label value: 22 estimated value: [ 23.87785912]
And after the fix at the same point in time:
Epoch: 0245 cost= 4.181439120
[*]----------------------------
label value: 23 estimated value: [ 21.64333534]
label value: 50 estimated value: [ 48.76105118]
label value: 22 estimated value: [ 24.27996063]
You'll see that the cost is much lower and that it actually learned the value 50 properly. You'll have to do some fine-tuning on the learning rate and such to improve your results of course.
There is likely a problem with your dataset loading or indexing implementation. If you only modified the cost to MSE, make sure pred and y are correctly being updated and you did not overwrite them with a different graph operation.
Another thing to help debug would be to predict the actual regression outputs. It would also help if you posted more of your code so we can see your specific data loading implementation, etc.