I am trying to use this code - https://github.com/KGPML/Hyperspectral
def run_training():
    """Train MNIST for a number of steps."""
    # Get the sets of images and labels for training, validation, and
    # test on IndianPines.

    """Concatenating all the training and test mat files"""
    for i in range(TRAIN_FILES):
        Training_data = input_data.read_data_sets(os.path.join(DATA_PATH, 'Train_'+str(IMAGE_SIZE)+'_'+str(1+1)+'.mat'), 'train')
    for i in range(TEST_FILES):
        Test_data = input_data.read_data_sets(os.path.join(DATA_PATH, 'Test_'+str(IMAGE_SIZE)+'_'+str(0+1)+'.mat'), 'test')

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = IndianPinesMLP.inference(images_placeholder,
                                          FLAGS.hidden1,
                                          FLAGS.hidden2,
                                          FLAGS.hidden3)

        # Add to the Graph the Ops for loss calculation.
        loss = IndianPinesMLP.loss(labels=labels_placeholder, logits=logits)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = IndianPinesMLP.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = IndianPinesMLP.evaluation(labels=labels_placeholder, logits=logits)

        # Build the summary operation based on the TF collection of Summaries.
        # summary_op = tf.merge_all_summaries()

        # Add the variable initializer Op.
        init = tf.initialize_all_variables()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        # summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        # And then after everything is built:

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(Training_data,
                                       images_placeholder,
                                       labels_placeholder)

            # Run one step of the model. The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op. To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict=feed_dict)

            duration = time.time() - start_time

            # Write the summaries and print an overview fairly often.
            if step % 50 == 0:
                # Print status to stdout.
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                # summary_str = sess.run(summary_op, feed_dict=feed_dict)
                # summary_writer.add_summary(summary_str, step)
                # summary_writer.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, '.\model-MLP-'+str(IMAGE_SIZE)+'X'+str(IMAGE_SIZE)+'.ckpt', global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        Training_data)
                print('Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        Test_data)
and got an error:
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-23-0683f80cdbe4> in <module>()
----> 1 run_training()
<ipython-input-22-b34daa52b702> in run_training()
60 feed_dict = fill_feed_dict(Training_data,
61 images_placeholder,
---> 62 labels_placeholder)
63
64 # Run one step of the model. The return values are the activations
If I run these parts manually, I get no errors:
<ipython-input-5-f04ef9a1e6b2> in fill_feed_dict(data_set, images_pl, labels_pl)
15 # Create the feed_dict for the placeholders filled with the next
16 # `batch size ` examples.
---> 17 images_feed, labels_feed = data_set.next_batch(batch_size)
18 feed_dict = {
19 images_pl: images_feed,
The same problem occurs here:
~\Path to: \Spatial_dataset.py in next_batch(self, batch_size)
87 start = 0
88 self._index_in_epoch = batch_size
---> 89 assert batch_size <= self._num_examples
90 end = self._index_in_epoch
91 return self._images[start:end], np.reshape(self._labels[start:end],len(self._labels[start:end]))
AssertionError:
When I now run run_training(), the error above occurs.
What does this mean, and how can I solve it? Google was no help in this case.
Thanks for any help.
The main error is due to:
---> 89 assert batch_size <= self._num_examples
The assertion fails because batch_size is larger than the number of examples in the data set you loaded. Choose batch_size so that it is at most, and ideally a factor of, the number of training examples left after the validation split, as well as a factor of the total number of training examples.
For example, if your training set has 100 examples and validation_size is 0.2, 80 examples will be used for training and 20 for validation. So choose a batch_size that is a factor of 80, say 20; 20 is a factor of 80 as well as of 100.
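A minimal sketch of how one might pick such a batch_size, assuming the loaded data set exposes a num_examples property like the TF MNIST-style DataSet it is modelled on (the 50-example upper bound and the 0.2 split here are just illustrative values):

num_total = Training_data.num_examples                    # e.g. 100 examples in total
validation_size = 0.2
num_effective = int(num_total * (1 - validation_size))    # e.g. 80 examples actually batched

# Largest batch size <= 50 that divides both counts, so
# `assert batch_size <= self._num_examples` can never fire.
batch_size = next(b for b in range(min(50, num_effective), 0, -1)
                  if num_effective % b == 0 and num_total % b == 0)
print(batch_size)   # -> 20 for the 100 / 80 example above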
I'm training a network to classify audio. First I extract logmel-spectrograms from my audio data, save these in arrays and train my network using these. At each epoch I inference on my test data to get an accuracy estimate.
My training dataset is 24GB and test dataset is 6GB. Both are too large for the RAM. I found that I could extract the logmel-specs from my training data before running the network, save each minibatch in a pickle file, then load these one by one during training.
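For context, the pickling step described above might look roughly like this (X_train_batches and y_train_batches are hypothetical lists of pre-computed log-mel minibatches; pickle and pickle_files_dir are the same names used in the training code below):

for i, (batch_x, batch_y) in enumerate(zip(X_train_batches, y_train_batches)):
    # One pickle file per minibatch, matching the names loaded during training.
    with open(pickle_files_dir + 'X_train_mini_batch_' + str(i), 'wb') as fp:
        pickle.dump(batch_x, fp)
    with open(pickle_files_dir + 'y_train_mini_batch_' + str(i), 'wb') as fp:
        pickle.dump(batch_y, fp)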
However, I use .eval() to get the accuracy from my whole test data at once. This worked when I used smaller datasets, as there was no need to split my data up into chunks using different pickle files. However, I'm now trying to figure out how to run the .eval() line or equivalent so that it provides accuracy for the whole test dataset, rather than the smaller chunks I've split it into. Is there a way I can get overall accuracy for my test data using pickle files or another method?
Here is the key component of code at the end where I think this can be done:
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_input, 1))
test_accuracy = tf.reduce_mean(tf.cast(correct, 'float')) #changes correct to type: float
test_accuracy1 = test_accuracy.eval({features_input:X_test, labels_input:y_test})
test_accuracy_scores.append(test_accuracy1)
print('Test accuracy:', test_accuracy1)
Here is my entire codeblock for the network:
### Train NN, output results
r"""This uses the VGGish model definition within a larger model which adds two
layers on top, and then trains this larger model.

We input log-mel spectrograms (X_train) calculated above with associated labels
(y_train), and feed the batches into the model. Once the model is trained, it
is then executed on the test log-mel spectrograms (X_test) and the accuracy is output.
Alongside this, a .csv file with the predictions for each 0.96s chunk and their true
class is also output for the test data. Column1 = the logit for the first class,
Column2 = the logit for the second class etc. The final column is the true class.
"""

num_min_batches = len(os.listdir(pickle_files_dir))/2
os.chdir(scripts_directory)

def main(X):
    with tf.Graph().as_default(), tf.Session() as sess:
        # Define VGGish.
        embeddings = vggish_slim.define_vggish_slim(training=FLAGS.train_vggish)

        # Define a shallow classification model and associated training ops on top
        # of VGGish.
        with tf.variable_scope('mymodel'):
            # Add a fully connected layer with 100 units. Add an activation function
            # to the embeddings since they are pre-activation.
            num_units = 100
            fc = slim.fully_connected(tf.nn.relu(embeddings), num_units)

            # Add a classifier layer at the end, consisting of parallel logistic
            # classifiers, one per class. This allows for multi-class tasks.
            logits = slim.fully_connected(
                fc, _NUM_CLASSES, activation_fn=None, scope='logits')
            tf.sigmoid(logits, name='prediction')

            linear_out = slim.fully_connected(
                fc, _NUM_CLASSES, activation_fn=None, scope='linear_out')
            logits = tf.sigmoid(linear_out, name='logits')

        # Add training ops.
        with tf.variable_scope('train'):
            global_step = tf.train.create_global_step()

            # Labels are assumed to be fed as a batch multi-hot vectors, with
            # a 1 in the position of each positive class label, and 0 elsewhere.
            labels_input = tf.placeholder(
                tf.float32, shape=(None, _NUM_CLASSES), name='labels')

            # Cross-entropy label loss.
            xent = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits, labels=labels_input, name='xent')
            loss = tf.reduce_mean(xent, name='loss_op')
            tf.summary.scalar('loss', loss)

            # We use the same optimizer and hyperparameters as used to train VGGish.
            optimizer = tf.train.AdamOptimizer(
                learning_rate=vggish_params.LEARNING_RATE,
                epsilon=vggish_params.ADAM_EPSILON)
            train_op = optimizer.minimize(loss, global_step=global_step)

        # Initialize all variables in the model, and then load the pre-trained
        # VGGish checkpoint.
        sess.run(tf.global_variables_initializer())
        vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)

        # The training loop.
        features_input = sess.graph.get_tensor_by_name(
            vggish_params.INPUT_TENSOR_NAME)

        validation_accuracy_scores = []
        test_accuracy_scores = []
        for epoch in range(num_epochs):
            epoch_loss = 0
            i = 0
            while i < num_min_batches:
                #print('mini batch'+str(i))
                X_pickle_file = pickle_files_dir + 'X_train_mini_batch_' + str(i)
                with open(X_pickle_file, "rb") as fp:  # Unpickling
                    batch_x = pickle.load(fp)

                y_pickle_file = pickle_files_dir + 'y_train_mini_batch_' + str(i)
                with open(y_pickle_file, "rb") as fp:  # Unpickling
                    batch_y = pickle.load(fp)

                _, c = sess.run([train_op, loss], feed_dict={features_input: batch_x, labels_input: batch_y})
                epoch_loss += c
                i += 1

            #print no. of epochs and loss
            print('Epoch', epoch+1, 'completed out of', num_epochs, ', loss:', epoch_loss)

            #note this adds a small computational cost
            correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_input, 1))
            test_accuracy = tf.reduce_mean(tf.cast(correct, 'float'))  #changes correct to type: float
            test_accuracy1 = test_accuracy.eval({features_input: X_test, labels_input: y_test})
            test_accuracy_scores.append(test_accuracy1)
            print('Test accuracy:', test_accuracy1)

if __name__ == '__main__':
    tf.app.run()
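One way to get a single overall accuracy without holding the whole test set in memory is to accumulate per-chunk correct counts and divide by the total number of examples at the end. A minimal sketch under the assumption that the test data has also been pickled into chunks named X_test_mini_batch_<i> / y_test_mini_batch_<i> and that num_test_min_batches counts them (both hypothetical, mirroring the training pickles above); everything else reuses names from the code above:

# Build the ops once, outside the loop, to avoid growing the graph each epoch.
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_input, 1))
num_correct_op = tf.reduce_sum(tf.cast(correct, tf.float32))

total_correct = 0.0
total_examples = 0
for i in range(num_test_min_batches):   # assumed count of pickled test chunks
    with open(pickle_files_dir + 'X_test_mini_batch_' + str(i), 'rb') as fp:
        chunk_x = pickle.load(fp)
    with open(pickle_files_dir + 'y_test_mini_batch_' + str(i), 'rb') as fp:
        chunk_y = pickle.load(fp)
    # Count correct predictions in this chunk and add them to the running totals.
    total_correct += sess.run(num_correct_op,
                              feed_dict={features_input: chunk_x, labels_input: chunk_y})
    total_examples += len(chunk_y)

overall_test_accuracy = total_correct / total_examples
print('Overall test accuracy:', overall_test_accuracy)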
I want to get predictions from my trained TensorFlow model. The following is the code I have for training my model.
def train_model(self, train, test, learning_rate=0.0001, num_epochs=16, minibatch_size=32, print_cost=True, graph_filename='costs'):
    # Ensure that model can be rerun without overwriting tf variables
    ops.reset_default_graph()

    # For reproducibility
    tf.set_random_seed(42)
    seed = 42

    # Get input and output shapes
    (n_x, m) = train.images.T.shape
    n_y = train.labels.T.shape[0]

    costs = []

    # Create placeholders of shape (n_x, n_y)
    X, Y = self.create_placeholders(n_x, n_y)
    # Initialize parameters
    parameters = self.initialize_parameters()

    # Forward propagation
    Z3 = self.forward_propagation(X, parameters)
    # Cost function
    cost = self.compute_cost(Z3, Y)
    # Backpropagation (using Adam optimizer)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    # Initialize variables
    init = tf.global_variables_initializer()

    # Start session to compute Tensorflow graph
    with tf.Session() as sess:
        # Run initialization
        sess.run(init)

        # Training loop
        for epoch in range(num_epochs):
            epoch_cost = 0.
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1

            for i in range(num_minibatches):
                # Get next batch of training data and labels
                minibatch_X, minibatch_Y = train.next_batch(minibatch_size)

                # Execute optimizer and cost function
                _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X.T, Y: minibatch_Y.T})

                # Update epoch cost
                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True:
                print("Cost after epoch {epoch_num}: {cost}".format(epoch_num=epoch, cost=epoch_cost))
                costs.append(epoch_cost)

        # Plot costs
        plt.figure(figsize=(16,5))
        plt.plot(np.squeeze(costs), color='#2A688B')
        plt.xlim(0, num_epochs-1)
        plt.ylabel("cost")
        plt.xlabel("iterations")
        plt.title("learning rate = {rate}".format(rate=learning_rate))
        plt.savefig(graph_filename, dpi=300)
        plt.show()

        # Save parameters
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy on test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train Accuracy:", accuracy.eval({X: train.images.T, Y: train.labels.T}))
        print("Test Accuracy:", accuracy.eval({X: test.images.T, Y: test.labels.T}))

    return parameters
After training the model, I want to extract the prediction from the model.
So I add
print(sess.run(accuracy, feed_dict={X: test.images.T}))
But I am seeing the below error after running the above code:
InvalidArgumentError: You must feed a value for placeholder tensor 'Y'
with dtype float and shape [10,?]
[[{{node Y}} = Placeholder[dtype=DT_FLOAT, shape=[10,?], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Any help is welcome.
The tensor accuracy is a function of the tensor correct_prediction, which in turn is a function of (among the rest) Y.
So you're correctly being told that you should feed values for that placeholder too.
I'm assuming Y holds your labels, so it should also make intuitive sense that your feed_dict needs to contain the correct Y values as well.
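For example, a minimal sketch reusing the names from the training code above (test.labels is assumed to be available, just as test.images is):

# Accuracy needs both placeholders, because correct_prediction compares Z3 against Y.
print(sess.run(accuracy, feed_dict={X: test.images.T, Y: test.labels.T}))

# If you only want the predicted classes, evaluate a tensor that does not depend on Y.
predictions = sess.run(tf.argmax(Z3), feed_dict={X: test.images.T})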
Hope that helps.
Good luck!
I've trained a model on TensorFlow (MNIST) and saved the weights in a .ckpt file.
Now I want to test my neural network with these weights, using the same images translated a few pixels to the right and down.
Loading the weights works well, but when I print an evaluation, TensorFlow always displays the same result (0.9630 for the test), whether the translation is 1 or 14 px.
Here is the code for the function which prints the evaluation:
def eval_translation(sess, eval_correct, images_pl, labels_pl, dataset):
    print('Test Data Eval:')
    for i in range(28):
        true_count = 0  # Counts the number of correct predictions.
        steps_per_epoch = dataset.num_examples // FLAGS.batch_size
        nb_exemples = steps_per_epoch * FLAGS.batch_size
        for step in xrange(steps_per_epoch):
            images_feed, labels_feed = dataset.next_batch(FLAGS.batch_size)
            feed_dict = {images_pl: translate_right(images_feed, i), labels_pl: labels_feed}
            true_count += sess.run(eval_correct, feed_dict=feed_dict)
        precision = true_count / nb_exemples
        print('Translation: %d Num examples: %d Num correct: %d Precision # 1: %0.04f' % (i, nb_exemples, true_count, precision))
This is the function with which I load the data and print the test results.
Here is my translation function:
def translate_right(images, dev):
    for i in range(len(images)):
        for j in range(len(images[i])):
            images[i][j] = np.roll(images[i][j], dev)
    return images
I call this function in place of the training, just after initialising all the variables:
with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    weights, logits = mnist.inference(images_placeholder, neurons)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    save = {}
    for i in range(len(weights)):
        save['weights' + str(i)] = weights[i]
    saver = tf.train.Saver(save)

    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)

    # load weights
    saver.restore(sess, restore_path)

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

    temps_total = time.time()

    eval_translation(sess, eval_correct, images_placeholder, labels_placeholder, dataset.test)
I don't know what's wrong with my code, or why TensorFlow seems to ignore my images.
Can someone help me, please?
Thanks !
Your function translate_right doesn't work, because images[i][j] is just one pixel (containing 1 value if you have greyscale images).
You should use the argument axis of np.roll:
def translate_right(images, dev):
    return np.roll(images, dev, axis=1)
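Assuming the MNIST images are fed as flattened vectors of shape (batch, 784), a quick check of what the axis argument does (toy values, just for illustration):

import numpy as np

batch = np.arange(8).reshape(2, 4)   # two "images" of 4 pixels each
print(np.roll(batch, 1, axis=1))
# [[3 0 1 2]
#  [7 4 5 6]]  -- each image's pixels shift by one, rows stay separate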
I am trying to implement a suggestion from answers:
Tensorflow: how to save/restore a model?
I have an object which wraps a TensorFlow model in scikit-learn style.
import tensorflow as tf

class tflasso():
    saver = tf.train.Saver()

    def __init__(self,
                 learning_rate = 2e-2,
                 training_epochs = 5000,
                 display_step = 50,
                 BATCH_SIZE = 100,
                 ALPHA = 1e-5,
                 checkpoint_dir = "./",
                 ):
        ...

    def _create_network(self):
        ...

    def _load_(self, sess, checkpoint_dir = None):
        if checkpoint_dir:
            self.checkpoint_dir = checkpoint_dir
        print("loading a session")
        ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception("no checkpoint found")
        return

    def fit(self, train_X, train_Y, load = True):
        self.X = train_X
        self.xlen = train_X.shape[1]
        # n_samples = y.shape[0]
        self._create_network()
        tot_loss = self._create_loss()
        optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(tot_loss)

        # Initializing the variables
        init = tf.initialize_all_variables()
        " training per se"
        getb = batchgen(self.BATCH_SIZE)

        yvar = train_Y.var()
        print(yvar)

        # Launch the graph
        NUM_CORES = 3  # Choose how many cores to use.
        sess_config = tf.ConfigProto(inter_op_parallelism_threads=NUM_CORES,
                                     intra_op_parallelism_threads=NUM_CORES)
        with tf.Session(config=sess_config) as sess:
            sess.run(init)
            if load:
                self._load_(sess)
            # Fit all training data
            for epoch in range(self.training_epochs):
                for (_x_, _y_) in getb(train_X, train_Y):
                    _y_ = np.reshape(_y_, [-1, 1])
                    sess.run(optimizer, feed_dict={self.vars.xx: _x_, self.vars.yy: _y_})
                # Display logs per epoch step
                if (1+epoch) % self.display_step == 0:
                    cost = sess.run(tot_loss,
                                    feed_dict={self.vars.xx: train_X,
                                               self.vars.yy: np.reshape(train_Y, [-1, 1])})
                    rsq = 1 - cost / yvar
                    logstr = "Epoch: {:4d}\tcost = {:.4f}\tR^2 = {:.4f}".format((epoch+1), cost, rsq)
                    print(logstr)
                    self.saver.save(sess, self.checkpoint_dir + 'model.ckpt',
                                    global_step=1+epoch)

            print("Optimization Finished!")
        return self
When I run:
tfl = tflasso()
tfl.fit( train_X, train_Y , load = False)
I get output:
Epoch: 50 cost = 38.4705 R^2 = -1.2036
b1: 0.118122
Epoch: 100 cost = 26.4506 R^2 = -0.5151
b1: 0.133597
Epoch: 150 cost = 22.4330 R^2 = -0.2850
b1: 0.142261
Epoch: 200 cost = 20.0361 R^2 = -0.1477
b1: 0.147998
However, when I try to recover the parameters (even without killing the object):
tfl.fit( train_X, train_Y , load = True)
I get strange results. First of all, the loaded value does not correspond to the saved one.
loading a session
loaded b1: 0.1 <------- Loaded another value than saved
Epoch: 50 cost = 30.8483 R^2 = -0.7670
b1: 0.137484
What is the right way to load, and probably first inspect the saved variables?
TL;DR: You should try to rework this class so that self.create_network() is called (i) only once, and (ii) before the tf.train.Saver() is constructed.
There are two subtle issues here, which are due to the code structure, and the default behavior of the tf.train.Saver constructor. When you construct a saver with no arguments (as in your code), it collects the current set of variables in your program, and adds ops to the graph for saving and restoring them. In your code, when you call tflasso(), it will construct a saver, and there will be no variables (because create_network() has not yet been called). As a result, the checkpoint should be empty.
The second issue is that—by default—the format of a saved checkpoint is a map from the name property of a variable to its current value. If you create two variables with the same name, they will be automatically "uniquified" by TensorFlow:
v = tf.Variable(..., name="weights")
assert v.name == "weights"
w = tf.Variable(..., name="weights")
assert w.name == "weights_1" # The "_1" is added by TensorFlow.
The consequence of this is that, when you call self.create_network() in the second call to tfl.fit(), the variables will all have different names from the names that are stored in the checkpoint—or would have been if the saver had been constructed after the network. (You can avoid this behavior by passing a name-Variable dictionary to the saver constructor, but this is usually quite awkward.)
There are two main workarounds:
In each call to tflasso.fit(), create the whole model afresh, by defining a new tf.Graph, then in that graph building the network and creating a tf.train.Saver.
Recommended: Create the network, then the tf.train.Saver in the tflasso constructor, and reuse this graph on each call to tflasso.fit() (see the sketch below). Note that you might need to do some more work to reorganize things (in particular, I'm not sure what you do with self.X and self.xlen) but it should be possible to achieve this with placeholders and feeding.
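A minimal sketch of the recommended option. This is hedged: the toy linear model with a plain squared-error loss stands in for the question's _create_network()/_create_loss(), and the class and argument names (TFLassoLike, n_features) are illustrative only; the key point is that the graph and the Saver are built exactly once, in the constructor, before any fit() call.

import tensorflow as tf

class TFLassoLike(object):
    """Build the graph once in the constructor, then create the Saver."""
    def __init__(self, n_features, learning_rate=2e-2, checkpoint_dir="./"):
        self.checkpoint_dir = checkpoint_dir
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Stand-in for _create_network()/_create_loss().
            self.xx = tf.placeholder(tf.float32, [None, n_features], name="xx")
            self.yy = tf.placeholder(tf.float32, [None, 1], name="yy")
            w = tf.Variable(tf.zeros([n_features, 1]), name="weights")
            b = tf.Variable(tf.zeros([1]), name="bias")
            pred = tf.matmul(self.xx, w) + b
            self.tot_loss = tf.reduce_mean(tf.square(pred - self.yy))
            self.train_op = tf.train.AdagradOptimizer(learning_rate).minimize(self.tot_loss)
            self.init = tf.initialize_all_variables()
            # The Saver is created *after* the variables exist, and only once,
            # so saved and restored variable names always line up.
            self.saver = tf.train.Saver()

    def fit(self, train_X, train_Y, load=True):
        with tf.Session(graph=self.graph) as sess:
            sess.run(self.init)
            if load:
                ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    # Restores into the same, unrenamed variables.
                    self.saver.restore(sess, ckpt.model_checkpoint_path)
            sess.run(self.train_op,
                     feed_dict={self.xx: train_X, self.yy: train_Y.reshape(-1, 1)})
            self.saver.save(sess, self.checkpoint_dir + "model.ckpt")
        return self

Because the same graph is reused on every fit() call, no "weights_1"-style duplicates are ever created, and the checkpoint written by one call can be restored by the next.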
Hi, I am trying to modify the MNIST example to fit my dataset. I am only using the MLP example, and it gives a strange error.
The dataset is a matrix with 2100 rows and 17 columns, and the output should be one of the 16 possible classes. The error seems to happen in the second phase of training. The model is built correctly (confirmed by the log info).
Here is the error log:
ValueError: y_i value out of bounds
Apply node that caused the error:
CrossentropySoftmaxArgmax1HotWithBias(Dot22.0, b, targets)
Toposort index: 33
Inputs types: [TensorType(float64, matrix), TensorType(float64, vector), TensorType(int32, vector)]
Inputs shapes: [(100, 17), (17,), (100,)]
Inputs strides: [(136, 8), (8,), (4,)]
Inputs values: ['not shown', 'not shown', 'not shown']
Outputs clients: [[Sum{acc_dtype=float64}(CrossentropySoftmaxArgmax1HotWithBias.0)], [CrossentropySoftmax1HotWithBiasDx(Assert{msg='sm and dy do not have the same shape.'}.0, CrossentropySoftmaxArgmax1HotWithBias.1, targets)], []]
HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
Here is the code:
def build_mlp(input_var=None):
    l_in = lasagne.layers.InputLayer(shape=(None, 16),
                                     input_var=input_var)

    # Apply 20% dropout to the input data:
    l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)

    # Add a fully-connected layer of 800 units, using the linear rectifier, and
    # initializing weights with Glorot's scheme (which is the default anyway):
    l_hid1 = lasagne.layers.DenseLayer(
            l_in_drop, num_units=10,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform())

    # We'll now add dropout of 50%:
    l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)

    # Another 800-unit layer:
    l_hid2 = lasagne.layers.DenseLayer(
            l_hid1_drop, num_units=10,
            nonlinearity=lasagne.nonlinearities.rectify)

    # 50% dropout again:
    l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)

    # Finally, we'll add the fully-connected output layer, of 10 softmax units:
    l_out = lasagne.layers.DenseLayer(
            l_hid2_drop, num_units=17,
            nonlinearity=lasagne.nonlinearities.softmax)

    # Each layer is linked to its incoming layer(s), so we only need to pass
    # the output layer to give access to a network in Lasagne:
    return l_out
def main(model='mlp', num_epochs=300):
    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

    # Prepare Theano variables for inputs and targets
    input_var = T.matrix('inputs')
    target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    if model == 'cnn':
        network = build_cnn(input_var)
    elif model == 'mlp':
        network = build_mlp(input_var)
    elif model == 'lstm':
        network = build_lstm(input_var)
    else:
        print("Unrecognized model type %r." % model)

    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.

    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
            loss, params, learning_rate=0.01, momentum=0.9)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 100, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 100, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print(" validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))

    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, 100, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print(" test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))
I figured out the problem:
My dataset does not have an output for every target, because it is too small! There are 17 target outputs, but my dataset has only 16 different outputs and is missing examples of the 17th output.
In order to resolve this problem, just replace the softmax with rectify,
from this:
l_out = lasagne.layers.DenseLayer(
        l_hid2_drop, num_units=17,
        nonlinearity=lasagne.nonlinearities.softmax)
to this:
l_out = lasagne.layers.DenseLayer(
        l_hid2_drop, num_units=17,
        nonlinearity=lasagne.nonlinearities.rectify)