Scheduled sampling in TensorFlow - python

The newest TensorFlow seq2seq API includes scheduled sampling:
https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/ScheduledEmbeddingTrainingHelper
https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/ScheduledOutputTrainingHelper
The original paper of scheduled sampling can be found here:
https://arxiv.org/abs/1506.03099
I read the paper, but I cannot understand the difference between ScheduledEmbeddingTrainingHelper and ScheduledOutputTrainingHelper. The documentation only says that ScheduledEmbeddingTrainingHelper is a training helper that adds scheduled sampling, while ScheduledOutputTrainingHelper is a training helper that adds scheduled sampling directly to outputs.
What is the difference between these two helpers?

I contacted the engineer behind this, and he responded:
The output sampler either emits the raw rnn output or the raw ground truth at that time step. The embedding sampler treats the rnn output as logits of a distribution and either emits the embedding lookup of a sampled id from that categorical distribution or the raw ground truth at that time step.
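In other words, roughly (a hedged pseudocode paraphrase of that quote for a single decode step; the function and argument names below are placeholders of my own, not the helpers' actual internals):
import tensorflow as tf

def next_input_scheduled_output(rnn_output, ground_truth_input, sampling_probability):
    # ScheduledOutputTrainingHelper: feed back the raw RNN output itself.
    sample = tf.random_uniform([]) < sampling_probability
    return tf.cond(sample, lambda: rnn_output, lambda: ground_truth_input)

def next_input_scheduled_embedding(rnn_output, ground_truth_input, embedding_matrix,
                                   sampling_probability):
    # ScheduledEmbeddingTrainingHelper: treat the RNN output as logits, sample a token id,
    # and feed back that token's embedding.
    sample = tf.random_uniform([]) < sampling_probability
    sampled_id = tf.squeeze(tf.multinomial(rnn_output, 1), axis=-1)
    sampled_embedding = tf.nn.embedding_lookup(embedding_matrix, sampled_id)
    return tf.cond(sample, lambda: sampled_embedding, lambda: ground_truth_input)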

Here's a basic example of using ScheduledEmbeddingTrainingHelper, with TensorFlow 1.3 and some higher-level tf.contrib APIs. It's a sequence-to-sequence model, where the decoder's initial hidden state is the final hidden state of the encoder. It shows only how to train on a single batch (and apparently the task is "reverse this sequence"). For actual training tasks, I suggest looking at the tf.contrib.learn APIs such as learn_runner, Experiment and tf.estimator.Estimator.
import tensorflow as tf
import numpy as np
from tensorflow.python.layers.core import Dense

vocab_size = 7
embedding_size = 5
lstm_units = 10

src_batch = np.array([[1, 2, 3], [4, 5, 6]])
trg_batch = np.array([[3, 2, 1], [6, 5, 4]])

# *_seq will have shape (2, 3), *_seq_len will have shape (2)
source_seq = tf.placeholder(shape=(None, None), dtype=tf.int32)
target_seq = tf.placeholder(shape=(None, None), dtype=tf.int32)
source_seq_len = tf.placeholder(shape=(None,), dtype=tf.int32)
target_seq_len = tf.placeholder(shape=(None,), dtype=tf.int32)

# add Start of Sequence (SOS) tokens to each sequence
batch_size, sequence_size = tf.unstack(tf.shape(target_seq))
sos_slice = tf.zeros([batch_size, 1], dtype=tf.int32)  # 0 = start of sentence token
decoder_input = tf.concat([sos_slice, target_seq], axis=1)

embedding_matrix = tf.get_variable(
    name="embedding_matrix",
    shape=[vocab_size, embedding_size],
    dtype=tf.float32)
source_seq_embedded = tf.nn.embedding_lookup(embedding_matrix, source_seq)  # shape=(2, 3, 5)
decoder_input_embedded = tf.nn.embedding_lookup(embedding_matrix, decoder_input)  # shape=(2, 4, 5)

unused_encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
    tf.contrib.rnn.LSTMCell(lstm_units),
    source_seq_embedded,
    sequence_length=source_seq_len,
    dtype=tf.float32)

# Decoder:
# At each time step t and for each sequence in the batch, we get x_t by either
# (1) sampling from the distribution output_layer(t-1), or
# (2) reading from decoder_input_embedded.
# We do (1) with probability sampling_probability and (2) with 1 - sampling_probability.
# Using sampling_probability=0.0 is equivalent to using TrainingHelper (no sampling).
# Using sampling_probability=1.0 is equivalent to doing inference,
# where we don't supervise the decoder at all: output at t-1 is the input at t.
sampling_prob = tf.Variable(0.0, dtype=tf.float32)
helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
    decoder_input_embedded,
    target_seq_len,
    embedding_matrix,
    sampling_probability=sampling_prob)

output_layer = Dense(vocab_size)
decoder = tf.contrib.seq2seq.BasicDecoder(
    tf.contrib.rnn.LSTMCell(lstm_units),
    helper,
    encoder_state,
    output_layer=output_layer)
outputs, state, seq_len = tf.contrib.seq2seq.dynamic_decode(decoder)

loss = tf.contrib.seq2seq.sequence_loss(
    logits=outputs.rnn_output,
    targets=target_seq,
    weights=tf.ones(trg_batch.shape))

train_op = tf.contrib.layers.optimize_loss(
    loss=loss,
    global_step=tf.contrib.framework.get_global_step(),
    optimizer=tf.train.AdamOptimizer,
    learning_rate=0.001)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    _, _loss = session.run([train_op, loss], {
        source_seq: src_batch,
        target_seq: trg_batch,
        source_seq_len: [3, 3],
        target_seq_len: [3, 3],
        sampling_prob: 0.5,
    })
    print("Loss: " + str(_loss))
For ScheduledOutputTrainingHelper, I would expect to just swap out the helper and use:
helper = tf.contrib.seq2seq.ScheduledOutputTrainingHelper(
    target_seq,
    target_seq_len,
    sampling_probability=sampling_prob)
However, this gives an error, since the LSTM cell expects a multidimensional input per timestep (of shape (batch_size, input_dims)). I will raise an issue on GitHub to find out whether this is a bug, or whether there is some other way to use ScheduledOutputTrainingHelper.
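In the meantime, a workaround I would expect to work (an untested sketch, my assumption rather than something confirmed in the thread) is to use ScheduledOutputTrainingHelper only when the decoder's projected output lives in the same space as its input, so the raw output can legitimately be fed back, for example:
# Hedged sketch: reuse the graph above, but give the decoder real-valued inputs whose
# dimensionality matches the output_layer, so the sampled "output" can be fed back as-is.
input_dim = 5  # must equal the output_layer size for the feedback to type-check
decoder_inputs = tf.placeholder(shape=(None, None, input_dim), dtype=tf.float32)
helper = tf.contrib.seq2seq.ScheduledOutputTrainingHelper(
    inputs=decoder_inputs,
    sequence_length=target_seq_len,
    sampling_probability=sampling_prob)
decoder = tf.contrib.seq2seq.BasicDecoder(
    tf.contrib.rnn.LSTMCell(lstm_units),
    helper,
    encoder_state,
    output_layer=Dense(input_dim))  # project decoder outputs back to input_dim
outputs, state, seq_len = tf.contrib.seq2seq.dynamic_decode(decoder)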

This might also help you. This is for the case where you want to do scheduled sampling at each decoding step separately.
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import bernoulli

batch_size = 64
vocab_size = 50000
emb_dim = 128

output = tf.get_variable('output',
                         initializer=tf.constant(np.random.rand(batch_size, vocab_size)))
base_next_inputs = tf.get_variable('input',
                                   initializer=tf.constant(np.random.rand(batch_size, emb_dim)))
embedding = tf.get_variable('embedding',
                            initializer=tf.constant(np.random.rand(vocab_size, emb_dim)))

# Decide independently for each element of the batch whether to sample from the
# model output (True) or to keep the ground-truth input (False).
select_sampler = bernoulli.Bernoulli(probs=0.99, dtype=tf.bool)
select_sample = select_sampler.sample(sample_shape=batch_size, seed=123)

# Treat the model output as logits of a categorical distribution over the vocabulary;
# positions that are not sampled get the sentinel id -1.
sample_id_sampler = categorical.Categorical(logits=output)
sample_ids = array_ops.where(
    select_sample,
    sample_id_sampler.sample(seed=123),
    gen_array_ops.fill([batch_size], -1))

where_sampling = math_ops.cast(
    array_ops.where(sample_ids > -1), tf.int32)
where_not_sampling = math_ops.cast(
    array_ops.where(sample_ids <= -1), tf.int32)

sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs, where_not_sampling)
sampled_next_inputs = tf.nn.embedding_lookup(embedding, sample_ids_sampling)

# Scatter the sampled embeddings and the kept ground-truth inputs back into one batch.
base_shape = array_ops.shape(base_next_inputs)
result1 = array_ops.scatter_nd(indices=where_sampling,
                               updates=sampled_next_inputs, shape=base_shape)
result2 = array_ops.scatter_nd(indices=where_not_sampling,
                               updates=inputs_not_sampling, shape=base_shape)
result = result1 + result2
I used the TensorFlow helper source code as a basis for this example:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/contrib/seq2seq/python/ops/helper.py

Related

Multi-instance classification using transformer model

I use the transformer from this Keras documentation example for multi-instance classification. The class of each instance depends on the other instances that come in the same bag. I use the transformer model because:
It makes no assumptions about the temporal/spatial relationships across the data. This is ideal for processing a set of objects.
For example, each bag may have at most 5 instances, and there are 3 features per instance.
import numpy as np

# Generate data
max_length = 5
x_lst = []
y_lst = []
for _ in range(10):
    num_instances = np.random.randint(2, max_length + 1)
    x_bag = np.random.randint(0, 9, size=(num_instances, 3))
    y_bag = np.random.randint(0, 2, size=num_instances)
    x_lst.append(x_bag)
    y_lst.append(y_bag)
Features and labels of first 2 bags (with 5 and 2 instances):
x_lst[:2]

[array([[8, 0, 3],
        [8, 1, 0],
        [4, 6, 8],
        [1, 6, 4],
        [7, 4, 6]]),
 array([[5, 8, 4],
        [2, 1, 1]])]

y_lst[:2]

[array([0, 1, 1, 1, 0]), array([0, 0])]
Next, I pad features with zeros and targets with -1:
x_padded = []
y_padded = []
for x, y in zip(x_lst, y_lst):
    x_p = np.zeros((max_length, 3))
    x_p[:x.shape[0], :x.shape[1]] = x
    x_padded.append(x_p)
    y_p = np.negative(np.ones(max_length))
    y_p[:y.shape[0]] = y
    y_padded.append(y_p)
X = np.stack(x_padded)
y = np.stack(y_padded)
where X.shape is equal to (10, 5, 3) and y.shape is equal to (10, 5).
I made two changes to the original model: I added a Masking layer after the Input layer and set the number of units in the last Dense layer to the maximum size of a bag (plus a 'sigmoid' activation):
from tensorflow import keras
from tensorflow.keras import layers

def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
    # Attention and Normalization
    x = layers.MultiHeadAttention(
        key_dim=head_size, num_heads=num_heads, dropout=dropout
    )(inputs, inputs)
    x = layers.Dropout(dropout)(x)
    x = layers.LayerNormalization(epsilon=1e-6)(x)
    res = x + inputs
    # Feed Forward Part
    x = layers.Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(res)
    x = layers.Dropout(dropout)(x)
    x = layers.Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
    x = layers.LayerNormalization(epsilon=1e-6)(x)
    return x + res

def build_model(
    input_shape,
    head_size,
    num_heads,
    ff_dim,
    num_transformer_blocks,
    mlp_units,
    dropout=0,
    mlp_dropout=0,
):
    inputs = keras.Input(shape=input_shape)
    inputs = keras.layers.Masking(mask_value=0)(inputs)  # ADDED MASKING LAYER
    x = inputs
    for _ in range(num_transformer_blocks):
        x = transformer_encoder(x, head_size, num_heads, ff_dim, dropout)
    x = layers.GlobalAveragePooling1D(data_format="channels_first")(x)
    for dim in mlp_units:
        x = layers.Dense(dim, activation="relu")(x)
        x = layers.Dropout(mlp_dropout)(x)
    outputs = layers.Dense(5, activation='sigmoid')(x)  # CHANGED ACCORDING TO MY OUTPUT
    return keras.Model(inputs, outputs)

input_shape = (5, 3)

model = build_model(
    input_shape,
    head_size=256,
    num_heads=4,
    ff_dim=4,
    num_transformer_blocks=4,
    mlp_units=[128],
    mlp_dropout=0.4,
    dropout=0.25,
)

model.compile(
    loss="binary_crossentropy",
    optimizer=keras.optimizers.Adam(learning_rate=1e-4),
    metrics=["binary_accuracy"],
)
model.summary()
It looks like my model doesn't learn much. If I use the number of true values for each bag (y.sum(axis=1) and Dense(1)) as the target instead of classifying each instance, the model learns well. Where is my error? How should I build the output layer in this case? Do I need a custom loss function?
UPDATE:
I made a custom loss function:
def my_loss_fn(y_true, y_pred):
    mask = tf.cast(tf.math.not_equal(y_true, tf.constant(-1.)), tf.float32)
    y_true, y_pred = tf.expand_dims(y_true, axis=-1), tf.expand_dims(y_pred, axis=-1)
    bce = tf.keras.losses.BinaryCrossentropy(reduction='none')
    return tf.reduce_sum(tf.cast(bce(y_true, y_pred), tf.float32) * mask)

mask = (y_test != -1).astype(int)
pd.DataFrame({'n_labels': mask.sum(axis=1), 'preds': ((preds * mask) >= .5).sum(axis=1)}).plot(figsize=(20, 5))
And it looks like the model learns, but it predicts all non-masked labels as 1.
@thushv89 This is my problem. I take 2 time points, t1 and t2, and look for all vehicles that are in maintenance at time t1 and for all vehicles that are planned to be in maintenance at time t2. That is my bag of items. Then I calculate features such as how much time the t1 vehicles have already spent in maintenance, how much time remains from t1 to the planned start for a t2 vehicle, etc. My model learns well if I try to predict the number of vehicles in maintenance at time t2, but I would like to predict which of them will leave and which of them will come in (3 vs [True, False, True, True] for 4 vehicles in the bag).
There are three important improvements:
1. Remove the GlobalAveragePooling1D. It is a kind of bottleneck (data compression) if you make a prediction for each item. Without this layer you also get, for free, a two-dimensional output per sample with the maximum number of items in the first dimension (5 in my case), so you can set the last Dense layer to the number of categories (1 in my case).
2. Add a custom loss function to exclude the target padding from the calculation (already added to my question), and a custom metric function if you want to see the real metric.
3. Add an attention_mask to the MultiHeadAttention (instead of the Masking layer) to mask the padding. A sketch of how these changes fit together is shown below.
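Here is a sketch of how those three changes could be wired up (a reconstruction under assumptions, not the exact code; in particular, the padding mask is derived from all-zero feature rows, and masked_bce is a compact variant of the my_loss_fn above):
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

def transformer_encoder(inputs, attention_mask, head_size, num_heads, ff_dim, dropout=0):
    # Self-attention that ignores padded items via attention_mask.
    x = layers.MultiHeadAttention(
        key_dim=head_size, num_heads=num_heads, dropout=dropout
    )(inputs, inputs, attention_mask=attention_mask)
    x = layers.Dropout(dropout)(x)
    x = layers.LayerNormalization(epsilon=1e-6)(x)
    res = x + inputs
    x = layers.Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(res)
    x = layers.Dropout(dropout)(x)
    x = layers.Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
    x = layers.LayerNormalization(epsilon=1e-6)(x)
    return x + res

def build_model(input_shape, head_size, num_heads, ff_dim, num_transformer_blocks, dropout=0):
    inputs = keras.Input(shape=input_shape)                                  # (batch, 5, 3)
    # True for real instances, False for zero-padded rows (assumes padding is all-zero).
    not_padded = layers.Lambda(
        lambda t: tf.reduce_any(tf.not_equal(t, 0.0), axis=-1))(inputs)      # (batch, 5)
    # Pairwise mask for self-attention: (batch, 5, 5).
    attention_mask = layers.Lambda(
        lambda m: tf.logical_and(m[:, :, None], m[:, None, :]))(not_padded)
    x = inputs
    for _ in range(num_transformer_blocks):
        x = transformer_encoder(x, attention_mask, head_size, num_heads, ff_dim, dropout)
    # No GlobalAveragePooling1D: keep one prediction per item.
    x = layers.Dense(1, activation="sigmoid")(x)                             # (batch, 5, 1)
    outputs = layers.Reshape((input_shape[0],))(x)                           # (batch, 5), matches y
    return keras.Model(inputs, outputs)

def masked_bce(y_true, y_pred):
    # Exclude padded targets (-1) from the loss, like my_loss_fn in the question.
    mask = tf.cast(tf.not_equal(y_true, -1.0), tf.float32)
    bce = keras.losses.binary_crossentropy(y_true[..., None], y_pred[..., None])  # (batch, 5)
    return tf.reduce_sum(bce * mask) / tf.maximum(tf.reduce_sum(mask), 1.0)

model = build_model((5, 3), head_size=256, num_heads=4, ff_dim=4, num_transformer_blocks=4)
model.compile(loss=masked_bce, optimizer=keras.optimizers.Adam(1e-4))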
Just a simple add-on to @Mykola_Zotko's answer, for new users who are learning deep learning with Keras and TensorFlow.
Remove the GlobalAveragePooling1D
For context, GlobalAveragePooling1D is a global average pooling operation for temporal data. So when you remove this layer, you remove the "pooling" operation; or, in simpler terms, as @Mykola_Zotko put it:
... you get a two-dimensional tensor with the max number of items in the first dimension (5 in my case) for free in the output
The alias is:
tf.keras.layers.GlobalAvgPool1D
and the signature of this layer is:
tf.keras.layers.GlobalAveragePooling1D(data_format="channels_last", **kwargs)
The source for this can be found on:
GitHub
TensorFlow 1 version
TensorFlow.org doc
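A quick shape-only illustration (a minimal sketch of my own, not from the answer above) of what removing the pooling changes:
import tensorflow as tf

x = tf.random.normal((10, 5, 3))                       # (batch, items, features)
pooled = tf.keras.layers.GlobalAveragePooling1D()(x)   # averages over the 5 items
print(pooled.shape)                                    # (10, 3): one vector per bag
per_item = tf.keras.layers.Dense(1, activation="sigmoid")(x)
print(per_item.shape)                                  # (10, 5, 1): one prediction per item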
Add a custom loss function
What a loss function does is "to generate the quantity that a model should seek to minimize during training time". Source
Or in other terms:
In mathematical optimization, statistics, machine learning and Deep Learning the Loss Function (also known as Cost Function or Error Function) is a function that defines a correlation between a series of values and a real number. That number represents conceptually the cost associated with an event or a set of values. In general, the goal of an optimization procedure is to minimize the loss function. Towardsdatascience - custom loss function in tensorflow
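As a minimal generic illustration (my own sketch, not code from either answer), a custom loss in Keras is just a callable taking y_true and y_pred and returning the quantity to minimize:
import tensorflow as tf

def my_mse(y_true, y_pred):
    # mean squared error, written by hand as a custom loss
    return tf.reduce_mean(tf.square(y_true - y_pred))

# model.compile(loss=my_mse, optimizer="adam")  # assumes an already-built model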
Add an attention_mask to the MultiHeadAttention
Alias:
tf.keras.layers.MultiHeadAttention
Code for the method:
tf.keras.layers.MultiHeadAttention(
    num_heads,
    key_dim,
    value_dim=None,
    dropout=0.0,
    use_bias=True,
    output_shape=None,
    attention_axes=None,
    kernel_initializer='glorot_uniform',
    bias_initializer='zeros',
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    **kwargs
)
Source on:
GitHub
TensorFlow.org doc
Previous improvements that were made to the code:
Changed metrics=["accuracy"] to metrics=["binary_accuracy"]:
model.compile(
    loss="binary_crossentropy",
    optimizer=keras.optimizers.Adam(learning_rate=1e-4),
    metrics=["binary_accuracy"],
)
Using cross-entropy in the custom loss function

How to prepare the inputs in Keras implementation of Wavenet for time-series prediction

In the Keras implementation of WaveNet, the input shape is (None, 1). I have a time series (val(t)) in which the target is to predict the next data point given a window of past values (the window size depends on the maximum dilation). The input shape in WaveNet is confusing. I have a few questions about it:
How does Keras figure out the input dimension (None) when a full sequence is given? According to the dilations, we want the input to have a length of 2^8 (see the receptive-field sketch after the code below).
If an input series of shape (1M, 1) is given as training X, do we need to generate vectors of 2^8 time steps as input? It seems we can just use the input series as the input of WaveNet (I am not sure why the raw time series input does not give an error).
In general, how can we debug such Keras networks? I tried to apply a layer to numerical data, like Conv1D(16, 1, padding='same', activation='relu')(inputs); however, it gives an error.
n_filters = 32
filter_width = 2
dilation_rates = [2**i for i in range(7)] * 2

from keras.models import Model
from keras.layers import Input, Conv1D, Dense, Activation, Dropout, Lambda, Multiply, Add, Concatenate
from keras.optimizers import Adam

history_seq = Input(shape=(None, 1))
x = history_seq

skips = []
for dilation_rate in dilation_rates:
    # preprocessing - equivalent to time-distributed dense
    x = Conv1D(16, 1, padding='same', activation='relu')(x)
    # filter
    x_f = Conv1D(filters=n_filters,
                 kernel_size=filter_width,
                 padding='causal',
                 dilation_rate=dilation_rate)(x)
    # gate
    x_g = Conv1D(filters=n_filters,
                 kernel_size=filter_width,
                 padding='causal',
                 dilation_rate=dilation_rate)(x)
    # combine filter and gating branches
    z = Multiply()([Activation('tanh')(x_f),
                    Activation('sigmoid')(x_g)])
    # postprocessing - equivalent to time-distributed dense
    z = Conv1D(16, 1, padding='same', activation='relu')(z)
    # residual connection
    x = Add()([x, z])
    # collect skip connections
    skips.append(z)

# add all skip connection outputs
out = Activation('relu')(Add()(skips))

# final time-distributed dense layers
out = Conv1D(128, 1, padding='same')(out)
out = Activation('relu')(out)
out = Dropout(.2)(out)
out = Conv1D(1, 1, padding='same')(out)

# extract training target at end
def slice(x, seq_length):
    return x[:, -seq_length:, :]

pred_seq_train = Lambda(slice, arguments={'seq_length': 1})(out)

model = Model(history_seq, pred_seq_train)
model.compile(Adam(), loss='mean_absolute_error')
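A small aside on question 1 (a sketch added here, not part of the original post): the receptive field of a stack of causal dilated convolutions follows directly from the kernel size and the dilation rates, which is where a window length on the order of 2^8 comes from.
# Receptive field of stacked causal dilated 1-D convolutions:
# each layer extends it by (kernel_size - 1) * dilation_rate time steps.
def receptive_field(kernel_size, dilation_rates):
    return 1 + sum((kernel_size - 1) * d for d in dilation_rates)

# The dilation rates from the code above: [1, 2, 4, ..., 64] repeated twice, kernel_size = 2.
print(receptive_field(2, [2**i for i in range(7)] * 2))  # 255, roughly 2^8 past values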
You are using extreme values for the dilation rate; they don't make sense. Try to reduce them, using, for example, a sequence like [1, 2, 4, 8, 16, 32]. The dilation rates aren't a constraint on the dimension of the input passed.
Your network works by simply passing it this input:
n_filters = 32
filter_width = 2
dilation_rates = [1, 2, 4, 8, 16, 32]

....

model = Model(history_seq, pred_seq_train)
model.compile(Adam(), loss='mean_absolute_error')

n_sample = 5
time_step = 100
X = np.random.uniform(0, 1, (n_sample, time_step, 1))
model.predict(X)
Specifying a None dimension in Keras means leaving the model free to receive input of any size in that dimension. This does not mean you can pass samples of varying dimensions together; they must always have the same format. You can, however, build the model each time with a different dimension size:
for time_step in np.random.randint(100, 200, 4):
    print('temporal dim:', time_step)
    n_sample = 5
    model = Model(history_seq, pred_seq_train)
    model.compile(Adam(), loss='mean_absolute_error')
    X = np.random.uniform(0, 1, (n_sample, time_step, 1))
    print(model.predict(X).shape)
I also suggest a premade Keras library which provides a WaveNet implementation: https://github.com/philipperemy/keras-tcn. You can use it as a baseline and also investigate its code to create a WaveNet.

TensorFlow Error: No gradients provided for any variable, check your graph for ops that do not support gradients

I am trying to use a derived class of TensorFlow's FIFOQueue. I override the enqueue function: it takes in the images and enqueues the output of the final dense layer into the queue.
Then I dequeue the output tensor and try to calculate the cost function and minimize it using the Adam optimizer.
When I calculate the cost and minimize it inside the enqueue function itself, my code works fine. But as soon as I move the loss_op (i.e. my cost) outside the derived class, I get the error: "No gradients provided for any variable, check your graph for ops that do not support gradients".
Import
from tensorflow.python.ops.data_flow_ops import FIFOQueue
import tensorflow as tf
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_data_flow_ops
Read the data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
Y = mnist.train.labels
X = mnist.train.images
Derived Queue
class MyQueue(FIFOQueue):
    def enqueue(self, x, Y, name=None):
        # Reshape
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
        # 1st conv_2d layer
        conv1_mp = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu, name='Q1_c1')
        # 1st max pool layer
        conv1 = tf.layers.max_pooling2d(conv1_mp, 2, 2, name='Q1_mp1')
        # 2nd conv_2d layer
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu, name='Q1_c2')
        # 2nd max pool layer
        conv2_mp = tf.layers.max_pooling2d(conv2, 2, 2, name='Q1_mp2')
        # Flatten
        flat = tf.contrib.layers.flatten(conv2_mp)
        # Dense 1
        dense_1 = tf.layers.dense(tf.reshape(flat, [-1, 1600]), 1024, name='Q2_D1')
        # Dropout = 0.8
        drop = tf.layers.dropout(dense_1, rate=0.8, training=True, name='Q2_Dp')
        # Output class = 10
        out = tf.layers.dense(drop, n_classes, name='Q2_Op')
        # update vals to put "out" in the queue
        vals = out
        # Rest of the enqueue operation which has not been changed
        with ops.name_scope(name, "%s_enqueue" % self._name,
                            self._scope_vals(vals)) as scope:
            vals = self._check_enqueue_dtypes(vals)
            # NOTE(mrry): Not using a shape function because
            # we need access to the `QueueBase` object.
            for val, shape in zip(vals, self._shapes):
                val.get_shape().assert_is_compatible_with(shape)
            if self._queue_ref.dtype == _dtypes.resource:
                return gen_data_flow_ops.queue_enqueue_v2(
                    self._queue_ref, vals, name=scope)
            else:
                return gen_data_flow_ops.queue_enqueue(
                    self._queue_ref, vals, name=scope)
Main
q_pred = MyQueue(capacity=1, dtypes=tf.float32)
enqueue_op = q_pred.enqueue(X, Y)
data_pred = q_pred.dequeue()

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    sess.run(enqueue_op)
    out = data_pred

    # Calculating Cost
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=out, labels=Y), name='Q2_loss')
    # Adam optimiser
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
    # Write in the graph
    writer = tf.summary.FileWriter("logs\MyDerivedQueue", sess.graph)

    ####### ERROR LINE ###################
    # Minimising the cost.
    train_op = optimizer.minimize(cost)

    correct_pred = tf.equal(tf.argmax(out, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
After multiple trial-and-error attempts, I came to the conclusion that this won't work, because backpropagation isn't in our control here: the cost is built on the dequeued tensor, and gradients cannot flow back through the queue's enqueue/dequeue ops to the layer variables. When using multiple GPUs, every GPU will run its feedforward pass, and while backpropagating we won't know which weights/parameters should be updated.
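A minimal sketch of the usual alternative (a suggestion with assumed shapes, not code from the original post): keep the queue for moving data only, and build the cost directly on the logits tensor returned by the network, so there is one differentiable path from the variables to the loss.
import tensorflow as tf

def network(x, n_classes=10):
    # same layers as in MyQueue.enqueue, but returning the logits tensor directly
    x = tf.reshape(x, [-1, 28, 28, 1])
    c1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
    p1 = tf.layers.max_pooling2d(c1, 2, 2)
    c2 = tf.layers.conv2d(p1, 64, 3, activation=tf.nn.relu)
    p2 = tf.layers.max_pooling2d(c2, 2, 2)
    flat = tf.contrib.layers.flatten(p2)
    d1 = tf.layers.dense(flat, 1024)
    return tf.layers.dense(d1, n_classes)

# A plain FIFOQueue holds (image, label) pairs; gradients never have to pass through it.
queue = tf.FIFOQueue(capacity=1000, dtypes=[tf.float32, tf.float32],
                     shapes=[[784], [10]])
images_ph = tf.placeholder(tf.float32, [None, 784])
labels_ph = tf.placeholder(tf.float32, [None, 10])
enqueue_op = queue.enqueue_many([images_ph, labels_ph])
images, labels = queue.dequeue_many(32)  # a batch of 32 examples

logits = network(images)  # variables -> logits -> cost is a single differentiable path
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)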

Have tf.layers.dense connected to different inputs OR have tf.train.optimizer optimize tensors?

I am new to TensorFlow and more advanced machine learning, so I tried to get a better grasp of RNNs by implementing one by hand instead of using tf.contrib.rnn.RNNCell. My first problem was that I needed to unroll the net for backpropagation, so I looped through my sequence, and I needed to keep consistent weights and biases, so I couldn't reinitialize a dense layer with tf.layers.dense each time; but I also needed my layer connected to the current timestep of my sequence, and I couldn't find a way to change what a dense layer was connected to. To work around this I tried to implement my own version of tf.layers.dense, and this worked fine until I got the error NotImplementedError("Trying to update a Tensor " ...) when I tried to optimize my custom dense layers.
My code:
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import random

# -----------------
# WORD PARAMETERS
# -----------------
target_string = ['Hello ', 'Hello ', 'World ', 'World ', '!']
number_input_words = 1

# --------------------------
# TRAINING HYPERPARAMETERS
# --------------------------
training_steps = 4000
batch_size = 9
learning_rate = 0.01
display_step = 150
hidden_cells = 20

# ----------------------
# PREPARE DATA AS DICT
# ----------------------
# TODO AUTOMATICALLY CREATE DICT
dictionary = {'Hello ': 0, 'World ': 1, '!': 2}
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
vocab_size = len(dictionary)

# ------------
# LSTM MODEL
# ------------
class LSTM:
    def __init__(self, sequence_length, number_input_words, hidden_cells, mem_size_x, mem_size_y, learning_rate):
        self.sequence = tf.placeholder(tf.float32, (sequence_length, vocab_size), 'sequence')
        self.memory = tf.zeros([mem_size_x, mem_size_y])
        # sequence_length = self.sequence.shape[0]
        units = [vocab_size, 5, 4, 2, 6, vocab_size]
        weights = [tf.random_uniform((units[i-1], units[i])) for i in range(len(units))[1:]]
        biases = [tf.random_uniform((1, units[i])) for i in range(len(units))[1:]]
        self.total_loss = 0
        self.outputs = []
        for word in range(sequence_length-1):
            sequence_w = tf.reshape(self.sequence[word], [1, vocab_size])
            layers = []
            for i in range(len(weights)):
                if i == 0:
                    layers.append(tf.matmul(sequence_w, weights[0]) + biases[0])
                else:
                    layers.append(tf.matmul(layers[i-1], weights[i]) + biases[i])
            percentages = tf.nn.softmax(logits=layers[-1])
            self.outputs.append(percentages)
            self.total_loss += tf.losses.absolute_difference(tf.reshape(self.sequence[word+1], (1, vocab_size)),
                                                             tf.reshape(percentages, (1, vocab_size)))
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.train_operation = optimizer.minimize(loss=self.total_loss, var_list=weights+biases,
                                                  global_step=tf.train.get_global_step())

lstm = LSTM(len(target_string), number_input_words, hidden_cells, 10, 5, learning_rate)

# ---------------
# START SESSION
# ---------------
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    sequence = []
    for i in range(len(target_string)):
        x = [0]*vocab_size
        x[dictionary[target_string[i]]] = 1
        sequence.append(x)
    print(sequence)
    for x in range(1000):
        sess.run(lstm.train_operation, feed_dict={lstm.sequence: sequence})
    prediction, loss = sess.run((lstm.outputs, lstm.total_loss), feed_dict={lstm.sequence: sequence})
    print(prediction)
    print(loss)
Any answer that tells me either how to connect tf.layers.dense to different variables each time, or how to get around my NotImplementedError, would be greatly appreciated. I apologize if this question is lengthy or badly worded; I'm still new to Stack Overflow.
EDIT:
I've updated the LSTM class part of my code (inside def __init__) to:
self.sequence = [tf.placeholder(tf.float32, (batch_size, vocab_size), 'sequence') for _ in range(sequence_length-1)]
self.total_loss = 0
self.outputs = []
rnn_cell = rnn.BasicLSTMCell(hidden_cells)
h = tf.zeros((batch_size, hidden_cells))
for i in range(sequence_length-1):
    current_sequence = self.sequence[i]
    h = rnn_cell(current_sequence, h)
    self.outputs.append(h)
But I still get an error on the line h = rnn_cell(current_sequence, h) about not being able to iterate over tensors. I'm not trying to iterate over any tensors, and if I am, I don't mean to.
There's a standard way of approaching this issue (the best approach I know of). Instead of trying to create a new list of dense layers, do the following. Before that, let's assume your hidden layer size is h_dim, the number of steps to unroll is num_unroll, and the batch size is batch_size.
In a for loop, you calculate the output of the RNNCell for each unrolled input
h = tf.zeros(...)
outputs = []
for ui in range(num_unroll):
    out, state = rnn_cell(x[ui], state)
    outputs.append(out)
Now concatenate all the outputs into a single tensor of size [batch_size*num_unroll, h_dim].
Send this through a single dense layer of size [h_dim, num_classes]
logits = tf.matmul(tf.concat(outputs,...), w) + b
predictions = tf.nn.softmax(logits)
You now have the logits for all the unrolled inputs; it's then just a matter of reshaping the tensor to [batch_size, num_unroll, num_classes]. A complete sketch is given below.
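Putting the snippets above together, here is a minimal end-to-end sketch (a filling-in of the elided parts with assumed sizes; x, w, b and rnn_cell correspond to the names used above):
import tensorflow as tf
from tensorflow.contrib import rnn

batch_size, num_unroll, input_dim = 2, 3, 3
h_dim, num_classes = 20, 3

# one placeholder per unrolled time step
x = [tf.placeholder(tf.float32, [batch_size, input_dim]) for _ in range(num_unroll)]

rnn_cell = rnn.BasicLSTMCell(h_dim)
state = rnn_cell.zero_state(batch_size, tf.float32)

# unroll the cell; note that the cell returns (output, state), not just h
outputs = []
for ui in range(num_unroll):
    out, state = rnn_cell(x[ui], state)
    outputs.append(out)

# a single dense layer shared across all time steps
w = tf.get_variable("w", [h_dim, num_classes])
b = tf.get_variable("b", [num_classes])

# concat along the batch axis: [batch_size * num_unroll, h_dim] -> [batch_size * num_unroll, num_classes]
logits = tf.matmul(tf.concat(outputs, axis=0), w) + b
predictions = tf.nn.softmax(logits)

# back to one prediction tensor per step: [num_unroll, batch_size, num_classes]
# (transpose the first two axes if you want it batch-major)
predictions = tf.reshape(predictions, [num_unroll, batch_size, num_classes])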
Edited (Feeding in Data): The data will be presented in the form of a list of num_unroll many placeholders. So,
x = [tf.placeholder(shape=[batch_size,3]...) for ui in range(num_unroll)]
Now say you have data like below,
Hello world bye
Bye hello world
Here the batch size is 2 and the sequence length is 3. Once converted to one-hot encoding, your data looks like below (shape [time_steps, batch_size, 3]).
data = [ [ [1,0,0], [0,0,1] ], [ [0,1,0], [1,0,0] ], [ [0,0,1], [0,1,0] ] ]
Now feed data in, in the following format.
feed_dict = {}
for ui in range(3):
    feed_dict[x[ui]] = data[ui]

ValueError: Error when checking input: expected gru_5_input to have shape (None, None, 10) but got array with shape (1, 4, 1)

I am trying to make hourly predictions using a recurrent neural network with TensorFlow and Keras in Python. I have assigned the inputs of the neural network to be (None, None, 5).
However, I am getting the error:
ValueError: Error when checking input: expected gru_3_input to have shape (None, None, 10) but got array with shape (1, 4, 1)
My MVCE code is:
%matplotlib inline
#!pip uninstall keras
#!pip install keras==2.1.2
import tensorflow as tf
import pandas as pd
from pandas import DataFrame
import math

##### Create the Recurrent Neural Network ###
model = Sequential()
model.add(GRU(units=5,
              return_sequences=True,
              input_shape=(None, num_x_signals)))
## This line is going to map the above 512 values to just 1 (num_y_signal)
model.add(Dense(num_y_signals, activation='sigmoid'))

if False:
    from tensorflow.python.keras.initializers import RandomUniform
    # Maybe use lower init-ranges.##### I may have to change these during debugging####
    init = RandomUniform(minval=-0.05, maxval=0.05)
    model.add(Dense(num_y_signals,
                    activation='linear',
                    kernel_initializer=init))

warmup_steps = 5

def loss_mse_warmup(y_true, y_pred):
    # Ignore the "warmup" parts of the sequences
    # by taking slices of the tensors.
    y_true_slice = y_true[:, warmup_steps:, :]
    y_pred_slice = y_pred[:, warmup_steps:, :]
    # These sliced tensors both have this shape:
    # [batch_size, sequence_length - warmup_steps, num_y_signals]
    # Calculate the MSE loss for each value in these tensors.
    # This outputs a 3-rank tensor of the same shape.
    loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                        predictions=y_pred_slice)
    loss_mean = tf.reduce_mean(loss)
    return loss_mean

optimizer = RMSprop(lr=1e-3)  ### This is something related to debugging
model.compile(loss=loss_mse_warmup, optimizer=optimizer)  #### I may have to make the output a signal rather than the whole data set
print(model.summary())

model.fit_generator(generator=generator,
                    epochs=20,
                    steps_per_epoch=100,
                    validation_data=validation_data)
I am not sure why this happens, but I believe it could have something to do with reshaping my training and testing data. I have also attached my full error message to my code to make the problem reproducible.
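As an aside on the reshaping point (this sketch is not taken from the question or from the answer below): for a GRU with input_shape=(None, num_x_signals), the training arrays need to be 3-D, (samples, time steps, features). One common way to get there from a 1-D series is a sliding window:
import numpy as np

def make_windows(series, window):
    # turn a 1-D series into (samples, window, 1) inputs and (samples, 1) targets
    xs, ys = [], []
    for i in range(len(series) - window):
        xs.append(series[i:i + window])
        ys.append(series[i + window])
    return np.array(xs)[..., None], np.array(ys)[:, None]

series = np.sin(np.linspace(0, 10, 200))
X, y = make_windows(series, window=24)
print(X.shape, y.shape)  # (176, 24, 1) (176, 1) -> here num_x_signals would be 1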
I'm unsure about the correctness but here it is:
%matplotlib inline
#!pip uninstall keras
#!pip install keras==2.1.2
import tensorflow as tf
import pandas as pd
from pandas import DataFrame
import math
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
import datetime
from keras.layers import Input, Dense, GRU, Embedding
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau

datetime = [datetime.datetime(2012, 1, 1, 1, 0, 0) + datetime.timedelta(hours=i) for i in range(10)]
X = np.array([2.25226244, 1.44078451, 0.99174488, 0.71179491, 0.92824542, 1.67776948, 2.96399534, 5.06257161, 7.06504245, 7.77817664,
              0.92824542, 1.67776948, 2.96399534, 5.06257161, 7.06504245, 7.77817664])
y = np.array([0.02062136, 0.00186715, 0.01517354, 0.0129046, 0.02231125, 0.01492537, 0.09646542, 0.28444476, 0.46289928, 0.77817664,
              0.02231125, 0.01492537, 0.09646542, 0.28444476, 0.46289928, 0.77817664])
X = X[1:11]
y = y[1:11]

df = pd.DataFrame({'date': datetime, 'y': y, 'X': X})
df['t'] = [x for x in range(10)]
df['X-1'] = df['X'].shift(-1)
x_data = df['X-1'].fillna(0)
y_data = y
num_data = len(x_data)

#### training and testing split ####
train_split = 0.6
num_train = int(train_split*num_data)
num_test = num_data - num_train  ## number of observations in test set

# input train test
x_train = x_data[0:num_train].reshape(-1, 1)
x_test = x_data[num_train:].reshape(-1, 1)
# print(len(x_train) + len(x_test))

# output train test
y_train = y_data[0:num_train].reshape(-1, 1)
y_test = y_data[num_train:].reshape(-1, 1)
# print(len(y_train) + len(y_test))

### number of input signals
num_x_signals = x_data.shape[0]
# print(num_x_signals)
## number of output signals ##
num_y_signals = y_data.shape[0]
# print(num_y_signals)

#### data scaling ####
x_scaler = MinMaxScaler(feature_range=(0, 1))
x_train_scaled = x_scaler.fit_transform(x_train)
x_test_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(x_test)
y_scaler = MinMaxScaler()
y_train_scaled = y_scaler.fit_transform(y_train)
y_test_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(y_test)

def batch_generator(batch_size, sequence_length):
    """
    Generator function for creating random batches of training-data.
    """
    # Infinite loop, providing the neural network with random data from the
    # dataset for x and y.
    while True:
        # Allocate a new array for the batch of input-signals.
        x_shape = (batch_size, sequence_length, num_x_signals)
        x_batch = np.zeros(shape=x_shape, dtype=np.float16)
        # Allocate a new array for the batch of output-signals.
        y_shape = (batch_size, sequence_length, num_y_signals)
        y_batch = np.zeros(shape=y_shape, dtype=np.float16)
        # Fill the batch with random sequences of data.
        for i in range(batch_size):
            # Get a random start-index.
            # This points somewhere into the training-data.
            idx = np.random.randint(num_train - sequence_length)
            # Copy the sequences of data starting at this index.
            x_batch[i] = x_train_scaled[idx:idx+sequence_length]
            y_batch[i] = y_train_scaled[idx:idx+sequence_length]
        yield (x_batch, y_batch)

batch_size = 20
sequence_length = 2
generator = batch_generator(batch_size=batch_size,
                            sequence_length=sequence_length)
x_batch, y_batch = next(generator)

######### Validation Set Start ########
def batch_generator(batch_size, sequence_length):
    """
    Generator function for creating random batches of training-data.
    """
    # Infinite loop, providing the neural network with random data from the
    # dataset for x and y.
    while True:
        # Allocate a new array for the batch of input-signals.
        x_shape = (batch_size, sequence_length, num_x_signals)
        x_batch = np.zeros(shape=x_shape, dtype=np.float16)
        # Allocate a new array for the batch of output-signals.
        y_shape = (batch_size, sequence_length, num_y_signals)
        y_batch = np.zeros(shape=y_shape, dtype=np.float16)
        # Fill the batch with random sequences of data.
        for i in range(batch_size):
            # Get a random start-index.
            # This points somewhere into the training-data.
            idx = np.random.randint(num_train - sequence_length)
            # Copy the sequences of data starting at this index.
            x_batch[i] = x_test_scaled[idx:idx+sequence_length]
            y_batch[i] = y_test_scaled[idx:idx+sequence_length]
        yield (x_batch, y_batch)

validation_data = next(batch_generator(batch_size, sequence_length))
# validation_data = (np.expand_dims(x_test_scaled, axis=0),
#                    np.expand_dims(y_test_scaled, axis=0))
# Validation set end

##### Create the Recurrent Neural Network ###
model = Sequential()
model.add(GRU(units=5,
              return_sequences=True,
              input_shape=(None, num_x_signals)))
## This line is going to map the above 512 values to just 1 (num_y_signal)
model.add(Dense(num_y_signals, activation='sigmoid'))

if False:
    from tensorflow.python.keras.initializers import RandomUniform
    # Maybe use lower init-ranges.##### I may have to change these during debugging####
    init = RandomUniform(minval=-0.05, maxval=0.05)
    model.add(Dense(num_y_signals,
                    activation='linear',
                    kernel_initializer=init))

warmup_steps = 5

def loss_mse_warmup(y_true, y_pred):
    # Ignore the "warmup" parts of the sequences
    # by taking slices of the tensors.
    y_true_slice = y_true[:, warmup_steps:, :]
    y_pred_slice = y_pred[:, warmup_steps:, :]
    # These sliced tensors both have this shape:
    # [batch_size, sequence_length - warmup_steps, num_y_signals]
    # Calculate the MSE loss for each value in these tensors.
    # This outputs a 3-rank tensor of the same shape.
    loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                        predictions=y_pred_slice)
    loss_mean = tf.reduce_mean(loss)
    return loss_mean

optimizer = RMSprop(lr=1e-3)  ### This is something related to debugging
model.compile(loss=loss_mse_warmup, optimizer=optimizer)  #### I may have to make the output a signal rather than the whole data set
print(model.summary())

model.fit_generator(generator=generator,
                    epochs=20,
                    steps_per_epoch=100,
                    validation_data=validation_data)
I've only changed the part of the code between the "Validation Set Start" and "Validation set end" comments.
