Many-to-One RNN with Variable Sequence Length¶
In this tutorial we implement a many-to-one RNN that reads zero-padded digit sequences of varying length and learns to output the sum of each sequence.
Fig. 1. Unfolded representation of the implemented RNN structure
0. Import the required libraries:¶
We start by importing the required libraries into our Python environment.
In [1]:
# imports
import tensorflow as tf
import numpy as np
1. Generate some data¶
For this tutorial, we generate a synthetic dataset: each sample is a sequence of random digits, zero-padded up to a maximum length, and the target is the sum of the digits in the sequence.
1.1. Data dimension¶
Here, we specify the dimensions of the data samples which will be used in the code. Defining these variables makes it easier (compared with using hard-coded numbers throughout the code) to modify them later. Ideally these would be inferred from the data that has been read, but here we just write the numbers.
In [ ]:
# Data Dimensions
input_dim = 1 # input dimension
seq_max_len = 4 # sequence maximum length
out_dim = 1 # output dimension
1.2. Generate data and display the sizes¶
Now we can use the helper function defined below to generate the training and test sets, and display their sizes:
In [ ]:
def generate_data(count=1000, max_length=4, dim=1):
    """Generate 'count' zero-padded digit sequences and their sums."""
    x = np.random.randint(0, 10, size=(count, max_length, dim))
    # Random true length for each sequence, between 1 and max_length
    length = np.random.randint(1, max_length + 1, count)
    # Zero out each sequence beyond its true length
    for i in range(count):
        x[i, length[i]:, :] = 0
    # The target is the sum over the (padded) time axis
    y = np.sum(x, axis=1)
    return x, y, length
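As a quick sanity check, we can generate a couple of samples and inspect them (a minimal illustration; the fixed seed is arbitrary and only makes this cell reproducible):
In [ ]:
np.random.seed(0)  # arbitrary seed, only for a reproducible illustration
x_demo, y_demo, len_demo = generate_data(count=2, max_length=4, dim=1)
print('sequence:', x_demo[0].ravel())  # zero-padded digit sequence
print('length:  ', len_demo[0])        # true (unpadded) length
print('target:  ', y_demo[0])          # sum of the digits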
In [ ]:
x_train, y_train, seq_len_train = generate_data(count=1000, max_length=seq_max_len, dim=input_dim)
x_test, y_test, seq_len_test = generate_data(count=5, max_length=seq_max_len, dim=input_dim)
print("Size of:")
print("- Training-set:\t\t{}".format(len(y_train)))
print("- Test-set:\t{}".format(len(y_test)))
To get batches of samples:
In [ ]:
def next_batch(x, y, seq_len, batch_size):
    """Return a random mini-batch of samples, targets, and sequence lengths."""
    N = x.shape[0]
    batch_indices = np.random.permutation(N)[:batch_size]
    x_batch = x[batch_indices]
    y_batch = y[batch_indices]
    seq_len_batch = seq_len[batch_indices]
    return x_batch, y_batch, seq_len_batch
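For example, drawing a mini-batch of three samples (an illustrative check of the returned shapes):
In [ ]:
x_b, y_b, len_b = next_batch(x_train, y_train, seq_len_train, batch_size=3)
print(x_b.shape, y_b.shape, len_b.shape)  # (3, 4, 1) (3, 1) (3,)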
2. Hyperparameters¶
In [ ]:
# Parameters
learning_rate = 0.01 # The optimization initial learning rate
training_steps = 10000 # Total number of training steps
batch_size = 10 # batch size
display_freq = 1000 # Frequency of displaying the training results
3. Network configuration¶
In [ ]:
num_hidden_units = 10 # number of hidden units
4. Create the network helper functions¶
4.1. Helper-functions for creating new variables¶
In [ ]:
# weight and bias wrappers
def weight_variable(shape):
    """
    Create a weight variable with appropriate initialization
    :param shape: weight shape
    :return: initialized weight variable
    """
    initer = tf.truncated_normal_initializer(stddev=0.01)
    return tf.get_variable('W',
                           dtype=tf.float32,
                           shape=shape,
                           initializer=initer)


def bias_variable(shape):
    """
    Create a bias variable with appropriate initialization
    :param shape: bias variable shape
    :return: initialized bias variable
    """
    initial = tf.constant(0., shape=shape, dtype=tf.float32)
    return tf.get_variable('b',
                           dtype=tf.float32,
                           initializer=initial)
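Note that tf.get_variable looks variables up by name, so calling either wrapper twice in the same scope would raise a ValueError. If you ever need more than one weight matrix, wrap each call in its own tf.variable_scope (a minimal sketch; the scope names and shapes here are arbitrary and not part of this tutorial's graph):
In [ ]:
# Hypothetical example: two distinct weight matrices via variable scopes
with tf.variable_scope('fc_1'):
    W_1 = weight_variable(shape=[4, 10])
with tf.variable_scope('fc_2'):
    W_2 = weight_variable(shape=[10, 1])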
4.2. Helper-function for creating an RNN¶
In [ ]:
def RNN(x, weights, biases, n_hidden, seq_max_len, seq_len):
    """
    :param x: inputs of shape [batch_size, max_time, input_dim]
    :param weights: matrix of fully-connected output layer weights
    :param biases: vector of fully-connected output layer biases
    :param n_hidden: number of hidden units
    :param seq_max_len: sequence maximum length
    :param seq_len: length of each sequence of shape [batch_size,]
    """
    cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
    outputs, states = tf.nn.dynamic_rnn(cell, x, sequence_length=seq_len, dtype=tf.float32)
    # Hack to build the indexing and retrieve the right output.
    batch_size = tf.shape(outputs)[0]
    # Flat index of the last valid output of each sample
    index = tf.range(0, batch_size) * seq_max_len + (seq_len - 1)
    # Select the last relevant output of each sequence
    outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)
    out = tf.matmul(outputs, weights) + biases
    return out
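To see why the index arithmetic above picks out the last relevant output, here is a small NumPy sketch with made-up values (a batch of 2 sequences with max length 4; the lengths are hypothetical):
In [ ]:
# Hypothetical values: two sequences of true lengths 2 and 4
seq_len_demo = np.array([2, 4])
index_demo = np.arange(2) * 4 + (seq_len_demo - 1)
print(index_demo)  # [1 7]: rows 1 and 7 of the flattened [2*4, n_hidden] outputs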
5. Create the network graph¶
5.1. Placeholders¶
In [ ]:
# Placeholders for inputs (x), input sequence lengths (seqLen) and outputs (y)
x = tf.placeholder(tf.float32, [None, seq_max_len, input_dim])
seqLen = tf.placeholder(tf.int32, [None])
y = tf.placeholder(tf.float32, [None, out_dim])
5.2. Define the network¶
In [ ]:
# create weight matrix, initialized from a truncated normal with stddev 0.01
W = weight_variable(shape=[num_hidden_units, out_dim])
# create bias vector initialized as zero
b = bias_variable(shape=[out_dim])
# Network predictions
pred_out = RNN(x, W, b, num_hidden_units, seq_max_len, seqLen)
5.3. Define the loss function and optimizer¶
In [ ]:
# Define the loss function (i.e. mean-squared error loss) and optimizer
cost = tf.reduce_mean(tf.square(pred_out - y))
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
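In symbols, the cost above is the mean-squared error over a batch of $N$ samples, $\mathrm{MSE} = \frac{1}{N}\sum_{i=1}^{N}\left(\hat{y}_i - y_i\right)^2$, where $\hat{y}_i$ denotes the prediction pred_out for sample $i$.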
5.4. Initialize all variables¶
In [ ]:
# Creating the op for initializing all variables
init = tf.global_variables_initializer()
6. Train¶
In [ ]:
# Launch the graph in a plain Session (not a with-block) so the test cell below can reuse it
sess = tf.Session()
sess.run(init)
print('----------Training---------')
for i in range(training_steps):
    x_batch, y_batch, seq_len_batch = next_batch(x_train, y_train, seq_len_train, batch_size)
    _, mse = sess.run([train_op, cost], feed_dict={x: x_batch, y: y_batch, seqLen: seq_len_batch})
    if i % display_freq == 0:
        print('Step {0:<6}, MSE={1:.4f}'.format(i, mse))
In [ ]:
# Test (note: the loop variable must not shadow the placeholder x)
y_pred = sess.run(pred_out, feed_dict={x: x_test, seqLen: seq_len_test})
print('--------Test Results-------')
for i in range(len(y_test)):
    print("When the ground-truth output is {}, the model predicts {}"
          .format(y_test[i], y_pred[i]))
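As a final check, we can feed a hand-made sequence and compare the prediction with the true sum (an illustrative example; the digits are arbitrary):
In [ ]:
# Hypothetical input: the padded sequence [3, 7, 0, 0] with true length 2
x_demo = np.array([[[3.], [7.], [0.], [0.]]])
pred = sess.run(pred_out, feed_dict={x: x_demo, seqLen: np.array([2])})
print('Predicted sum: {} (true sum: 10)'.format(pred[0]))
# Close the session when done
sess.close()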