Projector in TensorBoard Python application

I have the following code and sample, which works fine and does exactly what I want:
import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
import datetime
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
valid_set_size_percentage = 3
test_set_size_percentage = 3
seq_len = 5 # choose sequence length
df = pd.read_csv("Test.csv", encoding = 'utf-16',sep=',',index_col = 0)
df.head()
def normalize_data(df):
    cols = list(df.columns.values)
    min_max_scaler = sklearn.preprocessing.MinMaxScaler()
    df = pd.DataFrame(min_max_scaler.fit_transform(df.values))
    df.columns = cols
    return df
def load_data(stock, seq_len):
    data_raw = stock.as_matrix()  # convert to numpy array
    data = []
    print(data_raw.shape)
    for index in range(len(data_raw) - seq_len):
        data.append(data_raw[index: index + seq_len])
    data = np.array(data)
    valid_set_size = int(np.round(valid_set_size_percentage/100*data.shape[0]))
    test_set_size = int(np.round(test_set_size_percentage/100*data.shape[0]))
    train_set_size = data.shape[0] - (valid_set_size + test_set_size)
    x_train = data[:train_set_size, :-1, :]
    y_train = data[:train_set_size, -1, :4]
    x_valid = data[train_set_size:train_set_size+valid_set_size, :-1, :]
    y_valid = data[train_set_size:train_set_size+valid_set_size, -1, :4]
    x_test = data[train_set_size+valid_set_size:, :-1, :]
    y_test = data[train_set_size+valid_set_size:, -1, :4]
    return [x_train, y_train, x_valid, y_valid, x_test, y_test]
df_stock = df.copy()
cols = list(df_stock.columns.values)
print('df_stock.columns.values = ', cols)
df_stock_norm = df_stock.copy()
df_stock_norm = normalize_data(df_stock_norm)
x_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm, seq_len)
print(y_train[:2])
print('x_train.shape = ',x_train.shape)
print('y_train.shape = ', y_train.shape)
print('Inputs = ',x_train.shape[2])
print('Outputs = ', y_train.shape[1])
print('x_valid.shape = ',x_valid.shape)
print('y_valid.shape = ', y_valid.shape)
print('x_test.shape = ', x_test.shape)
print('y_test.shape = ',y_test.shape)
index_in_epoch = 0;
perm_array = np.arange(x_train.shape[0])
np.random.shuffle(perm_array)
def get_next_batch(batch_size):
    global index_in_epoch, x_train, perm_array
    if index_in_epoch > x_train.shape[0]:
        start = 0  # start next epoch
        index_in_epoch = 0  # batch_size
    start = index_in_epoch
    index_in_epoch += batch_size
    end = index_in_epoch
    return x_train[perm_array[start:end]], y_train[perm_array[start:end]]
n_steps = seq_len -1
n_inputs = x_train.shape[2]
n_neurons = 100
n_outputs = y_train.shape[-1]
n_layers = 2
learning_rate = 0.001
batch_size =10
n_epochs = 100
train_set_size = x_train.shape[0]
test_set_size = x_test.shape[0]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None,n_outputs])
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,
activation=tf.nn.leaky_relu, use_peepholes = True)
for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = outputs[:,n_steps-1,:] # keep only last output of sequence
loss = tf.reduce_mean(tf.squared_difference(outputs, y)) # loss function = mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for iteration in range(int(n_epochs*train_set_size/batch_size)):
        x_batch, y_batch = get_next_batch(batch_size)  # fetch the next training batch
        sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
        if iteration % int(1*train_set_size/batch_size) == 0:
            mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
            mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
            mse_test = loss.eval(feed_dict={X: x_test, y: y_test})
            print('%.2f epochs: MSE train/valid/test = %.3f/%.3f/%.3f' % (
                iteration*batch_size/train_set_size, mse_train, mse_valid, mse_test))
            try:
                save_path = saver.save(sess, "modelfile\\model"+str(iteration)+".ckpt")
            except Exception as e:
                print(e)
                if not os.path.exists("modelfile\\"):
                    os.makedirs("modelfile\\")
                save_path = saver.save(sess, "modelfile\\model"+str(iteration)+".ckpt")
A sample of the data I am working with was shared as a link in the original post.
I want to add the TensorBoard Projector to my code, but I cannot figure out how to do it. I want to visualize the different inputs I am feeding into my training. I am supplying the following columns and trying to predict the OHLC values:
'o', 'h', 'l', 'c', 'rel1', 'rel2', 'rel3', 'rel4', 'rel5', 'rel6', 'rel7', 'rel8'
I want to visualize the above columns in the projector to see how they relate to each other in producing the output.
Please let me know what I can do to achieve this.
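For context, this is roughly the kind of standalone setup I am picturing (the log directory name, the 12x8 matrix, and the random values below are placeholders I made up, not my real model):
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

log_dir = "projector_demo_logs"   # placeholder directory
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

col_names = ['o', 'h', 'l', 'c', 'rel1', 'rel2', 'rel3', 'rel4',
             'rel5', 'rel6', 'rel7', 'rel8']

# One embedding row per input column; random values just to exercise the projector.
embedding_var = tf.Variable(np.random.rand(len(col_names), 8).astype(np.float32),
                            name="column_embedding")

# metadata.tsv: one label per embedding row, in the same order as the rows.
with open(os.path.join(log_dir, "metadata.tsv"), "w") as f:
    for name in col_names:
        f.write(name + "\n")

config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.metadata_path = "metadata.tsv"

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    projector.visualize_embeddings(writer, config)
    # The projector reads the embedding values from a checkpoint saved inside log_dir.
    tf.train.Saver([embedding_var]).save(sess, os.path.join(log_dir, "embedding.ckpt"))
If I understand correctly, TensorBoard would then be started with --logdir pointing at that folder.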
EDITED:
I have tried the following, but I cannot see the projector tab:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None,n_outputs])
symbols = tf.placeholder(tf.int32, [None, 1], name='stock_labels')
embed_matrix = tf.Variable(
tf.random_uniform([1, n_inputs],0.0, 1.0),
name="embed_matrix"
)
stacked_symbols = tf.tile(symbols, [batch_size,n_steps], name='stacked_stock_labels')
stacked_embeds = tf.nn.embedding_lookup(embed_matrix, stacked_symbols)
# stacked_embeds = tf.nn.embedding_lookup(embed_matrix)
# After concat, inputs.shape = (batch_size, num_steps, input_size + embed_size)
inputs_with_embed = tf.concat([X, stacked_embeds], axis=2, name="inputs_with_embed")
embed_matrix_summ = tf.summary.histogram("embed_matrix", embed_matrix)
And edited the following lines in the session code:
merged_sum = tf.summary.merge_all()
global_step = 0
# Set up the logs folder
writer = tf.summary.FileWriter('logs')
writer.add_graph(sess.graph)
projector_config = projector.ProjectorConfig()
# You can add multiple embeddings. Here we add only one.
added_embed = projector_config.embeddings.add()
added_embed.tensor_name = embed_matrix.name
# Link this tensor to its metadata file (e.g. labels).
shutil.copyfile("logs\\metadata.tsv",
"logs\\metadata1.tsv")
added_embed.metadata_path = "metadata.tsv"
# The next line writes a projector_config.pbtxt in the LOG_DIR. TensorBoard will
# read this file during startup.
projector.visualize_embeddings(writer, projector_config)
sess.run(tf.global_variables_initializer())
if iteration % int(1*train_set_size/batch_size) == 0:
    global_step += 1
    mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
    mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
    mse_test = loss.eval(feed_dict={X: x_test, y: y_test})
    _, train_merge = sess.run([outputs, merged_sum], feed_dict={X: x_train, y: y_train})
    writer.add_summary(train_merge, global_step=global_step)
Here is the metadata.tsv file (shared as a link in the original post).
Please let me know what I missed.

Related

Cost function in classification gives NaN as a result

I am quite new to both Python and machine learning, and I am trying to create an ANN for a binary classification model to determine whether something is faulty or not. When I run the code below, the cost function comes up as NaN. Unfortunately, in order to compute an accuracy percentage, I need the cost to be an actual number. Is there an obvious error in my code, or is there another way to circumvent this problem by writing it differently? Any help will be much appreciated. Thanks.
import csv
import tensorflow as tf
import numpy as np
import pandas as pd
import urllib.request as request
import matplotlib.pyplot as plt
train_data = pd.read_csv("C:/Python35/train_data.csv", sep=',', header = None)
test_data = pd.read_csv("C:/Python35/test_data.csv", sep=',', header = None)
X_train = np.asarray(train_data)
X_test = np.asarray(test_data)
train_label = pd.read_csv("C:/Python35/train_label.csv", sep=',', header = None)
test_label = pd.read_csv("C:/Python35/test_label.csv", sep=',', header = None)
y_train = np.asarray(train_label)
y_test = np.asarray(test_label)
labels_train = (np.arange(2) == y_train[:,None]).astype(np.float32)
labels_test = (np.arange(2) == y_test[:,None]).astype(np.float32)
inputs = tf.placeholder(tf.float32, shape=(None, X_train.shape[1]), name='inputs')
label = tf.placeholder(tf.float32, shape=(None, 2), name='labels')
hid1_size = 128
w1 = tf.Variable(tf.random_normal([hid1_size, X_train.shape[1]], stddev=0.01), name='w1')
b1 = tf.Variable(tf.constant(0.1, shape=(hid1_size, 1)), name='b1')
y1 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(w1, tf.transpose(inputs)), b1)), keep_prob=0.5)
hid2_size = 256
w2 = tf.Variable(tf.random_normal([hid2_size, hid1_size], stddev=0.01), name='w2')
b2 = tf.Variable(tf.constant(0.1, shape=(hid2_size, 1)), name='b2')
y2 = tf.nn.dropout(tf.nn.relu(tf.add(tf.matmul(w2, y1), b2)), keep_prob=0.5)
wo = tf.Variable(tf.random_normal([2, hid2_size], stddev=0.01), name='wo')
bo = tf.Variable(tf.random_normal([2, 1]), name='bo')
yo = tf.transpose(tf.add(tf.matmul(wo, y2), bo))
lr = tf.placeholder(tf.float32, shape=(), name='learning_rate')
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yo, labels=label))
optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)
pred = tf.nn.softmax(yo)
pred_label = tf.argmax(pred, 1)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
sess.run(init)
for learning_rate in [0.05, 0.01]:
    for epoch in range(50):
        avg_cost = 0.0
        for i in range(X_train.shape[0]):
            _, c = sess.run([optimizer, loss], feed_dict={lr: learning_rate,
                                                          inputs: X_train[i, None],
                                                          label: labels_train[i, None].reshape(-1, 2)})
            avg_cost += c
        avg_cost /= X_train.shape[0]
        if epoch % 10 == 0:
            print("Epoch: {:3d} Train Cost: {:.4f}".format(epoch, avg_cost))
acc_train = accuracy.eval(feed_dict={inputs: X_train, label: labels_train.reshape(-1,2)})
print("Train accuracy: {:3.2f}%".format(acc_train*100.0))
acc_test = accuracy.eval(feed_dict={inputs: X_test, label: labels_test.reshape(-1,2)})
print("Test accuracy: {:3.2f}%".format(acc_test*100.0))

Implement inference bayesian network using session tensorflow

I am new to machine learning. I have a final project about prediction using two algorithms, an Artificial Neural Network and a Bayesian Neural Network. I want to compare the prediction results of the ANN and the BNN. I have finished the ANN program, but I have a problem with the BNN. I am following a tutorial from this link: bayesian neural network tutorial. This is my ANN sample code to train and evaluate the model:
keep_prob = tf.placeholder("float", name="keep_prob")
x = tf.placeholder(tf.float32, [None, n_input], name="x")
y = tf.placeholder(tf.float32, name="y")
training_epochs = 5000
display_step = 1000
batch_size = 5
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y), name="cost_function")
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, name="Adam").minimize(cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in tqdm(range(training_epochs)):
        avg_cost = 0.0
        total_batch = int(len(x_train) / batch_size)
        x_batches = np.array_split(x_train, total_batch)
        y_batches = np.array_split(y_train, total_batch)
        for i in range(total_batch):
            batch_x, batch_y = x_batches[i], y_batches[i]
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y, keep_prob: 0.8})
            avg_cost += c / total_batch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1), name="corr_pred")
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"), name="accuracy")
    # print('Accuracy: ', sess.run(accuracy, feed_dict={x: x_test, y: y_test}))
    print("Accuracy:", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))
and this is my BNN code:
# Importing required libraries
from math import floor
import edward as ed
import numpy as np
import pandas as pd
import tensorflow as tf
from edward.models import Normal, NormalWithSoftplusScale
from fancyimpute import KNN
from sklearn import preprocessing
# Read data
features_dummies_nan = pd.read_csv('csv/features_dummies_with_label.csv', sep=',')
# Function: impute missing value by KNN
def impute_missing_values_by_KNN():
    home_data = features_dummies_nan[[col for col in features_dummies_nan.columns if 'hp' in col]]
    away_data = features_dummies_nan[[col for col in features_dummies_nan.columns if 'ap' in col]]
    label_data = features_dummies_nan[[col for col in features_dummies_nan.columns if 'label' in col]]
    home_filled = pd.DataFrame(KNN(3).complete(home_data))
    home_filled.columns = home_data.columns
    home_filled.index = home_data.index
    away_filled = pd.DataFrame(KNN(3).complete(away_data))
    away_filled.columns = away_data.columns
    away_filled.index = away_data.index
    data_frame_out = pd.concat([home_filled, away_filled, label_data], axis=1)
    return data_frame_out
features_dummies = impute_missing_values_by_KNN()
target = features_dummies.loc[:, 'label'].values
data = features_dummies.drop('label', axis=1)
data = data.values
perm = np.random.permutation(len(features_dummies))
data = data[perm]
target = target[perm]
train_size = 0.9
train_cnt = floor(features_dummies.shape[0] * train_size)
x_train = data[0:train_cnt] # data_train
y_train = target[0:train_cnt] # target_train
x_test = data[train_cnt:] # data_test
y_test = target[train_cnt:] # target_test
keep_prob = tf.placeholder("float", name="keep_prob")
n_input = data.shape[1] # D
n_classes = 3
n_hidden_1 = 100 # H0
n_hidden_2 = 100 # H1
n_hidden_3 = 100 # H2
def neural_network(X, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out):
    hidden1 = tf.nn.relu(tf.matmul(X, W_0) + b_0)
    hidden2 = tf.nn.relu(tf.matmul(hidden1, W_1) + b_1)
    hidden3 = tf.nn.relu(tf.matmul(hidden2, W_2) + b_2)
    output = tf.matmul(hidden3, W_out) + b_out
    return tf.reshape(output, [-1])
scaler = preprocessing.StandardScaler().fit(x_train)
data_train_scaled = scaler.transform(x_train)
data_test_scaled = scaler.transform(x_test)
W_0 = Normal(loc=tf.zeros([n_input, n_hidden_1]), scale=5.0 * tf.ones([n_input, n_hidden_1]))
W_1 = Normal(loc=tf.zeros([n_hidden_1, n_hidden_2]), scale=5.0 * tf.ones([n_hidden_1, n_hidden_2]))
W_2 = Normal(loc=tf.zeros([n_hidden_2, n_hidden_3]), scale=5.0 * tf.ones([n_hidden_2, n_hidden_3]))
W_out = Normal(loc=tf.zeros([n_hidden_3, 1]), scale=5.0 * tf.ones([n_hidden_3, 1]))
b_0 = Normal(loc=tf.zeros(n_hidden_1), scale=5.0 * tf.ones(n_hidden_1))
b_1 = Normal(loc=tf.zeros(n_hidden_2), scale=5.0 * tf.ones(n_hidden_2))
b_2 = Normal(loc=tf.zeros(n_hidden_3), scale=5.0 * tf.ones(n_hidden_3))
b_out = Normal(loc=tf.zeros(1), scale=5.0 * tf.ones(1))
qW_0 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_input, n_hidden_1])),
scale=tf.Variable(tf.random_normal([n_input, n_hidden_1])))
qW_1 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
scale=tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])))
qW_2 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
scale=tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])))
qW_out = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_3, 1])),
scale=tf.Variable(tf.random_normal([n_hidden_3, 1])))
qb_0 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_1])),
scale=tf.Variable(tf.random_normal([n_hidden_1])))
qb_1 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_2])),
scale=tf.Variable(tf.random_normal([n_hidden_2])))
qb_2 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_3])),
scale=tf.Variable(tf.random_normal([n_hidden_3])))
qb_out = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([1])),
scale=tf.Variable(tf.random_normal([1])))
sigma_y = 1.0
x = tf.placeholder(tf.float32, [None, n_input])
y = Normal(loc=neural_network(x, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out), scale=sigma_y)
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1,
W_2: qW_2, b_2: qb_2,
W_out: qW_out, b_out: qb_out}, data={x: x_train, y: y_train})
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.05
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
1000, 0.3, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
inference.run(n_iter=5000, optimizer=optimizer, global_step=global_step)
But I want to compare the results of the two algorithms, so I want to keep some variables the same between the ANN and the BNN, for example the number of epochs. I then want to adapt my ANN code above to this BNN code section:
sigma_y = 1.0
x = tf.placeholder(tf.float32, [None, n_input])
y = Normal(loc=neural_network(x, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out), scale=sigma_y)
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1,
W_2: qW_2, b_2: qb_2,
W_out: qW_out, b_out: qb_out}, data={x: x_train, y: y_train})
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.05
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
1000, 0.3, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
inference.run(n_iter=5000, optimizer=optimizer, global_step=global_step)
There are several things I don't understand. In the ANN there is y = tf.placeholder(tf.float32, name="y"), but in the BNN it is y = Normal(loc=neural_network(x, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out), scale=sigma_y). Also, there is a scale in the BNN but not in the ANN. So, can I adapt my ANN train and test code to the BNN code above? I want to run the BNN inference the way sess.run() works in the ANN, so that I can compute the BNN prediction accuracy. Can I do that?
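From my reading of the Edward documentation, I imagine something roughly like the following could replace inference.run() so that the loop mirrors my ANN loop, but I am not sure it is the right way to do it (the use of initialize/update/finalize and ed.copy here is my own assumption, not verified end to end):
inference.initialize(n_iter=training_epochs, optimizer=optimizer, global_step=global_step)
sess = ed.get_session()
tf.global_variables_initializer().run()

for epoch in range(inference.n_iter):
    info_dict = inference.update()      # one optimization step, similar to sess.run([optimizer, cost])
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "loss=", info_dict['loss'])
inference.finalize()

# To score the test set: swap the priors for the learned posteriors and evaluate
y_post = ed.copy(y, {W_0: qW_0, b_0: qb_0,
                     W_1: qW_1, b_1: qb_1,
                     W_2: qW_2, b_2: qb_2,
                     W_out: qW_out, b_out: qb_out})
print("Test MSE:", ed.evaluate('mean_squared_error', data={x: x_test, y_post: y_test}))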

Seq2Seq in Python using tensorflow and tensorlayer

I am trying to build a chatbot in python using tensorflow and tensorlayer. My code is the following:
from sklearn.feature_extraction.text import CountVectorizer
from nltk.tokenize import word_tokenize
import os
import time
import re
import tensorflow as tf
import tensorlayer as tl
import cPickle as pickle
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
PAD_TOKEN = '<PAD>'
START_TOKEN = '<START>'
END_TOKEN = '<END>'
UNK_TOKEN = '<UNK>'
PAD_ID = 0
START_ID = 1
END_ID = 2
UNK_ID = 3
STARTING_VOCAB = {PAD_TOKEN: PAD_ID, START_TOKEN: START_ID, END_TOKEN: END_ID, UNK_TOKEN: UNK_ID}
_DIGIT_RE = re.compile(br"\d")
class Chatbot(object):
    def __init__(self, embedding_dim, n_layers=3):
        self.embedding_dim = embedding_dim
        self.n_layers = n_layers
        self.w2idx = STARTING_VOCAB
        self.idx2w = {}
        self.encode_seqs = None
        self.decode_seqs = None
        self.net = None
        self.net_rnn = None
        self.y = None

    @staticmethod
    def load():
        with open(os.path.join(location, 'object.pkl'), 'rb') as pickle_file:
            obj = pickle.load(pickle_file)
        obj.encode_seqs = tf.placeholder(dtype=tf.int64, shape=[1, None], name="encode_seqs")
        obj.decode_seqs = tf.placeholder(dtype=tf.int64, shape=[1, None], name="decode_seqs")
        obj.net, obj.net_rnn = Chatbot.model(obj.encode_seqs, obj.decode_seqs, obj.idx2w, obj.embedding_dim, obj.n_layers, is_train=False, reuse=True)
        obj.y = tf.nn.softmax(obj.net.outputs)
        new_saver = tf.train.import_meta_graph(os.path.join(location, 'my-model.meta'))
        new_saver.restore(sess, tf.train.latest_checkpoint(location))
        return obj
    @staticmethod
    def model(encode_seqs, decode_seqs, idx2w, embedding_dim, n_layers, is_train=True, reuse=False):
        with tf.variable_scope("model", reuse=reuse):
            # for chatbot, you can use the same embedding layer,
            # for translation, you may want to use 2 separate embedding layers
            with tf.variable_scope("embedding") as vs:
                net_encode = tl.layers.EmbeddingInputlayer(inputs=encode_seqs,
                                                           vocabulary_size=len(idx2w),
                                                           embedding_size=embedding_dim,
                                                           name='seq_embedding')
                vs.reuse_variables()
                tl.layers.set_name_reuse(True)
                net_decode = tl.layers.EmbeddingInputlayer(inputs=decode_seqs,
                                                           vocabulary_size=len(idx2w),
                                                           embedding_size=embedding_dim,
                                                           name='seq_embedding')
            net_rnn = tl.layers.Seq2Seq(net_encode,
                                        net_decode,
                                        cell_fn=tf.contrib.rnn.BasicLSTMCell,
                                        n_hidden=embedding_dim,
                                        initializer=tf.random_uniform_initializer(-0.1, 0.1),
                                        encode_sequence_length=tl.layers.retrieve_seq_length_op2(encode_seqs),
                                        decode_sequence_length=tl.layers.retrieve_seq_length_op2(decode_seqs),
                                        initial_state_encode=None,
                                        dropout=(0.5 if is_train else None),
                                        n_layer=n_layers,
                                        return_seq_2d=True, name='seq2seq')
            net_out = tl.layers.DenseLayer(net_rnn, n_units=len(idx2w), act=tf.identity, name='output')
        return net_out, net_rnn
    def train(self, X_train, y_train, sess, batch_size, n_epochs):
        n_step = int(len(X_train) / batch_size)
        # Create vocabulary
        X_train = [re.sub(_DIGIT_RE, UNK_TOKEN, x.decode('utf-8')) for x in X_train]
        y_train = [re.sub(_DIGIT_RE, UNK_TOKEN, x.decode('utf-8')) for x in y_train]
        vectorizer = CountVectorizer(tokenizer=word_tokenize)
        all_sentences = X_train + y_train
        vectorizer.fit_transform(all_sentences)
        for k, v in vectorizer.vocabulary_.iteritems():
            vectorizer.vocabulary_[k] = v + len(self.w2idx)
        self.w2idx.update(vectorizer.vocabulary_)
        self.idx2w = dict((v, k) for k, v in self.w2idx.iteritems())
        # Transform data from sentences to sequences of ids
        for i in range(len(X_train)):
            X_train_id_seq, y_train_id_seq = [], []
            for w in word_tokenize(X_train[i]):
                if w.lower() in self.w2idx:
                    X_train_id_seq.append(self.w2idx[w.lower()])
                else:
                    X_train_id_seq.append(self.w2idx[UNK_TOKEN])
            X_train[i] = X_train_id_seq + [PAD_ID]
            for w in word_tokenize(y_train[i]):
                if w.lower() in self.w2idx:
                    y_train_id_seq.append(self.w2idx[w.lower()])
                else:
                    y_train_id_seq.append(self.w2idx[UNK_TOKEN])
            y_train[i] = y_train_id_seq + [PAD_ID]
        training_encode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="encode_seqs")
        training_decode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="decode_seqs")
        training_target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_seqs")
        training_target_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_mask")
        training_net_out, _ = Chatbot.model(training_encode_seqs, training_decode_seqs, self.idx2w, self.embedding_dim, self.n_layers, is_train=True, reuse=False)
        # model for inferencing
        self.encode_seqs = tf.placeholder(dtype=tf.int64, shape=[1, None], name="encode_seqs")
        self.decode_seqs = tf.placeholder(dtype=tf.int64, shape=[1, None], name="decode_seqs")
        self.net, self.net_rnn = Chatbot.model(self.encode_seqs, self.decode_seqs, self.idx2w, self.embedding_dim, self.n_layers, is_train=False, reuse=True)
        self.y = tf.nn.softmax(self.net.outputs)
        loss = tl.cost.cross_entropy_seq_with_mask(logits=training_net_out.outputs, target_seqs=training_target_seqs,
                                                   input_mask=training_target_mask, return_details=False, name='cost')
        lr = 0.0001
        train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
        tl.layers.initialize_global_variables(sess)
        for epoch in range(n_epochs):
            epoch_time = time.time()
            # shuffle training data
            from sklearn.utils import shuffle
            X_train, y_train = shuffle(X_train, y_train, random_state=0)
            # train an epoch
            total_err, n_iter = 0, 0
            for X, Y in tl.iterate.minibatches(inputs=X_train, targets=y_train, batch_size=batch_size, shuffle=False):
                step_time = time.time()
                X = tl.prepro.pad_sequences(X)
                _target_seqs = tl.prepro.sequences_add_end_id(Y, end_id=END_ID)
                _target_seqs = tl.prepro.pad_sequences(_target_seqs)
                _decode_seqs = tl.prepro.sequences_add_start_id(Y, start_id=START_ID, remove_last=False)
                _decode_seqs = tl.prepro.pad_sequences(_decode_seqs)
                _target_mask = tl.prepro.sequences_get_mask(_target_seqs)
                _, err = sess.run([train_op, loss],
                                  {training_encode_seqs: X,
                                   training_decode_seqs: _decode_seqs,
                                   training_target_seqs: _target_seqs,
                                   training_target_mask: _target_mask})
                print("Epoch[%d/%d] step:[%d/%d] loss:%f took:%.5fs" % (
                    epoch, n_epochs, n_iter, n_step, err, time.time() - step_time))
                total_err += err
                n_iter += 1
            print("Epoch[%d/%d] averaged loss:%f took:%.5fs" % (epoch, n_epochs, total_err / n_iter,
                                                                time.time() - epoch_time))
    def save(self):
        if not os.path.exists(location):
            os.makedirs(location)
        saver = tf.train.Saver()
        saver.save(sess, os.path.join(location, 'my-model'))
        tf.train.write_graph(sess.graph, location, 'my-graph.pbtxt')
        self.net = None
        self.net_rnn = None
        self.y = None
        self.encode_seqs = None
        self.decode_seqs = None
        with open(os.path.join(location, 'object.pkl'), 'wb') as pickle_file:
            pickle.dump(self, pickle_file)
Training works perfectly fine: I pass one list of sentences as X_train and another as y_train. What I need help with is saving the model and then reloading it later for training or testing. I tried just using pickle, but it gives an error. How can I save and load seq2seq models in Python using TensorFlow and TensorLayer?
Pickling the object is not the best way to save your model.
After you finish training, use
saver = tf.train.Saver()
saver.save(sess, model_name)
and then load it with
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess=sess, save_path='sentiment_analysis_tsm')
In order to load it, you'll first have to build an equivalent model to the one you trained with.
To save the graph object, try using tf.train.write_graph.
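Putting those pieces together, a minimal save/restore round trip could look roughly like this (the checkpoint paths are placeholders; you still have to rebuild the same Chatbot.model graph before restoring):
import tensorflow as tf

# --- after training, while `sess` is still open ---
saver = tf.train.Saver()
saver.save(sess, 'checkpoints/chatbot-model')                               # variable values
tf.train.write_graph(sess.graph_def, 'checkpoints', 'chatbot-graph.pbtxt')  # graph structure

# --- later, in a fresh process ---
# 1. rebuild the exact same network (same Chatbot.model call, same shapes and names)
# 2. then restore the trained weights into it
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, 'checkpoints/chatbot-model')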

Predicting the tensorflow model

I am new to machine learning. I am studying the Iris dataset, and I used sepal length, sepal width, and petal length to predict petal width using a neural network. This gives 3 input nodes feeding A1 with bias b1, 10 hidden nodes feeding A2 with bias b2, and 1 output node.
Further, the variables x_val_train, x_val_test, y_val_train, and y_val_test are used for training and testing.
The main function is as below.
x_val = np.array([x[0:3] for x in iris.data])
y_val = np.array([x[3] for x in iris.data])
hidden_layer_size = 10
#Generate a 1D array of random numbers range round(len(x_val)*0.8
train_indices = np.random.choice(len(x_val), round(len(x_val)*0.8), replace = False)
#Create a set which does not contain the numbers in train_indices and turn it into array
test_indices = np.array(list(set(range(len(x_val))) - set(train_indices)))
#print("Train Indexes\n",train_indices,test_indices)
x_val_train = x_val[train_indices]
x_val_test = x_val[test_indices]
y_val_train = y_val[train_indices]
y_val_test = y_val[test_indices]
x_data = tf.placeholder(shape=[None, 3], dtype = tf.float32)
y_target = tf.placeholder(shape = [None, 1], dtype = tf.float32) #Figure out usage of None
#Create Layers for NN
A1 = tf.Variable(tf.random_normal(shape = [3,hidden_layer_size])) #Input -> Hidden
b1 = tf.Variable(tf.random_normal(shape = [hidden_layer_size])) #bias in Input for hidden
A2 = tf.Variable(tf.random_normal(shape = [hidden_layer_size,1])) #Hidden -> Output
b2 = tf.Variable(tf.random_normal(shape=[1])) #Hidden Layer Bias
#Generation of Model
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data,A1),b1))
final_output = tf.nn.relu(tf.add(tf.matmul(hidden_output,A2),b2))
cost = tf.reduce_mean(tf.square(y_target - final_output))
learning_rate = 0.01
model = tf.train.AdamOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
sess.run(init)
#Training Loop
loss_vec = []
test_loss = []
epoch = 500
for i in range(epoch):
    # generates len(x_val_train) random numbers
    rand_index = np.random.choice(len(x_val_train), size = batch_size)
    # Get len(x_val_train) data with its 3 input nodes
    rand_x = x_val_train[rand_index]
    #print(rand_index,rand_x)
    rand_y = np.transpose([y_val_train[rand_index]])
    sess.run(model, feed_dict = {x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(cost, feed_dict = {x_data: rand_x, y_target : rand_y})
    loss_vec.append(np.sqrt(temp_loss))
    test_temp_loss = sess.run(cost, feed_dict = {x_data : x_val_test, y_target : np.transpose([y_val_test])})
    test_loss.append(np.sqrt(test_temp_loss))
    if (i+1)%50!=0:
        print('Generation: ' + str(i+1) + '.loss = ' + str(temp_loss))
predict = tf.argmax(tf.add(tf.matmul(hidden_output,A2),b2), 1)
test = np.matrix('2 3 4')
pred = predict.eval(session = sess, feed_dict = {x_data : test})
print("pred: ", pred)
plt.plot(loss_vec, 'k-', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.show()
Also, in this code,
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data,A1),b1))
I have successfully trained my model after normalizing my data. But I need to predict the output from user-supplied input data.
Here,
test = np.matrix('2 3 4')
pred = predict.eval(session = sess, feed_dict = {x_data : test})
print("pred: ", pred)
I have written this code to predict the result, but pred always returns 0. I also tried it with more than 100 samples, and it still returns 0. Can you please tell me where I am going wrong?
Summary
Let's take a look at
predict = tf.argmax(tf.add(tf.matmul(hidden_output,A2),b2), 1)
This is (almost) equal to
predict = tf.argmax(final_output)
The argmax is the main issue. If final_output were a one-hot encoding, then argmax would make sense, but final_output is just an array of scalars.
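To see why, here is a tiny standalone check (plain NumPy, not part of the original code):
import numpy as np

# final_output has shape [batch, 1]; argmax along axis 1 of a one-column
# array can only ever return index 0, which is why pred was always 0.
scores = np.array([[1.45], [1.93], [0.37]])
print(np.argmax(scores, axis=1))   # -> [0 0 0]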
Full Working Code
Here is the full working code.
import numpy as np
import tensorflow as tf
import os
import urllib
# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
# If the training and test sets aren't stored locally, download them.
if not os.path.exists(IRIS_TRAINING):
    raw = urllib.urlopen(IRIS_TRAINING_URL).read()
    with open(IRIS_TRAINING, "w") as f:
        f.write(raw)
if not os.path.exists(IRIS_TEST):
    raw = urllib.urlopen(IRIS_TEST_URL).read()
    with open(IRIS_TEST, "w") as f:
        f.write(raw)
training_set = tf.contrib.learn.datasets.base.load_csv_with_header( filename=IRIS_TRAINING, target_dtype=np.int, features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header( filename=IRIS_TEST, target_dtype=np.int, features_dtype=np.float32)
x_val_train = training_set.data[:,:3]
x_val_test = test_set.data[:,:3]
y_val_train = training_set.data[:,3].reshape([-1,1])
y_val_test = test_set.data[:,3].reshape([-1,1])
x_data = tf.placeholder(shape=[None, 3], dtype = tf.float32)
y_target = tf.placeholder(shape = [None, 1], dtype = tf.float32) #Figure out usage of None
#Create Layers for NN
hidden_layer_size = 20
A1 = tf.Variable(tf.random_normal(shape = [3,hidden_layer_size])) #Input -> Hidden
b1 = tf.Variable(tf.random_normal(shape = [hidden_layer_size])) #bias in Input for hidden
A2 = tf.Variable(tf.random_normal(shape = [hidden_layer_size,1])) #Hidden -> Output
b2 = tf.Variable(tf.random_normal(shape = [1])) #Hidden Layer Bias
#Generation of model
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data,A1),b1))
final_output = tf.add(tf.matmul(hidden_output,A2),b2)
loss = tf.reduce_mean(tf.square(y_target - final_output))
learning_rate = 0.01
train = tf.train.AdamOptimizer(learning_rate).minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
#Training Loop
loss_vec = []
test_loss = []
epoch = 2000
batch_size = 100
def oneTrainingSession(epoch, loss_vec, test_loss, batch_size):
    rand_index = np.random.choice(len(x_val_train), size = batch_size)
    rand_x = x_val_train #[rand_index,:]
    rand_y = y_val_train #[rand_index,:]
    temp_loss, _ = sess.run([loss, train], feed_dict = {x_data: rand_x, y_target: rand_y})
    loss_vec.append(np.sqrt(temp_loss))
    test_temp_loss = sess.run(loss, feed_dict = {x_data: x_val_test, y_target: y_val_test})
    test_loss.append(np.sqrt(test_temp_loss))
    if (i+1) % 500 == 0:
        print('Generation: ' + str(i+1) + '.loss = ' + str(temp_loss))

for i in range(epoch):
    oneTrainingSession(epoch, loss_vec, test_loss, batch_size)
test = x_val_test[:3,:]
print "The test values are"
print test
print ""
pred = sess.run(final_output, feed_dict = {x_data : test})
print("pred: ", pred)
Output
Generation: 500.loss = 0.12768
Generation: 1000.loss = 0.0389756
Generation: 1500.loss = 0.0370268
Generation: 2000.loss = 0.0361797
The test values are
[[ 5.9000001 3. 4.19999981]
[ 6.9000001 3.0999999 5.4000001 ]
[ 5.0999999 3.29999995 1.70000005]]
('pred: ', array([[ 1.45187187],
[ 1.92516518],
[ 0.36887735]], dtype=float32))

Tensorflow different result using estimators and same generic model

I am working with the dbpedia training set (small_train, small_test). Using estimators with bag_of_words_model gives an accuracy of 0.70
Tensorflow version 0.9
Why is the following code behaving differently than the estimators? In estimators, graph_actions' _run_with_monitors does the same thing by running the train_op for the given number of steps:
outputs = session.run(tensors, feed_dict=feed_dict)
while the same model implemented as below gives an accuracy of 0.67:
with tf.Graph().as_default():
    sess = tf.Session()
    tf.set_random_seed(1618)
    keys_placeholder = tf.placeholder(tf.int64, shape=(None,), name='keys')
    keys = tf.identity(keys_placeholder)
    labels_placeholder = tf.placeholder(tf.int64, shape=(None,), name='labels')
    input_placeholder = tf.placeholder(tf.int64, shape=(None, 10), name='input')
    inputs = {'key': keys_placeholder.name, 'inputs_': input_placeholder.name}
    tf.add_to_collection('inputs', json.dumps(inputs))
    target = tf.one_hot(labels_placeholder, 15, 1, 0)
    word_vectors = learn.ops.categorical_variable(input_placeholder, n_classes=n_words,
                                                  embedding_size=EMBEDDING_SIZE, name='words')
    features = tf.reduce_max(word_vectors, reduction_indices=1)
    prediction, loss = learn.models.logistic_regression(features, target)
    prediction = tf.argmax(prediction, 1)
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer='Adam', learning_rate=0.01)
    outputs = {'key': keys.name, 'prediction': prediction.name}
    tf.add_to_collection('outputs', json.dumps(outputs))
    init = tf.initialize_all_variables()
    sess.run(init)
    for step in xrange(100):
        fill_feed_dict = {
            keys_placeholder: np.array(range(len(x_train))),
            labels_placeholder: y_train,
            input_placeholder: x_train}
        start_time = time.time()
        _, loss_value = sess.run([train_op, loss],
                                 feed_dict=fill_feed_dict)
        duration = time.time() - start_time
    print("Done Training")
    y_predicted = sess.run(prediction, feed_dict={input_placeholder: x_test})
    score = metrics.accuracy_score(y_test, y_predicted)
    print('Accuracy: {0:f}'.format(score))
I am reading the data as follows:
global n_words
training_set = pandas.read_csv('dbpedia_data/dbpedia_csv/train_small.csv', header=None)
testing_set = pandas.read_csv('dbpedia_data/dbpedia_csv/test_small.csv', header=None)
x_train = training_set[2]
y_train = training_set[0]
x_test = testing_set[2]
y_test = testing_set[0]
# Process vocabulary
np.random.seed(1)
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
