import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv("FuelConsumption.csv")
df = df[['ENGINE SIZE','CYLINDERS', 'Mcity', 'Mhwy', 'Mcmb', 'McmbMPG', 'CO2']]
features = np.asanyarray(df[['ENGINE SIZE','CYLINDERS', 'Mcity', 'Mhwy', 'Mcmb', 'McmbMPG']])
label = np.asanyarray(df[['CO2']])
mu = np.mean(features,axis=0)
sigma = np.std(features,axis=0)
feature_normalized = (features - mu)/sigma
n_training_samples = feature_normalized.shape[0]
n_dim = feature_normalized.shape[1]
feature_reshaped = np.reshape(features,[n_training_samples,n_dim])
label_reshaped = np.reshape(label,[n_training_samples,1])
train_X, test_X, train_Y, test_Y = train_test_split\
(feature_reshaped, label_reshaped,shuffle = True , test_size=0.25, random_state=42)
print("shape of training input = ", train_X.shape)
print("shape of training output = ", train_Y.shape)
numFeatures = train_X.shape[1]
print("Number of features = ", numFeatures)
numLabels = train_Y.shape[1]
print("Number of labels = ", numLabels)
learning_rate = 0.01
training_epochs = 1000
X = tf.placeholder(tf.float32,[None,numFeatures])
Y = tf.placeholder(tf.float32,[None,numLabels])
W = tf.Variable(tf.ones([numFeatures,numLabels]))
B = tf.Variable(tf.ones([1,numLabels]))
init = tf.global_variables_initializer()
Y_model = tf.add(tf.matmul(X, W, name="apply_weights"), B, name="add_bias")
cost = tf.reduce_mean(tf.square(Y_model - Y))
training_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
sess.run(init)
loss_values = []
train_data = []
for epoch in range(training_epochs):
_, loss_val, a_val, b_val = sess.run([training_step, cost, W, B],feed_dict={X: train_X,Y: train_Y})
loss_values.append(loss_val)
if epoch % 20 == 0:
print(epoch, loss_val, a_val, b_val)
train_data.append([a_val, b_val])
plt.plot(loss_values, '--')
plt.show()
I am trying to predict CO2 emission from multiple variables (cylinders, mileage, engine size, etc.) using linear regression, with the TensorFlow code above. When I run the model, the loss value, weights and biases are updated only for about 20 iterations; after that they become infinite/NaN. Is the problem with the code or with the choice of cost function/optimizer?
I have plotted the loss values (plot attached).
If only engine size and cylinders are used as features, the result is good. Adding any of the other features (mileage in the city, mileage on the highway, combined mileage) produces the problem described above. Is it a problem with the data itself? Please take a look at the attached scatter plot of city and highway mileage.
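For reference, one thing that stands out in the code itself: feature_normalized is computed but never used, because the reshape on the next line goes back to the raw features array, so gradient descent runs on the unscaled mileage values (a classic way to get NaN with a fixed learning rate). A minimal sketch of that one-line change, keeping everything else the same:
# use the standardized features (not the raw ones) when building the training arrays
feature_reshaped = np.reshape(feature_normalized, [n_training_samples, n_dim])
label_reshaped = np.reshape(label, [n_training_samples, 1])
If the raw features are kept instead, a much smaller learning rate would likely be needed for the optimizer not to diverge.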
I have created an LSTM predictor model that works very well on the train (8 years) and test (2 years) sets, but I now need to predict beyond the dates in the dataset.
I would like the predictions to extend beyond the data in the dataset (after the 10 years).
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_squared_error
import matplotlib.pyplot as plt
from rnn import RNN
import numpy as np
from torch import nn
import torch
from torch.autograd import Variable
ss_X_dep = StandardScaler()
ss_y_dep = StandardScaler()
def rmse(y1, y2):
return np.sqrt(mean_squared_error(y1, y2))
data = pd.read_csv('data/W042 RAIS/W042-Pz-4-Rais-SEAAl62-70.csv')
Inputs = data.drop('Year', axis=1).drop('Depth', axis=1)
Outputs = data['Depth']
Inputs = Inputs.to_numpy()
Outputs = Outputs.to_numpy().reshape(-1, 1)
# First 08 years of data
X_train_dep = Inputs[:62]
y_train_dep = Outputs[:62]
# Last 02 years of data
X_test_dep = Inputs[62:]
print("X_train_dep shape", X_train_dep.shape)
print("y_train_dep shape", y_train_dep.shape)
print("X_test_dep shape", X_test_dep.shape)
X = np.concatenate([X_train_dep, X_test_dep], axis=0)
# Standardization (Normalisation)
X = ss_X_dep.fit_transform(X)
# First 08 years of data
X_train_dep_std = X[:62]
y_train_dep_std = ss_y_dep.fit_transform(y_train_dep)
# All 10 years of data
X_test_dep_std = X
X_train_dep_std = np.expand_dims(X_train_dep_std, axis=0)
y_train_dep_std = np.expand_dims(y_train_dep_std, axis=0)
X_test_dep_std = np.expand_dims(X_test_dep_std, axis=0)
# Transfer to Pytorch Variable
X_train_dep_std = Variable(torch.from_numpy(X_train_dep_std).float())
y_train_dep_std = Variable(torch.from_numpy(y_train_dep_std).float())
X_test_dep_std = Variable(torch.from_numpy(X_test_dep_std).float())
# Define rnn model
model = RNN(input_size=5, hidden_size=40, num_layers=1, class_size=1, dropout=0.5, rnn_type='lstm')
# Define optimization function
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # optimize all RNN parameters
# Define loss function
loss_func = nn.MSELoss()
# Start training
for iter in range(2000+1): # 2000 iterations
model.train()
prediction = model(X_train_dep_std)
#X_train_dep_std.append(prediction)
# prediction = model(X_train_dep_std)
loss = loss_func(prediction, y_train_dep_std)
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # back propagation, compute gradients
optimizer.step()
if iter % 100 == 0:
print("iteration: %s, loss: %s" % (iter, loss.item()))
# Save model
save_filename = 'checkpoints/LSTM_DOUBLE_FC.pth'
torch.save(model, save_filename)
print('Saved as %s' % save_filename)
# Start evaluating model
model.eval()
y_pred_dep_ = model(X_test_dep_std).detach().numpy()
y_pred_dep = ss_y_dep.inverse_transform(y_pred_dep_[0, 0:])
print('The value of Root mean squared error (RMSE) of water table depth is :', rmse(Outputs[0:], y_pred_dep))
print('The value of mean squared error (MSE) of water table depth is :', mean_squared_error(Outputs[0:], y_pred_dep))
print('The value of R-squared (R2) of water table depth is :', r2_score(Outputs[0:], y_pred_dep))
f, ax1 = plt.subplots(1, 1, sharex=True, figsize=(15, 7))
ax1.plot(Outputs[0:], color="blue", linestyle="-", linewidth=2.5, label="Measurements")
ax1.plot(y_pred_dep, color="r", linestyle="--", linewidth=2.5, label="Proposed model")
plt.legend(loc='upper center')
plt.xticks(fontsize=10,fontweight='normal')
plt.yticks(fontsize=10,fontweight='normal')
plt.title('Predictions LSTM model W042-Pz-4-Rais-SEAAl 62-66', fontsize=15)
plt.xlabel('Mois d_apres le 2009-09', fontsize=15)
plt.ylabel('Variation de niveau statique NS (m)', fontsize=15)
plt.xlim(0, 85)
plt.savefig('./plots/lstm_doubl_aGood_Result.png', format='png')
plt.show()
Thanks for looking into this question! :)
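One note on the forecasting goal above: the model is driven by exogenous input features, so predicting past the last date requires values for those 5 inputs beyond the dataset (assumed, scenario-based, or forecast separately). A minimal sketch under that assumption, where X_future is a hypothetical array of future input-feature values with shape (n_future, 5):
# X_future is hypothetical: future values of the same 5 input features, shape (n_future, 5)
X_future_std = ss_X_dep.transform(X_future)              # reuse the scaler fitted on the past data
X_extended = np.concatenate([X, X_future_std], axis=0)    # past (already scaled) + future inputs
X_extended = np.expand_dims(X_extended, axis=0)           # shape (1, seq_len + n_future, 5)
X_extended = Variable(torch.from_numpy(X_extended).float())

model.eval()
y_pred_ext = model(X_extended).detach().numpy()
y_pred_ext = ss_y_dep.inverse_transform(y_pred_ext[0, 0:])
future_depth = y_pred_ext[-len(X_future):]                # the part beyond the original 10 years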
I attempted to train an LSTM network to predict the next 10-day stock prices of Google based on the past 30-day stock prices. I trained the LSTM, but the loss barely decreased even after 200 iterations. I suspected that the issue might be with the feed_dict in the tf Session, but I have not identified any problem there (perhaps due to my superficial knowledge). It seems that the optimizer is re-run every iteration in the tf Session.
I would appreciate advice on what might have gone wrong in the code, or on whether my understanding of how the Optimizer is used is wrong.
Thanks for your help!!
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys
import csv
import random
import tensorflow as tf
from tensorflow.contrib import rnn
# Define data reader
def read_data(fname):
with open(fname) as f:
data = list(csv.reader(f))
d_mat = np.array(data)
d_trsp = np.transpose(d_mat)
date = np.transpose(d_trsp[0])
p_open = d_trsp[1]
vol = d_trsp[6]
chg = d_trsp[7]
chg = [float(i) for i in chg]
return vol, chg
vol, training_data = read_data('GOOGL.csv')
training_data = training_data[0:300]
print("Loading training data..")
#Split data for learning
ratio_train = 0.70
ratio_valid = 0.90-ratio_train
ratio_test = 0.10 #fixed at 10% of dataset
# Parameters
learning_rate = 0.005
training_iters = 100
display_step = 1
x_size = 30
y_size = 5
n_hidden = 256
# Variables
x = tf.placeholder("float", [265, x_size])
y = tf.placeholder("float", [265, y_size])
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, y_size]))
}
biases = {
'out': tf.Variable(tf.random_normal([y_size]))
}
# Preprocess Data
def prod_data(data):
x = []
y = []
iter = len(data)-x_size-y_size
for i in range(0, iter):
x.append(data[i:i+x_size])
y.append(data[i+x_size+1: i+x_size+1+y_size])
return x, y
a,b = prod_data(training_data)
# Define RNN architecture
def RNN(x, weights, biases):
# Reshape x to [1, n_input]
x = tf.reshape(x, [-1, x_size])
x = tf.split(x, x_size, 1)
rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden), rnn.BasicLSTMCell(n_hidden)])
outputs, states = rnn.static_rnn(rnn_cell, x, dtype = tf.float32)
return tf.matmul(outputs[-1], weights['out'] + biases['out'])
pred = RNN(x, weights, biases)
# Loss and Optimizer
cost = tf.reduce_mean((pred-y)**2)
optimizer = tf.train.RMSPropOptimizer(learning_rate = learning_rate).minimize(cost)
# Initialization
init = tf.global_variables_initializer()
# Launch Tensor graph
with tf.Session() as sess:
sess.run(init)
step = 0
loss_total = 0
loss_coll = []
end_offset = len(training_data)-y_size-x_size-1
while step < training_iters:
_, loss, model_pred = sess.run([optimizer, cost, pred], \
feed_dict={x: a, y: b})
# Update total loss and accuracy
loss_total += loss
loss_coll.append(loss)
if (step+1) % display_step == 0:
print("Loss at step " + str(step) + " = " + str(loss))
loss_total = 0
step += 1
print("Optimization Finished!")
I am a beginner in TensorFlow. I implemented a TensorFlow MLP network to predict values between 0 and 1. The input values are floats between 0 and 1 and the weights are random floats between 0 and 1, but the output always returns 0 or 1, where I expect float values between 0 and 1. The code is given below.
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from scipy.io import loadmat
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape, stddev=0.01)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
def load_data():
dw = loadmat('dw.mat')
dv = loadmat('dv.mat')
dw_2 = loadmat('test_dw.mat')
dv_2 = loadmat('test_dv.mat')
train_dw = dw['dw']
train_dv = dv['dv']
test_dw = dw_2['test_dw']
test_dv = dv_2['test_dv']
scaler = MinMaxScaler()
train_dw = scaler.fit_transform(train_dw)
train_dv = scaler.fit_transform(train_dv)
test_dw = scaler.fit_transform(test_dw)
test_dv = scaler.fit_transform(test_dv)
rows = len(train_dw)
# train_input = dv(t), dw(t), dw(t-1), dw(t-2), dw_pred_neighbor1(t), dw_pred_neighbor2(t)
train_input = np.column_stack((np.ones((rows-3)),train_dv[2:(rows-1),1],train_dw[2:(rows-1),1],train_dw[1:(rows-2),1],train_dw[0:(rows-3),1],train_dw[2:(rows-1),2],train_dw[2:(rows-1),4]))
# train target = dw(t+1)
train_target = np.column_stack((np.ones((rows-3)),train_dw[3:rows,1]))
test_rows = len(test_dw)
# test_input = dv(t), dw(t), dw(t-1), dw(t-2), dw_pred_neighbor1(t), dw_pred_neighbor2(t)
test_input = np.column_stack((np.ones((test_rows-3)),test_dv[2:(test_rows-1),1],test_dw[2:(test_rows-1),1],test_dw[1:(test_rows-2),1],test_dw[0:(test_rows-3),1],test_dw[2:(test_rows-1),2],test_dw[2:(test_rows-1),4]))
# test target = dw(t+1)
test_target = np.column_stack((np.ones((test_rows-3)),test_dw[3:test_rows,1]))
return train_input, test_input, train_target, test_target
def main():
train_X, test_X, train_y, test_y = load_data()
# Layer's sizes
x_size = train_X.shape[1] # Number of input nodes
h_size = 10 # Number of hidden nodes
y_size = train_y.shape[1] # Number of outputs
# Symbols
X = tf.placeholder(dtype = tf.float32, shape=[None, x_size])
y = tf.placeholder(dtype = tf.float32, shape=[None, y_size])
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = tf.losses.mean_squared_error(y,yhat)
updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
# Run SGD
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
cost_history = np.empty(shape=[1],dtype=float)
for epoch in range(1000):
#Train with each example
for i in range(len(train_X)):
sess.run(updates, feed_dict={X: train_X, y: train_y})
pred = sess.run(predict,feed_dict={X: train_X})
print(pred)
plt.plot(range(len(pred)),pred)
plt.show()
cost_history = np.append(cost_history, sess.run(cost, feed_dict={X: train_X, y: train_y}))
train_accuracy = np.mean(np.argmax(train_y, axis=0) ==
sess.run(predict, feed_dict={X: train_X, y: train_y}))
test_accuracy = np.mean(np.argmax(test_y, axis=0) ==
sess.run(predict, feed_dict={X: test_X, y: test_y}))
print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
% (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))
plt.plot(range(len(cost_history)),cost_history)
plt.axis([0,epoch,0,np.max(cost_history)])
plt.show()
sess.close()
if __name__ == '__main__':
main()
tf.argmax returns the index of the element in the vector that has the maximum value, which is why you only ever see 0 or 1 here.
If you want the actual output values rather than their index, you can get the largest one with tf.reduce_max, or simply evaluate yhat itself.
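In other words, for a continuous target like this one the values of interest are the yhat outputs themselves, not the index returned by tf.argmax(yhat, axis=1). A minimal sketch using the names from the question's code:
# evaluate the raw network outputs instead of the argmax index
pred_values = sess.run(yhat, feed_dict={X: train_X})
print(pred_values)  # float outputs, one row per sample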
I'm really a beginner with TensorFlow and with this whole field, but I've watched all of Andrej Karpathy's lectures for the CS231n class, so I understand the code.
So this is the code (not mine): https://github.com/nfmcclure/tensorflow_cookbook/tree/master/09_Recurrent_Neural_Networks/02_Implementing_RNN_for_Spam_Prediction
# Implementing an RNN in TensorFlow
# ----------------------------------
#
# We implement an RNN in TensorFlow to predict spam/ham from texts
#
# https://github.com/nfmcclure/tensorflow_cookbook/blob/master/09_Recurrent_Neural_Networks/02_Implementing_RNN_for_Spam_Prediction/02_implementing_rnn.py
import os
import re
import io
import glob
import requests
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from zipfile import ZipFile
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph
sess = tf.Session()
# Set RNN parameters
epochs = 20
batch_size = 250
max_sequence_length = 25
rnn_size = 10
embedding_size = 50
min_word_frequency = 10
learning_rate = 0.0005
dropout_keep_prob = tf.placeholder(tf.float32)
# Download or open data
data_dir = 'temp'
data_file = 'text_data.txt'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.isfile(os.path.join(data_dir, data_file)):
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii', errors='ignore')
text_data = text_data.decode().split('\n')
# Save data to text file
with open(os.path.join(data_dir, data_file), 'w') as file_conn:
for text in text_data:
file_conn.write("{}\n".format(text))
else:
# Open data from text file
text_data = []
with open(os.path.join(data_dir, data_file), 'r') as file_conn:
for row in file_conn:
text_data.append(row)
text_data = text_data[:-1]
text_data = [x.split('\t') for x in text_data if len(x) >= 1]
text_data = [x for x in text_data if len(x) > 1]
print([list(x) for x in zip(*text_data)])
[text_data_target, text_data_train] = [list(x) for x in zip(*text_data)]
# Create a text cleaning function
def clean_text(text_string):
text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
text_string = " ".join(text_string.split())
text_string = text_string.lower()
return (text_string)
# Clean texts
text_data_train = [clean_text(x) for x in text_data_train]
# Change texts into numeric vectors
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,
min_frequency=min_word_frequency)
text_processed = np.array(list(vocab_processor.fit_transform(text_data_train)))
# Shuffle and split data
text_processed = np.array(text_processed)
text_data_target = np.array([1 if x == 'ham' else 0 for x in text_data_target])
shuffled_ix = np.random.permutation(np.arange(len(text_data_target)))
x_shuffled = text_processed[shuffled_ix]
y_shuffled = text_data_target[shuffled_ix]
# Split train/test set
ix_cutoff = int(len(y_shuffled) * 0.80)
x_train, x_test = x_shuffled[:ix_cutoff], x_shuffled[ix_cutoff:]
y_train, y_test = y_shuffled[:ix_cutoff], y_shuffled[ix_cutoff:]
vocab_size = len(vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(vocab_size))
print("80-20 Train Test split: {:d} -- {:d}".format(len(y_train), len(y_test)))
# Create placeholders
x_data = tf.placeholder(tf.int32, [None, max_sequence_length])
y_output = tf.placeholder(tf.int32, [None])
# Create embedding
embedding_mat = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
embedding_output = tf.nn.embedding_lookup(embedding_mat, x_data)
# embedding_output_expanded = tf.expand_dims(embedding_output, -1)
# Define the RNN cell
# TensorFlow >= 1.0 moved the RNN cells into the tensorflow.contrib directory. Prior versions not tested.
if tf.__version__[0] >= '1':
cell = tf.contrib.rnn.BasicRNNCell(num_units=rnn_size)
else:
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=rnn_size)
output, state = tf.nn.dynamic_rnn(cell, embedding_output, dtype=tf.float32)
output = tf.nn.dropout(output, dropout_keep_prob)
# Get output of RNN sequence
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([rnn_size, 2], stddev=0.1))
bias = tf.Variable(tf.constant(0.1, shape=[2]))
logits_out = tf.matmul(last, weight) + bias
# Loss function
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_out,
labels=y_output) # logits=float32, labels=int32
loss = tf.reduce_mean(losses)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits_out, 1), tf.cast(y_output, tf.int64)), tf.float32))
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
# Start training
for epoch in range(epochs):
# Shuffle training data
shuffled_ix = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffled_ix]
y_train = y_train[shuffled_ix]
num_batches = int(len(x_train) / batch_size) + 1
# TODO: calculate generations exactly
for i in range(num_batches):
# Select train data
min_ix = i * batch_size
max_ix = np.min([len(x_train), ((i + 1) * batch_size)])
x_train_batch = x_train[min_ix:max_ix]
y_train_batch = y_train[min_ix:max_ix]
# Run train step
train_dict = {x_data: x_train_batch, y_output: y_train_batch, dropout_keep_prob: 0.5}
sess.run(train_step, feed_dict=train_dict)
# Run loss and accuracy for training
temp_train_loss, temp_train_acc = sess.run([loss, accuracy], feed_dict=train_dict)
train_loss.append(temp_train_loss)
train_accuracy.append(temp_train_acc)
# Run Eval Step
test_dict = {x_data: x_test, y_output: y_test, dropout_keep_prob: 1.0}
temp_test_loss, temp_test_acc = sess.run([loss, accuracy], feed_dict=test_dict)
test_loss.append(temp_test_loss)
test_accuracy.append(temp_test_acc)
print('Epoch: {}, Test Loss: {:.2}, Test Acc: {:.2}'.format(epoch + 1, temp_test_loss, temp_test_acc))
# Plot loss over time
epoch_seq = np.arange(1, epochs + 1)
plt.plot(epoch_seq, train_loss, 'k--', label='Train Set')
plt.plot(epoch_seq, test_loss, 'r-', label='Test Set')
plt.title('Softmax Loss')
plt.xlabel('Epochs')
plt.ylabel('Softmax Loss')
plt.legend(loc='upper left')
plt.show()
# Plot accuracy over time
plt.plot(epoch_seq, train_accuracy, 'k--', label='Train Set')
plt.plot(epoch_seq, test_accuracy, 'r-', label='Test Set')
plt.title('Test Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.show()
def findFiles(path): return glob.glob(path)
pred_array = "words"
pred_num = np.array(list(vocab_processor.fit_transform(pred_array)))
print(pred_num)
pred_output = tf.placeholder(tf.float32,[1,len(pred_array),max_sequence_length])
feed_dict = {pred_output: [pred_num]}
classification = sess.run(losses, feed_dict)
print(classification)
It's an RNN spam classifier, and it's working great (except for the part I wrote at the end, where I'm trying to create the predictions).
I just want to understand how to create a prediction function for it, something that looks like this:
def predict(text):  # text is a string (my mail)
    # do prediction stuff
    return top_result  # ham or spam
The last few lines are my latest attempt, which gives me the following error:
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=<unknown>, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
I also tried to do something using Making predictions with a TensorFlow model, and I also read https://www.tensorflow.org/serving/serving_basic, and everything I've tried has failed...
Since I'm just a beginner, explanations are welcome, but I'm not sure I'll know how to code it myself, so please post the code of the answer too.
(Python 3.6 btw)
Thanks!
If you take a look at how the original code does the training and testing steps, specifically how they set up their train_dict and test_dict, you see that they feed values to each of the tensors defined as placeholder in the graph. Basically placeholders need to be given some value if they are going to be used in whatever calculation you are asking your network to do. Since you are looking for predictions from the network, you probably do not need to provide an expected output, but you will need to give it input data x_data, and a value for dropout_keep_prob. This should be dropout_keep_prob=1.0 for prediction.
You also want a prediction, not the loss of the network. The loss is basically a measure of how far your network's output is from what you expect, but since you are trying to predict something for new data you really just want to see what the network says it is. You can do this using the logits_out op directly, or we can add an op that converts your logits into a probability distribution over your classes. Either way you can look at the distribution to get an idea of how likely the network thinks your data falls into each category, or you can take the max value of this vector to just output the network's best guess.
So you might try something like:
prediction = tf.nn.softmax(logits_out)
feed_dict = {x_data: your_input_data, dropout_keep_prob: 1.0}
pred = sess.run(prediction, feed_dict)
best_guess = np.argmax(pred) # highest-rated class
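Putting those pieces together, a minimal sketch of the predict(text) helper the question asks for might look like the following (reusing the prediction op defined just above, and assuming the trained session, vocab_processor and clean_text from the question are still in scope; recall that labels were encoded as 1 = ham, 0 = spam):
def predict(text):  # text is a raw string (e.g. the body of a mail)
    cleaned = clean_text(text)
    # transform (not fit_transform) so the vocabulary learned during training is reused
    text_num = np.array(list(vocab_processor.transform([cleaned])))
    probs = sess.run(prediction, feed_dict={x_data: text_num, dropout_keep_prob: 1.0})
    return 'ham' if np.argmax(probs[0]) == 1 else 'spam'

print(predict("free entry in a weekly competition"))  # example call; prints 'ham' or 'spam'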
I have a dataset with 5 columns. I am feeding the first 3 columns as my inputs and the other 2 columns as my outputs. I have successfully executed the program, but I am not sure how to test the model by giving my own values as input and getting a predicted output from it.
Can anyone please help me: how can I actually test the model with my own values after training is done? I am using TensorFlow in Python. I am able to display the testing accuracy, but how do I actually get a prediction if I pass some input of my own (here, I need to pass 3 input values to get 2 output values)?
Here is my code:
# Implementation of a simple MLP network with one hidden layer. Tested on the iris data set.
# Requires: numpy, sklearn>=0.18.1, tensorflow>=1.0
# NOTE: In order to make the code simple, we rewrite x * W_1 + b_1 = x' * W_1'
# where x' = [x | 1] and W_1' is the matrix W_1 appended with a new row with elements b_1's.
# Similarly, for h * W_2 + b_2
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
import pandas as pd
RANDOM_SEED = 1000
tf.set_random_seed(RANDOM_SEED)
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape, stddev=0.1)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
def get_iris_data():
""" Read the iris data set and split them into training and test sets """
df = pd.read_csv("H:\MiniThessis\Sample.csv")
train_X = np.array(df[df.columns[0:3]])
train_Y = np.array(df[df.columns[3:]])
print(train_X)
# Convert into one-hot vectors
#num_labels = len(np.unique(train_Y))
#all_Y = np.eye(num_labels)[train_Y] # One liner trick!
#print()
return train_test_split(train_X, train_Y, test_size=0.33, random_state=RANDOM_SEED)
def main():
train_X, test_X, train_y, test_y = get_iris_data()
# Layer's sizes
x_size = train_X.shape[1] # Number of input nodes (3 features here)
h_size = 256 # Number of hidden nodes
y_size = train_y.shape[1] # Number of outputs (2 output columns here)
# Symbols
X = tf.placeholder("float", shape=[None, x_size])
y = tf.placeholder("float", shape=[None, y_size])
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
updates = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
# Run SGD
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(3):
# Train with each example
for i in range(len(train_X)):
sess.run(updates, feed_dict={X: train_X[i: i + 1], y: train_y[i: i + 1]})
train_accuracy = np.mean(np.argmax(train_y, axis=1) == sess.run(predict, feed_dict={X: train_X, y: train_y}))
test_accuracy = np.mean(np.argmax(test_y, axis=1) ==sess.run(predict, feed_dict={X: test_X, y: test_y}))
print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
% (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))
correct_Prediction = tf.equal((tf.arg_max(predict,1)),(tf.arg_max(y,1)))
best = sess.run([predict], feed_dict={X: np.array([[20.14, 46.93, 1014.66]])})
#print(correct_Prediction)
print(best)
sess.close()
if __name__ == '__main__':
main()
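Since this model has two continuous outputs (the last two columns of the dataset), the predicted values themselves come from the yhat tensor rather than from the argmax index that predict returns. A minimal sketch of querying the trained network with your own three input values, placed inside main() before sess.close():
# hypothetical hand-made input: one row with the 3 input features
my_input = np.array([[20.14, 46.93, 1014.66]])
my_output = sess.run(yhat, feed_dict={X: my_input})
print(my_output)  # shape (1, 2): the two predicted output values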