Tensorflow: Same input data, different output - python

After training the model, I save it and load it to run some tests. But every time I reload the model I get a different accuracy and different results with exactly the same input data. After training I print the accuracy and it always reaches a reasonable value (0.8 ~ 0.9), but when I reload the model it drops to something like 0.1 ~ 0.5 - I don't know whether that is related to the problem, but it is odd.
import tensorflow as tf
import numpy as np
import json
n_nodes_hl1 = 1600
n_nodes_hl2 = 800
n_nodes_hl3 = 400
n_nodes_hl4 = 200
n_classes = 4
batch_size = 50
input_lenght = 65
x = tf.placeholder('float', [None, input_lenght])
y = tf.placeholder('float')
def train_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.00001).minimize(cost)

    hm_epochs = 20000
    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(init_op)

    epoch = 0
    for epoch in range(hm_epochs):
        epoch_cost = 0
        i = 0
        while i < len(train_x):
            start = i
            end = i + batch_size
            batch_x = np.array(train_x[start:end])
            batch_y = np.array(train_y[start:end])
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
            epoch_cost += c
            i += batch_size

    save_path = saver.save(sess, "drive/My Drive/datasets/tensorflow/model")

    correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
    print("accuracy:", accuracy.eval({x: test_x, y: test_y}, session=sess))
    sess.close()
def group_test_train(features_data, labels_data, test_size):
    featureset = []
    for i in range(test_size):
        featureset += [[features_data[i], labels_data[i]]]
    featureset = np.array(featureset)
    np.random.shuffle(featureset)

    train_x = list(featureset[:, 0][:test_size // 2])
    train_y = list(featureset[:, 1][:test_size // 2])
    test_x = list(featureset[:, 0][test_size // 2:])
    test_y = list(featureset[:, 1][test_size // 2:])
    return train_x, train_y, test_x, test_y
def neural_network_model(data):
    hidden1 = {'weights': tf.Variable(tf.random_uniform([input_lenght, n_nodes_hl1], -1, 1)),
               'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden2 = {'weights': tf.Variable(tf.random_uniform([n_nodes_hl1, n_nodes_hl2], -1, 1)),
               'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden3 = {'weights': tf.Variable(tf.random_uniform([n_nodes_hl2, n_nodes_hl3], -1, 1)),
               'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    hidden4 = {'weights': tf.Variable(tf.random_uniform([n_nodes_hl3, n_nodes_hl4], -1, 1)),
               'biases': tf.Variable(tf.random_normal([n_nodes_hl4]))}
    l_output = {'weights': tf.Variable(tf.random_uniform([n_nodes_hl4, n_classes], -1, 1)),
                'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden1['weights']), hidden1['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden2['weights']), hidden2['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden3['weights']), hidden3['biases'])
    l3 = tf.nn.relu(l3)
    l4 = tf.add(tf.matmul(l3, hidden4['weights']), hidden4['biases'])
    l4 = tf.nn.relu(l4)
    output = tf.add(tf.matmul(l4, l_output['weights']), l_output['biases'])
    return output
version = 'end'
with open('drive/My Drive/datasets/json/' + 'data-' + version + '.json') as json_file:
    x_, y_ = json.load(json_file)

train_x, train_y, test_x, test_y = group_test_train(x_, y_, len(x_))
train_network(x)
Every time I run the part below, the accuracy changes and so does the output.
prediction = neural_network_model(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.00001).minimize(cost)
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)
new_saver = tf.train.import_meta_graph('drive/My Drive/datasets/tensorflow/model.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('drive/My Drive/datasets/tensorflow/'))
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print("accuracy:", accuracy.eval({x: train_x, y: train_y}, session=sess))

Related

ValueError: Cannot feed value of shape (24500, 50, 50, 1) for Tensor 'Placeholder_34:0', which has shape '(?, 2500)'

This is the cat vs. dog problem from the Kaggle competition. My code looks correct to me, but a ValueError keeps being raised. I think I have given the correct input size, but the error still appears.
Please help me find the error.
Here's my full code:
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from random import shuffle
import tensorflow as tf
from tqdm import tqdm
TRAIN_DIR = 'C:\\Users\Kashif\PycharmProjects\DeepLearning-Tensorflow (Sentdex)\Learnings\Cat_VS_Dog\TrainingData'
TEST_DIR = 'C:\\Users\Kashif\PycharmProjects\DeepLearning-Tensorflow (Sentdex)\Learnings\Cat_VS_Dog\TestingData'
IMG_SIZE = 50
MODEL_NAME = 'dogvscat-{}-{}.model'.format(LR, '2conv-basic')
def label_img(img):
    word_label = img.split('.')[-3]
    if word_label == 'cat': return [1,0]
    elif word_label == 'dog': return [0,1]

def create_train_data():
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data

def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR, img)
        img_num = img.split('.')[0]
        img = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE), (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), img_num])
    np.save('test_data.npy', testing_data)
    return testing_data
train_data = create_train_data()
learning_rate = 0.01
epochs = 10
batch_size = 128
n_classes = 2
drop_out = 0.8
filter_h_w = 5
depth_in = 1
depth_out_1 = 32
depth_out_2 = 64
x = tf.placeholder('float', [None, IMG_SIZE * IMG_SIZE])
y = tf.placeholder('float', [None, n_classes])
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def maxpool2d(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def conv_nural_network(x):
    weights = {
        'W_conv1': tf.Variable(tf.random_normal([filter_h_w, filter_h_w, depth_in, depth_out_1])),
        'W_conv2': tf.Variable(tf.random_normal([filter_h_w, filter_h_w, depth_out_1, depth_out_2])),
        'W_fc': tf.Variable(tf.random_normal([int(IMG_SIZE/4) * int(IMG_SIZE/4) * depth_out_2, 1024])),
        'out': tf.Variable(tf.random_normal([1024, n_classes]))
    }
    biases = {
        'b_conv1': tf.Variable(tf.random_normal([depth_out_1])),
        'b_conv2': tf.Variable(tf.random_normal([depth_out_2])),
        'b_fc': tf.Variable(tf.random_normal([1024])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }

    x = tf.reshape(x, shape=[-1, IMG_SIZE, IMG_SIZE, 1])

    conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool2d(conv1)
    conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool2d(conv2)

    fc = tf.reshape(conv2, [-1, int(IMG_SIZE/4) * int(IMG_SIZE/4) * depth_out_2])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
    fc = tf.nn.dropout(fc, drop_out)

    output = tf.matmul(fc, weights['out']) + biases['out']
    return output
train = train_data[:-500]
test = train_data[-500:]
train_X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
train_y = [i[1] for i in train]
test_X = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
test_y = [i[1] for i in test]
def train_neural_network(x):
    prediction = conv_nural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()

    loss_trace = []
    accuracy_trace = []

    with tf.Session() as sess:
        sess.run(init)
        for i in range(epochs):
            sess.run(optimizer, feed_dict={x: train_X, y: train_y})
            loss = sess.run(cost_function, feed_dict={x: train_X, y: train_y})
            accuracy = np.mean(np.argmax(sess.run(prediction, feed_dict={x: train_X, y: train_y}), axis=1) == np.argmax(train_y, axis=1))
            loss_trace.append(loss)
            accuracy_trace.append(accuracy)
            print('Epoch:', (i + 1), 'loss:', loss, 'accuracy:', accuracy)

        print('Final training result:', 'loss:', loss, 'accuracy:', accuracy)
        loss_test = sess.run(cost_function, feed_dict={x: test_X, y: test_y})
        test_pred = np.argmax(sess.run(prediction, feed_dict={x: test_X, y: test_y}), axis=1)
        accuracy_test = np.mean(test_pred == np.argmax(test_y, axis=1))
        print('Results on test dataset:', 'loss:', loss_test, 'accuracy:', accuracy_test)

train_neural_network(x)
After that, this error comes up. A ValueError is raised, but I don't know where I am feeding the wrong input.
ValueError Traceback (most recent call last)
<ipython-input-91-7682c5a4d0ec> in <module>
25
26
---> 27 train_neural_network(x)
<ipython-input-91-7682c5a4d0ec> in train_neural_network(x)
11 sess.run(init)
12 for i in range(epochs):
---> 13 sess.run(optimizer, feed_dict={x: train_X, y: train_y})
14 loss = sess.run(cost_function, feed_dict={x: train_X, y: train_y})
15 accuracy = np.mean(np.argmax(sess.run(prediction,feed_dict={x:train_X,y:train_y}),axis=1) == np.argmax(train_y,axis=1))
ValueError: Cannot feed value of shape (24500, 50, 50, 1) for Tensor 'Placeholder_34:0', which has shape '(?, 2500)'
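One way to reconcile the two shapes, sketched under the assumption that the flat placeholder is kept (illustrative, not a confirmed fix): x is declared as [None, IMG_SIZE * IMG_SIZE], i.e. (?, 2500), while the arrays being fed were reshaped to (-1, 50, 50, 1), which is exactly the mismatch in the traceback.

# Sketch (assumption): flatten the images so the feed matches the (?, IMG_SIZE * IMG_SIZE) placeholder.
train_X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE * IMG_SIZE)
test_X = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE * IMG_SIZE)
# conv_nural_network() already reshapes the input back to [-1, IMG_SIZE, IMG_SIZE, 1] internally,
# so only the feed needs to change. Alternatively, the placeholder could be declared as
# [None, IMG_SIZE, IMG_SIZE, 1] and the reshape inside the network dropped.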

Why do I get train accuracy instead of test accuracy in TensorBoard?

I want to see test accuracy in TensorBoard, but it seems I am getting accuracy on the training data. I print the test accuracy to the console and it shows about 70%, but in TensorBoard the accuracy curve keeps growing and ends up at almost 100%.
This is my code:
def train_crack_captcha_cnn(is_train, checkpoint_dir):
    global max_acc
    X = tf.placeholder(tf.float32, [None, dr.ROWS, dr.COLS, dr.CHANNELS])
    Y = tf.placeholder(tf.float32, [None, 1, 1, 2])
    output, end_points = resnet_v2_50(X, num_classes=2)
    global_steps = tf.Variable(1, trainable=False)
    learning_rate = tf.train.exponential_decay(0.001, global_steps, 100, 0.9)
    with tf.device('/device:GPU:0'):
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=output))
        # optimizer: to speed up training, learning_rate should start large and then decay slowly
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_steps)
        predict = tf.argmax(output, axis=3)
        l = tf.argmax(Y, axis=3)
        correct_pred = tf.equal(predict, l)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    ## tensorboard
    tf.summary.scalar('test_accuracy', accuracy)
    tf.summary.scalar("loss", loss)
    tf.summary.scalar("learning_rate", learning_rate)
    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        if is_train:
            writer = tf.summary.FileWriter("/tmp/cnn_log/log", graph=sess.graph)
            sess.run(tf.global_variables_initializer())
            step_value = sess.run(global_steps)
            while step_value < 100000:
                step_value = sess.run(global_steps)
                merged = tf.summary.merge_all()
                batch_x, batch_y = get_next_batch()
                result, _, _loss = sess.run([merged, optimizer, loss], feed_dict={X: batch_x, Y: batch_y})
                writer.add_summary(result, step_value)
                print('step : {} loss : {}'.format(step_value, _loss))
                # compute the accuracy once every 100 steps
                if step_value % 20 == 0:
                    acc = sess.run(accuracy, feed_dict={X: validation, Y: validation_labels})
                    print('accuracy : {}'.format(acc))
                    # if the accuracy exceeds max_acc, save the model and finish training
                    if acc > max_acc:
                        max_acc = float(acc)  # convert the type so it does not keep the same reference
                        saver.save(sess, checkpoint_dir + "/" + str(step_value) + '-' + str(acc) + "/model.ckpt", global_step=global_steps)
                        ##### predict #####
                        # predict_y = sess.run(output, feed_dict={X: test})
                        # data = pd.DataFrame([i for i in range(1, len(predict_y) + 1)], columns=['id'])
                        # predict_y = np.argmax(predict_y, axis=3)
                        # predict_y = np.reshape(predict_y, (-1))
                        # print(predict_y)
                        # predict_y = pd.Series(predict_y, name='label')
                        # data['label'] = predict_y
                        # data.to_csv("gender_submission.csv" + str(step), index=False)
                        ##### end #####
            writer.close()
        else:
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            acc = sess.run(accuracy, feed_dict={X: validation, Y: validation_labels})
            print('accuracy : {}'.format(acc))
I add the accuracy to TensorBoard like this:
tf.summary.scalar('test_accuracy', accuracy)
and every 20 steps I compute one accuracy value on the test data and print it to the console, which is not the same as the value shown in TensorBoard.
Why?
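A sketch of one way to line the two up, under the assumption that the accuracy scalar should be logged from the validation feed rather than from the merged training run (illustrative only, reusing the question's variable names):

# Sketch: give the validation accuracy its own summary and write it with the validation feed,
# instead of letting merge_all() evaluate it on the training batch.
test_acc_summary = tf.summary.scalar('validation_accuracy', accuracy)   # hypothetical extra scalar
if step_value % 20 == 0:
    acc, acc_summ = sess.run([accuracy, test_acc_summary],
                             feed_dict={X: validation, Y: validation_labels})
    writer.add_summary(acc_summ, step_value)
    print('accuracy : {}'.format(acc))
# Note: if this scalar is also picked up by merge_all(), the same tag will additionally be
# written from the training batch, so it can help to create it after the merged op or to
# use a separate FileWriter for validation points.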

Implement Bayesian neural network inference using a TensorFlow session

I am new to machine learning. I have a final project about prediction using two algorithms, an Artificial Neural Network and a Bayesian Neural Network, and I want to compare the prediction results of the ANN and the BNN. I have finished the ANN program, but I have a problem with the BNN. I am following a tutorial from this link: bayesian neural network tutorial. This is my ANN sample code to train and evaluate the model.
keep_prob = tf.placeholder("float", name="keep_prob")
x = tf.placeholder(tf.float32, [None, n_input], name="x")
y = tf.placeholder(tf.float32, name="y")
training_epochs = 5000
display_step = 1000
batch_size = 5
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y), name="cost_function")
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, name="Adam").minimize(cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in tqdm(range(training_epochs)):
        avg_cost = 0.0
        total_batch = int(len(x_train) / batch_size)
        x_batches = np.array_split(x_train, total_batch)
        y_batches = np.array_split(y_train, total_batch)
        for i in range(total_batch):
            batch_x, batch_y = x_batches[i], y_batches[i]
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y, keep_prob: 0.8})
            avg_cost += c / total_batch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print("Optimization Finished!")

    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1), name="corr_pred")
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"), name="accuracy")
    # print('Accuracy: ', sess.run(accuracy, feed_dict={x: x_test, y: y_test}))
    print("Accuracy:", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))
and this is my BNN code:
# Importing required libraries
from math import floor
import edward as ed
import numpy as np
import pandas as pd
import tensorflow as tf
from edward.models import Normal, NormalWithSoftplusScale
from fancyimpute import KNN
from sklearn import preprocessing
# Read data
features_dummies_nan = pd.read_csv('csv/features_dummies_with_label.csv', sep=',')
# Function: impute missing value by KNN
def impute_missing_values_by_KNN():
    home_data = features_dummies_nan[[col for col in features_dummies_nan.columns if 'hp' in col]]
    away_data = features_dummies_nan[[col for col in features_dummies_nan.columns if 'ap' in col]]
    label_data = features_dummies_nan[[col for col in features_dummies_nan.columns if 'label' in col]]

    home_filled = pd.DataFrame(KNN(3).complete(home_data))
    home_filled.columns = home_data.columns
    home_filled.index = home_data.index

    away_filled = pd.DataFrame(KNN(3).complete(away_data))
    away_filled.columns = away_data.columns
    away_filled.index = away_data.index

    data_frame_out = pd.concat([home_filled, away_filled, label_data], axis=1)
    return data_frame_out
features_dummies = impute_missing_values_by_KNN()
target = features_dummies.loc[:, 'label'].values
data = features_dummies.drop('label', axis=1)
data = data.values
perm = np.random.permutation(len(features_dummies))
data = data[perm]
target = target[perm]
train_size = 0.9
train_cnt = floor(features_dummies.shape[0] * train_size)
x_train = data[0:train_cnt] # data_train
y_train = target[0:train_cnt] # target_train
x_test = data[train_cnt:] # data_test
y_test = target[train_cnt:] # target_test
keep_prob = tf.placeholder("float", name="keep_prob")
n_input = data.shape[1] # D
n_classes = 3
n_hidden_1 = 100 # H0
n_hidden_2 = 100 # H1
n_hidden_3 = 100 # H2
def neural_network(X, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out):
    hidden1 = tf.nn.relu(tf.matmul(X, W_0) + b_0)
    hidden2 = tf.nn.relu(tf.matmul(hidden1, W_1) + b_1)
    hidden3 = tf.nn.relu(tf.matmul(hidden2, W_2) + b_2)
    output = tf.matmul(hidden3, W_out) + b_out
    return tf.reshape(output, [-1])
scaler = preprocessing.StandardScaler().fit(x_train)
data_train_scaled = scaler.transform(x_train)
data_test_scaled = scaler.transform(x_test)
W_0 = Normal(loc=tf.zeros([n_input, n_hidden_1]), scale=5.0 * tf.ones([n_input, n_hidden_1]))
W_1 = Normal(loc=tf.zeros([n_hidden_1, n_hidden_2]), scale=5.0 * tf.ones([n_hidden_1, n_hidden_2]))
W_2 = Normal(loc=tf.zeros([n_hidden_2, n_hidden_3]), scale=5.0 * tf.ones([n_hidden_2, n_hidden_3]))
W_out = Normal(loc=tf.zeros([n_hidden_3, 1]), scale=5.0 * tf.ones([n_hidden_3, 1]))
b_0 = Normal(loc=tf.zeros(n_hidden_1), scale=5.0 * tf.ones(n_hidden_1))
b_1 = Normal(loc=tf.zeros(n_hidden_2), scale=5.0 * tf.ones(n_hidden_2))
b_2 = Normal(loc=tf.zeros(n_hidden_3), scale=5.0 * tf.ones(n_hidden_3))
b_out = Normal(loc=tf.zeros(1), scale=5.0 * tf.ones(1))
qW_0 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_input, n_hidden_1])),
scale=tf.Variable(tf.random_normal([n_input, n_hidden_1])))
qW_1 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
scale=tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])))
qW_2 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
scale=tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])))
qW_out = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_3, 1])),
scale=tf.Variable(tf.random_normal([n_hidden_3, 1])))
qb_0 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_1])),
scale=tf.Variable(tf.random_normal([n_hidden_1])))
qb_1 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_2])),
scale=tf.Variable(tf.random_normal([n_hidden_2])))
qb_2 = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([n_hidden_3])),
scale=tf.Variable(tf.random_normal([n_hidden_3])))
qb_out = NormalWithSoftplusScale(loc=tf.Variable(tf.random_normal([1])),
scale=tf.Variable(tf.random_normal([1])))
sigma_y = 1.0
x = tf.placeholder(tf.float32, [None, n_input])
y = Normal(loc=neural_network(x, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out), scale=sigma_y)
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1,
W_2: qW_2, b_2: qb_2,
W_out: qW_out, b_out: qb_out}, data={x: x_train, y: y_train})
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.05
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
1000, 0.3, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
inference.run(n_iter=5000, optimizer=optimizer, global_step=global_step)
But I want to compare the results of the two algorithms, so I want some settings to be the same between the ANN and the BNN, for example the number of epochs. I then want to adapt my ANN code above to this BNN code section.
sigma_y = 1.0
x = tf.placeholder(tf.float32, [None, n_input])
y = Normal(loc=neural_network(x, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out), scale=sigma_y)
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1,
W_2: qW_2, b_2: qb_2,
W_out: qW_out, b_out: qb_out}, data={x: x_train, y: y_train})
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.05
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
1000, 0.3, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
inference.run(n_iter=5000, optimizer=optimizer, global_step=global_step)
There are several things that I don't understand. In the ANN there is y = tf.placeholder(tf.float32, name="y"), but in the BNN it is y = Normal(loc=neural_network(x, W_0, W_1, W_2, W_out, b_0, b_1, b_2, b_out), scale=sigma_y). Also, there is a scale in the BNN but not in the ANN. So, can I adapt my ANN train-and-test sample code to the BNN sample code above? I want to run inference on the BNN the way sess.run() works for the ANN, so I can compute the BNN's prediction accuracy. Can I do that?
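One possible direction, offered only as a sketch built on assumptions about the Edward API and about this scalar-output network (not a verified recipe): after inference.run() finishes, the fitted variational distributions qW_* and qb_* can be sampled, the deterministic neural_network() can be run with those samples, and the averaged predictions compared against y_test in NumPy, which plays a role similar to sess.run() in the ANN code.

# Sketch only: assumes Edward exposes ed.get_session() and that the q* variational
# distributions support .sample(); both are assumptions to check against the installed version.
sess = ed.get_session()
y_sample = neural_network(x,
                          qW_0.sample(), qW_1.sample(), qW_2.sample(), qW_out.sample(),
                          qb_0.sample(), qb_1.sample(), qb_2.sample(), qb_out.sample())
# Each sess.run draws a fresh posterior sample of the weights.
preds = np.stack([sess.run(y_sample, feed_dict={x: data_test_scaled}) for _ in range(50)])
y_mean = preds.mean(axis=0)  # posterior-predictive mean, one value per test example
# With this scalar (regression-style) output, an accuracy-like score could be taken as, e.g.:
print("accuracy:", np.mean(np.round(y_mean) == y_test))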

TensorFlow - using a saved model won't return correct results

My specs:
Windows 10 64,
Python 3.6,
Tensorflow 1.0.1.
I've been trying to train and use a neural network against the MNIST dataset:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
tf.reset_default_graph()
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100
x = tf.placeholder('float',[None,784])
y = tf.placeholder('float')
current_epoch = tf.Variable(1)
def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output
saver = tf.train.Saver()
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples/batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c

        correct = tf.equal(tf.arg_max(prediction, 1), tf.arg_max(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        saver.save(sess, "/tmp/model.ckpt")
train_neural_network(x)
After that I passed one of the MNIST images through the trained model:
import cv2
import numpy as np
mn = cv2.imread('352.png')
mn = cv2.cvtColor(mn,cv2.COLOR_BGR2GRAY)
mn2 = np.array(list(mn.flatten()))
x = tf.placeholder('float')
y = tf.placeholder('float')
with tf.Session() as sess:
    prediction = neural_network_model(x)
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "/tmp/model.ckpt")
    result = sess.run(tf.argmax(prediction.eval(feed_dict={x: [mn2]}), 1))
    print(prediction.eval(feed_dict={x: [mn2]}))
    print(result)
Unfortunately, with a model accuracy of about 95%, I keep getting wrong results - not only are they wrong, they are also not consistent across runs.
Say I pass an image of the digit 8; I get 9, 4, 3...
The model file model.ckpt.data-00000-of-00001 saved to disk is about 10 megabytes. When I try to restore the model on every training loop (as suggested in the pythonprogramming.net course), the model does not seem to update, because the saved files are only 1 KB.
What am I doing wrong here?
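For reference, here is a sketch of the kind of evaluation script that usually pairs with this training code. It is built on assumptions (that the input image should be scaled to [0, 1] like the MNIST training data, and that the Saver should be created after the model variables exist), not a confirmed fix:

import cv2
import numpy as np
import tensorflow as tf

mn = cv2.imread('352.png', cv2.IMREAD_GRAYSCALE)
mn2 = mn.flatten().astype(np.float32) / 255.0      # MNIST pixels are floats in [0, 1]
# Depending on how 352.png was produced it may also need inverting (MNIST digits are bright on black).

x = tf.placeholder('float', [None, 784])
prediction = neural_network_model(x)               # same graph-building code as in training
saver = tf.train.Saver()                           # created after the network variables exist

with tf.Session() as sess:
    saver.restore(sess, "/tmp/model.ckpt")         # restore instead of re-initializing
    print(sess.run(tf.argmax(prediction, 1), feed_dict={x: [mn2]}))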

Tensorflow starts fast and slows down during training [duplicate]

This question already has an answer here:
TensorFlow: slow performance when getting gradients at inputs
(1 answer)
Closed 6 years ago.
I'm an electrical engineering student and I'm trying to model an industrial plant based on the power dissipated in a resistor inside a boiler, the temperature of the water in the boiler, and the water flow passing through the boiler, using Python 3.5 and TensorFlow.
The problem is that I'm a beginner at Python and TensorFlow; I wrote this code and it works, but training starts fast and quickly slows down, and by the middle of training it takes ages between steps.
I just need some help on the optimization, and of course, any tips are welcome!
Thank you very much!
Here is the code:
import numpy as np
import tensorflow as tf
input_vec_size = 3
step_size = 0.05
batch_size = 3
test_size = 16
train_end = 1905
eval_end = 290
predict_end = 1396
n_cores = 4
def read_my_file_format(filename_queue):
    line_reader = tf.TextLineReader(skip_header_lines=1)
    _, csv_row = line_reader.read(filename_queue)
    record_defaults = [[0.0], [0.0], [0.0], [0.0]]
    time, power_in, temperature, flow = \
        tf.decode_csv(csv_row, record_defaults=record_defaults)
    features = tf.pack([
        power_in,
        temperature
    ])
    return features, flow

def input_pipeline(directory, batch_size, n_cores, buffer_size, num_epochs=None):
    filename_queue = tf.train.string_input_producer(
        tf.train.match_filenames_once(directory),
        shuffle=True)
    features, flow = read_my_file_format(filename_queue)
    x, y = tf.train.batch(
        [features, flow], batch_size=batch_size, allow_smaller_final_batch=True)
    return x, y
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.001))

def init_bias(shape):  # initialize biases
    initial = tf.constant(0.001, shape=shape)  # variance 0.1
    return tf.Variable(initial)

def model(X, w_h, w_h2, w_o, B, B2, B3, p_keep_input, p_keep_hidden):
    X = tf.nn.dropout(X, p_keep_input)
    h = tf.nn.relu(tf.matmul(X, w_h) + B)
    h = tf.nn.dropout(h, p_keep_hidden)
    h2 = tf.nn.relu(tf.matmul(h, w_h2) + B2)
    h2 = tf.nn.dropout(h2, p_keep_hidden)
    return tf.matmul(h2, w_o) + B3
X = tf.placeholder("float", [None, input_vec_size])
Y = tf.placeholder("float", [None, 1])
p_keep_hidden = tf.placeholder("float")
p_keep_input = tf.placeholder("float")
w_h = init_weights([input_vec_size, fclayer_size])
w_h2= init_weights([fclayer_size, fclayer_size])
w_o= init_weights([fclayer_size, 1])
B = init_bias([fclayer_size])
B2 = init_bias([fclayer_size])
B3 = init_bias([1])
py_x = model(X, w_h, w_h2, w_o, B, B2, B3, p_keep_input, p_keep_hidden)
predict_op = py_x[0]
cost = tf.reduce_mean(tf.square(predict_op - Y))
train_op = tf.train.MomentumOptimizer(step_size, 0.5).minimize(cost)
saver = tf.train.Saver()
directory = "./train/*.csv"
x, y = input_pipeline(directory, batch_size, n_cores, buffer_size, num_epochs=None)
directory_eval = "./eval/*.csv"
xe, ye = input_pipeline(directory_eval, test_size, n_cores, buffer_size, num_epochs=None)
directory_predict = "./predict/*.csv"
xp, yp = input_pipeline(directory_predict, test_size, n_cores, buffer_size, num_epochs=None)
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    print("==================================TRAINING=================================")
    for iteraction in range(int(train_end/batch_size)):
        trX, trY = sess.run([x, y])
        for i in range(0, batch_size):
            features, features_past, features_past2 = sess.run(tf.unpack(trX[i])), sess.run(tf.unpack(trX[i-1])), sess.run(tf.unpack(trX[i-2]))
            power_in_i = features[0] - 4
            temperature_i = features[1]
            temperature_i1 = features_past[1]
            temperature_i2 = features_past2[1]
            trX_now = tf.pack([power_in_i, (temperature_i-temperature_i1), (temperature_i-temperature_i2)])
            trX_now = sess.run(trX_now)
            X_Batch, Y_Batch = trX_now.reshape([-1, input_vec_size]), trY[i].reshape([-1, 1])
            sess.run(train_op, feed_dict={X: X_Batch,
                                          Y: Y_Batch, p_keep_input: 0.95, p_keep_hidden: 0.7})
            if (i % batch_size == 0):
                predict_train = sess.run(tf.reshape(predict_op, [-1, 1]), feed_dict={X: X_Batch, p_keep_input: 1.0, p_keep_hidden: 1.0})
                train_cost = sess.run(cost, feed_dict={py_x: predict_train, Y: Y_Batch})
                print("Train Batch:", iteraction, "Sample:", batch_size*iteraction, "X:", X_Batch, "Y:", Y_Batch, "y_:",
                      predict_train, "Cost:", train_cost)
    saver.save(sess, "./model.ckpt")
    print('Variables saved successfully')
    coord.request_stop()
    coord.join(threads)
    sess.close()
print('=============================End of training=============================')
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    print("==============================VALIDATION==================================")
    saver.restore(sess, "./model.ckpt")
    print("Model restored.")
    for iteraction in range(int(eval_end/test_size)):
        teX, teY = sess.run([xe, ye])
        for i in range(0, test_size):
            features, features_past, features_past2 = sess.run(tf.unpack(teX[i])), sess.run(tf.unpack(teX[i - 1])), sess.run(tf.unpack(teX[i-2]))
            power_in_i = features[0] - 4
            temperature_i = features[1]
            temperature_i1 = features_past[1]
            teX_now = tf.pack([power_in_i, (temperature_i - temperature_i1), (temperature_i-temperature_i2)])
            teX_now = sess.run(teX_now)
            X_Batch, Y_Batch = teX_now.reshape([-1, input_vec_size]), teY[i].reshape([-1, 1])
            predict_eval = sess.run(tf.reshape(predict_op, [-1, 1]), feed_dict={X: X_Batch, p_keep_input: 1.0, p_keep_hidden: 1.0})
            eval_cost = sess.run(cost, feed_dict={py_x: predict_eval, Y: Y_Batch})
            print("Eval Batch:", iteraction, "Sample:", batch_size*iteraction, "X:", X.eval(feed_dict={X: X_Batch}), "Y:", Y_Batch, "y_:",
                  predict_eval, "Cost:", eval_cost)
    coord.request_stop()
    coord.join(threads)
    sess.close()
print('=============================END OF VALIDATION=============================')
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    print("==============================PREDICTION==================================")
    saver.restore(sess, "./model.ckpt")
    print("Model restored.")
    predict_batch_mean = 0
    predict_mean = 0
    for iteraction in range(int(predict_end / test_size)):
        tpX, tpY = sess.run([xp, yp])
        for i in range(0, test_size):
            features, features_past, features_past2 = sess.run(tf.unpack(tpX[i])), sess.run(tf.unpack(tpX[i - 1])), sess.run(tf.unpack(tpX[i-2]))
            power_in_i = features[0] - 4
            temperature_i = features[1]
            temperature_i1 = features_past[1]
            tpX_now = tf.pack([power_in_i, (temperature_i - temperature_i1), (temperature_i-temperature_i2)])
            tpX_now = sess.run(tpX_now)
            X_Batch, Y_Batch = tpX_now.reshape([-1, input_vec_size]), tpY[i].reshape([-1, 1])
            prediction = sess.run(tf.reshape(predict_op, [-1, 1]), feed_dict={X: X_Batch, p_keep_input: 1.0, p_keep_hidden: 1.0})
            print("Predict Batch:", iteraction, "Sample:", batch_size*iteraction, "X:", X.eval(feed_dict={X: X_Batch}), "y_:",
                  prediction)
            predict_batch_mean = (predict_batch_mean + prediction)/i
        predict_mean = (predict_mean + predict_batch_mean)/iteraction
        print("Predicted Flow:", predict_mean)
    coord.request_stop()
    coord.join(threads)
    sess.close()
My quick guess is that you are creating a lot of new nodes in each iteration through your training: those tf.packs and tf.reshapes are just making your graph bigger and bigger.
Construct the graph once outside the training loop, and I'll bet everything gets happy.
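To make that concrete, here is a sketch of the inner training loop with the per-step tensor ops replaced by plain NumPy on the arrays that sess.run([x, y]) already returns (illustrative only, reusing the question's variable names):

# Sketch: no tf.unpack / tf.pack inside the loop, so the graph stays the same size per step.
features       = trX[i]        # trX, trY are NumPy arrays returned by sess.run([x, y])
features_past  = trX[i - 1]
features_past2 = trX[i - 2]
power_in_i = features[0] - 4
d1 = features[1] - features_past[1]
d2 = features[1] - features_past2[1]
X_Batch = np.array([[power_in_i, d1, d2]], dtype=np.float32)   # shape [1, input_vec_size]
Y_Batch = trY[i].reshape([-1, 1])
sess.run(train_op, feed_dict={X: X_Batch, Y: Y_Batch,
                              p_keep_input: 0.95, p_keep_hidden: 0.7})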
Pandas saved me this time, and I'm already in love with it!
After some learning, here is the working code. It is fast now, aside from the prediction accuracy, which is still not that good.
Here's the code:
import numpy as np
import pandas as pd
import tensorflow as tf
# VARIABLES
input_vec_size = 6
layer1_size = 512
fclayer_size = 1024
step_size = 0.02
test_size = 16
train_end = 1905
eval_end = 290
predict_end = 1396

# READS TRAIN FILE
def read_data(directory):
    data = pd.read_csv(directory, sep=',', header=None)
    return data

# Batch maker
def get_batch(data, i, data_size):
    j = i + (input_vec_size - 1 - data_size)*(i//(data_size - input_vec_size + 1)) + input_vec_size - 1
    # print(j, i//(data_size - 5))
    features = [(data[1][j] - 4) / 16,
                (data[2][j] - data[2][j - 1])*10,
                (data[2][j] - data[2][j - 2])*10,
                (data[2][j] - data[2][j - 3])*10,
                (data[2][j] - data[2][j - 4])*10,
                (data[2][j] - data[2][j - 5])*10]
    features = np.reshape(features, [-1, input_vec_size])
    flow = data[3][j]/1500
    flow = np.reshape(flow, [-1, 1])
    return features, flow

# Variable initialization
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.001))

def init_bias(shape):  # initialize biases
    initial = tf.constant(0.001, shape=shape)  # variance 0.1
    return tf.Variable(initial)

# Defining the DNN model
def model(X, w_h, w_h2, w_o, B, B2, p_keep_input, p_keep_hidden):
    X = tf.nn.dropout(X, p_keep_input)
    h = tf.nn.relu(tf.matmul(X, w_h) + B)
    h = tf.nn.dropout(h, p_keep_hidden)
    h2 = tf.nn.relu(tf.matmul(h, w_h2))
    h2 = tf.nn.dropout(h2, p_keep_hidden)
    return tf.matmul(h2, w_o)
# Placeholders
X = tf.placeholder("float", [None, input_vec_size])
Y = tf.placeholder("float", [None, 1])
p_keep_hidden = tf.placeholder("float")
p_keep_input = tf.placeholder("float")
# Initial states of the variables
w_h = init_weights([input_vec_size, layer1_size])
w_h2 = init_weights([layer1_size, fclayer_size])
w_o = init_weights([fclayer_size, 1])
B = init_bias([layer1_size])
B2 = init_bias([fclayer_size])
# Model
py_x = model(X, w_h, w_h2, w_o, B, B2, p_keep_input, p_keep_hidden)
# Prediction op
predict_op = tf.reshape(py_x[0], [-1, 1])
# Cost function
cost = tf.reduce_mean(tf.square(predict_op - Y))
# Training op
train_op = tf.train.AdadeltaOptimizer(step_size).minimize(cost)
# Used to save the variables after training
saver = tf.train.Saver()
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    directory = '~/PycharmProjects/modelagemELT430/train/G2.csv'
    data = read_data(directory)
    for i in range(0, 10*(train_end - input_vec_size + 1)):
        features, flow = get_batch(data, i, train_end)
        # features = sess.run(features)
        sess.run(train_op, feed_dict={X: features,
                                      Y: flow, p_keep_input: 0.9, p_keep_hidden: 0.6})
        predict_train = sess.run(predict_op,
                                 feed_dict={X: features, p_keep_input: 1.0, p_keep_hidden: 1.0})
        train_cost = sess.run(cost, feed_dict={py_x: predict_train, Y: flow})
        print("Train Sample:", i, "X:", features, "Y:", flow*1500, "y_:",
              predict_train*1500, "Cost:", train_cost)
    saver.save(sess, "./model.ckpt")
    print('Variables saved successfully')
    sess.close()
print('=============================End of training=============================')
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    directory = '~/PycharmProjects/modelagemELT430/eval/G2E.csv'
    data = read_data(directory)
    print("==============================VALIDATION==================================")
    saver.restore(sess, "./model.ckpt")
    print("Model restored.")
    for i in range(0, eval_end - input_vec_size + 1):
        features, flow = get_batch(data, i, eval_end)
        predict_eval = sess.run(predict_op,
                                feed_dict={X: features, p_keep_input: 1.0, p_keep_hidden: 1.0})
        eval_cost = sess.run(cost, feed_dict={py_x: predict_eval, Y: flow})
        print("Eval Sample:", i, "X:", features, "Y:", flow*1500, "y_:", predict_eval*1500, "Cost:", eval_cost)
    sess.close()
print('============================End of validation=================================')
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    directory = '~/PycharmProjects/modelagemELT430/predict/G2P.csv'
    data = read_data(directory)
    print("==============================PREDICTION==================================")
    saver.restore(sess, "./model.ckpt")
    print("Model restored.")
    for i in range(0, predict_end - input_vec_size + 1):
        features, flow = get_batch(data, i, predict_end)
        predict = sess.run(predict_op,
                           feed_dict={X: features, p_keep_input: 1.0, p_keep_hidden: 1.0})
        eval_cost = sess.run(cost, feed_dict={py_x: predict, Y: flow})
        print("Predict Sample:", i, "X:", features, "y_:", predict*1500)
    sess.close()
print('============================End of prediction=================================')
