I am new to TensorFlow. I'm creating a simple fully connected neural network for image classification. The images have shape (-1, 224, 224, 3) and the labels have shape (-1, 2). However, the accuracy does not improve at all; it stays at 47% and does not change, even after changing the learning rate, the optimizer, and the test set. Any help will be greatly appreciated! Thank you!
import matplotlib.pyplot as plt
from util.MacOSFile import MacOSFile
import numpy as np
import _pickle as pickle
import tensorflow as tf
def pickle_load(file_path):
    with open(file_path, "rb") as f:
        return pickle.load(MacOSFile(f))
###hyperparameters###
batch_size = 32
iterations = 10
###loading training data start###
data = pickle_load('training.pickle')
x_train = []
y_train = []
for features, labels in data:
    x_train.append(features)
    y_train.append(labels)
x_train = np.array(x_train)
y_train = np.array(y_train)
###################################
###loading test data start###
data = pickle_load('testing.pickle')
x_test = []
y_test = []
for features, labels in data:
    x_test.append(features)
    y_test.append(labels)
x_test = np.array(x_test)
y_test = np.array(y_test)
###################################
###neural network###
x_s = tf.placeholder(tf.float32, [None, 224, 224, 3])
y_s = tf.placeholder(tf.float32, [None, 2])
x_image = tf.reshape(x_s, [-1, 150528])
W_1 = tf.Variable(tf.truncated_normal([150528, 8224]))
b_1 = tf.Variable(tf.zeros([8224]))
h_fc1 = tf.nn.relu(tf.matmul(x_image, W_1) + b_1)
W_2 = tf.Variable(tf.truncated_normal([8224, 1028]))
b_2 = tf.Variable(tf.zeros([1028]))
h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_2) + b_2)
W_3 = tf.Variable(tf.truncated_normal([1028, 2]))
b_3 = tf.Variable(tf.zeros([2]))
prediction = tf.nn.softmax(tf.matmul(h_fc2, W_3) + b_3)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_s, logits=prediction)
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
init = tf.global_variables_initializer()
###neural network end###
with tf.Session() as sess:
    sess.run(init)
    train_sample_size = len(data)  # how many data points?
    max_batches_in_data = int(train_sample_size/batch_size)  # max number of batches possible; 623
    for iteration in range(iterations):
        print('Iteration ', iteration)
        epoch = int(iteration/max_batches_in_data)
        start_idx = (iteration - epoch*max_batches_in_data)*batch_size
        end_idx = (iteration + 1 - epoch*max_batches_in_data)*batch_size
        mini_x_train = x_train[start_idx: end_idx]
        mini_y_train = y_train[start_idx: end_idx]
        ## actual training is here
        sess.run(train_step, feed_dict={x_s: mini_x_train, y_s: mini_y_train})
        #test accuracy#
        y_pre = sess.run(prediction, feed_dict={x_s: x_train[:100]})
        correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(y_train[:100], 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        result = sess.run(accuracy, feed_dict={x_s: x_train[:100], y_s: y_train[:100]})
        print("Result: {0}".format(result))
I've made a few observations. First, your code is a bit outdated: you don't have to set up fully connected layers by hand, there is a built-in for that, dense layers (tf.layers.dense).
If you are loading images, why don't you use convolutional layers as well?
I also recommend the Adam optimizer; just leave its parameters at their default values. A rough sketch of these suggestions is below. I hope I helped a bit :)
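As an illustration only, here is a minimal sketch of that approach for the (224, 224, 3) inputs above, using tf.layers and AdamOptimizer; the filter counts and layer sizes are arbitrary assumptions, not tuned values:
import tensorflow as tf

x_s = tf.placeholder(tf.float32, [None, 224, 224, 3])
y_s = tf.placeholder(tf.float32, [None, 2])

# Two small conv/pool blocks, then a dense head (sizes are arbitrary).
conv1 = tf.layers.conv2d(x_s, filters=16, kernel_size=3, activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(conv1, pool_size=2, strides=2)
conv2 = tf.layers.conv2d(pool1, filters=32, kernel_size=3, activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(conv2, pool_size=2, strides=2)

flat = tf.layers.flatten(pool2)
fc1 = tf.layers.dense(flat, 128, activation=tf.nn.relu)
logits = tf.layers.dense(fc1, 2)         # raw logits, no softmax here
prediction = tf.nn.softmax(logits)       # softmax only for reporting probabilities

# The cross-entropy op expects logits, not softmax output.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_s, logits=logits))
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)  # default Adam parameters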
Related
I'm trying to create a neural network model for a Kaggle competition using the MNIST dataset. Currently my code looks like the following, since I am trying to capture certain metrics. However, I can't figure out how to turn this into an output I can submit.
current:
import time
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import pandas as pd
import numpy as np
from tensorflow.python.framework import ops
ops.reset_default_graph()
#requests.packages.urllib3.disable_warnings()
import ssl
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context
# Load training and testing data directly from TensorFlow
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# Initialize metrics
metrics = {}
# Initialize metric names
names = ['Number of Hidden Layers', 'Nodes per Layer', 'Time in Seconds',
'Training Set Accuracy', 'Test Set Accuracy']
# Set fixed parameters
n_epochs = 20
batch_size = 50
learning_rate = 0.01
# Function that creates batch generator used in training
def shuffle_batch(X, y, batch_size):
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch
# Start timer
start = time.process_time()
n_hidden = 300
# Reset the session
tf.reset_default_graph()
#ops.reset_default_graph()
tf.set_random_seed(2141)
#tf.random.set_seed(2141)
np.random.seed(9347)
# Set X and y placeholders
X = tf.placeholder(tf.float32, shape=(None, 784), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
with tf.name_scope("dnn"):
hidden1 = tf.layers.dense(X, n_hidden, name="hidden1",
activation=tf.nn.relu)
hidden2 = tf.layers.dense(hidden1, n_hidden, name="hidden2",
activation=tf.nn.relu)
logits = tf.layers.dense(hidden2, 10, name="outputs")
y_proba = tf.nn.softmax(logits)
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_train, y: y_train})
        acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
# Record the clock time it takes
duration = time.process_time() - start
metrics['Model 1'] = [2, n_hidden, duration, acc_train, acc_test]
# Convert metrics dictionary to dataframe for display
results_summary = pd.DataFrame.from_dict(metrics, orient='index')
results_summary.columns = names
# Sort by model number
results_summary.reset_index(inplace=True)
results_summary.sort_values(by=['index'], axis=0, inplace=True)
results_summary.set_index(['index'], inplace=True)
results_summary.index.name = None
# Export to csv
results_summary.to_csv('results_summary.csv')
results_summary
I need to create an output that looks something like this in a CSV file:
ImageId Label
0 1 2
1 2 0
2 3 9
3 4 0
4 5 3
Would I have to recreate the whole thing in order to actually create y_pred, the way you would with something like model.predict(X_test), or can I just reshape the existing code in some way to do this? Ideally, I would like to capture the predicted values and compare them to the true values using a confusion matrix.
I've tried the following but keep getting errors like AttributeError: module 'tensorflow.compat.v1' has no attribute 'run':
feed_dict = {X: X_test}
classification = tf.run(y_proba, feed_dict)
label = numpy.argmax(classification, axis=-1)
Thanks in advance.
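For what it's worth, here is a minimal, non-authoritative sketch of one way to get predictions out of the graph defined above: run y_proba with sess.run (there is no tf.run) inside the existing Session, take the argmax, and write ImageId/Label columns with pandas. The file name submission.csv and the use of scikit-learn's confusion_matrix are assumptions on my part:
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix

# Still inside the `with tf.Session() as sess:` block above, after training:
proba = sess.run(y_proba, feed_dict={X: X_test})   # shape (n_samples, 10)
y_pred = np.argmax(proba, axis=1)                  # predicted digit per image

# Kaggle-style submission file (ImageId is conventionally 1-based).
submission = pd.DataFrame({'ImageId': np.arange(1, len(y_pred) + 1),
                           'Label': y_pred})
submission.to_csv('submission.csv', index=False)

# Confusion matrix against the known labels, for local checking only.
print(confusion_matrix(y_test, y_pred))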
#Import TensorFlow
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Import data
data = pd.read_csv("C:\\testdata1004.csv")
#Drop data variable
data = data.drop(['DATE'],1)
#Dimensions of dataset
n = data.shape[0]
p = data.shape[1]
#Make data a numpy array
data = data.values
#Training and test data
train_start = 0
train_end = int(np.floor(0.8*n))
test_start = train_end
test_end = n
data_train = data[np.arange(train_start, train_end), :]
data_test = data[np.arange(test_start, test_end), :]
#Scale data
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(data_train)
data_train = scaler.transform(data_train)
data_test = scaler.transform(data_test)
#Build X and Y
X_train = data_train[:, 1:]
y_train = data_train[:, 0]
X_test = data_test[:, 1:]
y_test = data_test[:, 0]
#Define a and b as placeholders
a = tf.placeholder(dtype=tf.int8)
b = tf.placeholder(dtype=tf.int8)
#Define c as the sum of a and b
c = tf.add(a,b)
#Initialize the graph
graph = tf.Session()
#Run the graph
graph.run(c, feed_dict={a: 5, b: 4})
#Model architecture parameters
n_stocks = 500
n_neurons_1 = 1024
n_neurons_2 = 512
n_neurons_3 = 256
n_neurons_4 = 128
n_target = 1
#Placeholder
X = tf.placeholder(dtype=tf.float32, shape=[None, n_stocks])
Y = tf.placeholder(dtype=tf.float32, shape=[None])
#Initializers
sigma = 1
weight_initializer = tf.variance_scaling_initializer(mode="fan_avg",distribution="uniform", scale=sigma)
bias_initializer = tf.zeros_initializer()
#Layer 1: Variables for hidden weights and biases
W_hidden_1 = tf.Variable(weight_initializer([n_stocks,n_neurons_1]))
bias_hidden_1 = tf.Variable(bias_initializer([n_neurons_1]))
#Layer 2: Variables for hidden weights and biases
W_hidden_2 = tf.Variable(weight_initializer([n_neurons_1,n_neurons_2]))
bias_hidden_2 = tf.Variable(bias_initializer([n_neurons_2]))
#Layer 3: Variables for hidden weights and biases
W_hidden_3 = tf.Variable(weight_initializer([n_neurons_2,n_neurons_3]))
bias_hidden_3 = tf.Variable(bias_initializer([n_neurons_3]))
#Layer 4: Variables for hidden weights and biases
W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3,n_neurons_4]))
bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))
#Output layer : Variables for output weights and biases
W_out = tf.Variable(weight_initializer([n_neurons_4,n_target]))
bias_out = tf.Variable(bias_initializer([n_target]))
#Hidden layer
hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1),bias_hidden_1))
hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1,W_hidden_2),bias_hidden_2))
hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2,W_hidden_3),bias_hidden_3))
hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3,W_hidden_4),bias_hidden_4))
#Output layer (must be transposed)
out = tf.transpose(tf.add(tf.matmul(hidden_4, W_out),bias_out))
#Cost function
mse = tf.reduce_mean(tf.squared_difference(out, Y))
#Optimizer
opt = tf.train.AdamOptimizer().minimize(mse)
#Make Session
net = tf.Session()
#Run initializer
net.run(tf.global_variables_initializer())
#Setup interactive plot
plt.ion()
fig = plt.figure()
ax1 = fig.add_subplot(111)
line1, = ax1.plot(y_test)
line2, = ax1.plot(y_test*0.5)
plt.show()
#Number of epochs and batch size
epochs = 10
batch_size = 256
for e in range(epochs):
    #Shuffle training data
    shuffle_indices = np.random.permutation(np.arange(len(y_train)))
    X_train = X_train[shuffle_indices]
    y_train = y_train[shuffle_indices]
    #Minibatch training
    for i in range(0, len(y_train) // batch_size):
        start = i * batch_size
        batch_x = X_train[start:start + batch_size]
        batch_y = y_train[start:start + batch_size]
        #Run optimizer with batch
        net.run(opt, feed_dict={X: batch_x, Y: batch_y})
        #Show progress
        if np.mod(i, 5) == 0:
            #Prediction
            pred = net.run(out, feed_dict={X: X_test})
            line2.set_ydata(pred)
            plt.title('Epoch ' + str(e) + ', Batch ' + str(i))
            file_name = 'img/epoch_' + str(e) + '_batch_' + str(i) + '.jpg'
            plt.savefig(file_name)
            plt.pause(0.01)
#Print final MSE after Training
mse_final = net.run(mse, feed_dict= {X: X_test, Y: y_test})
print(mse_final)
I implemented the TensorFlow code above, but it doesn't work and I get an error.
This code is just an example that I have. I don't know what the reason for the error is or how to fix it. Could you tell me how to fix it? Thank you.
The None is for the batch size, but then you need another dimension for the length of the data you feed into Y. Try changing:
Y = tf.placeholder(dtype=tf.float32, shape=[None,1])
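If that change is made, the arrays fed into Y would presumably also need the trailing dimension; a small sketch, assuming the batch_y and y_test arrays from the training loop above:
# Give the 1-D label batch a trailing axis so it matches the (?, 1) placeholder.
net.run(opt, feed_dict={X: batch_x, Y: batch_y.reshape(-1, 1)})

# Same for the final evaluation.
mse_final = net.run(mse, feed_dict={X: X_test, Y: y_test.reshape(-1, 1)})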
I am using an LSTM RNN to detect whether a heartbeat is arrhythmic or not. So the output classes are [0, 1] and n_classes = 2, but when this code is executed:
# Fit training using batch data
_, loss, acc = sess.run(
    [optimizer, cost, accuracy],
    feed_dict={
        x: batch_xs,
        y: batch_ys
    }
)
it gives the following error:
ValueError: Cannot feed value of shape (1, 1) for Tensor 'Placeholder_1:0', which has shape '(?, 2)'
Here is the whole code:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf # Version 1.0.0 (some previous versions are used in past commits)
from sklearn import metrics
import _pickle as cPickle
import os
import pandas as pd
import functions as f
[ml2_train_input,ml2_train_output,ml2_train_peaks,ml2_test_input,ml2_test_output,ml2_test_peaks]=f.get_ml2(0.5)
ml2_train_output=f.get_binary_output(ml2_train_output[:52500])
ml2_test_output=f.get_binary_output(ml2_test_output[:52500])
# Output classes to learn how to classify
LABELS = [0,1 ]
training_data_count = len(ml2_train_input[:52500]) # training series
test_data_count = len(ml2_test_input[:52500]) # testing series
n_input = 360 # 360 input parameters per timestep
# LSTM Neural Network's internal structure
n_hidden = 8 # Hidden layer num of features
n_classes = 2 # Total classes
# Training
learning_rate = 0.005
lambda_loss_amount = 0.0015
training_iters = training_data_count * 10 # Loop 10 times on the dataset
batch_size = 500
display_iter = 1000 # To show test set accuracy during training
X_test=np.array(ml2_test_input[:52500])
y_test=np.array(ml2_test_output[:52500])
# Some debugging info
print("Some useful info to get an insight on dataset's shape and normalisation:")
print("(X shape, y shape, every X's mean, every X's standard deviation)")
print(X_test.shape, y_test.shape, np.mean(X_test), np.std(X_test))
print("The dataset is therefore properly normalised, as expected, but not yet one-hot encoded.")
def LSTM_RNN(_X, _weights, _biases):
    # Function returns a tensorflow LSTM (RNN) artificial neural network from given parameters.
    # Moreover, two LSTM cells are stacked, which adds deepness to the neural network.
    # Note, some code of this notebook is inspired by a slightly different
    # RNN architecture used on another dataset; some of the credit goes to
    # "aymericdamien" under the MIT license.
    # (NOTE: This step could be greatly optimised by shaping the dataset once
    # input shape: (batch_size, n_steps, n_input)
    # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    #_X = tf.reshape(_X, [-1, n_input])
    # new shape: (n_steps*batch_size, n_input)
    # Linear activation
    _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(_X, 500, 0)
    # new shape: n_steps * (batch_size, n_hidden)
    # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
    lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True, reuse=None)
    lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True, reuse=None)
    lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
    # Get LSTM cell output
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
    # Get last time step's output feature for a "many to one" style classifier,
    # as in the image describing RNNs at the top of this page
    lstm_last_output = outputs[-1]
    # Linear activation
    return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, step, batch_size):
    # Function to fetch a "batch_size" amount of data from "(X|y)_train" data.
    shape = list(_train.shape)
    shape[0] = batch_size
    batch_s = np.empty(shape)
    for i in range(batch_size):
        # Loop index
        index = ((step-1)*batch_size + i) % len(_train)
        batch_s[i] = _train[index]
    return batch_s
def one_hot(y_):
    # Function to encode output labels from number indexes
    # e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
    y_ = y_.reshape(len(y_))
    n_values = int(np.max(y_)) + 1
    return np.eye(n_values)[np.array(y_, dtype=np.int32)]  # Returns FLOATS
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Graph weights
weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])),  # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM_RNN(x, weights, biases)
# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
    tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
)  # L2 loss prevents this overkill neural network from overfitting the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# To keep track of training's performance
test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []
# Launch the graph
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
X_train=np.array(ml2_train_input[:52500])
y_train=np.array(ml2_train_output[:52500])
step = 1
while step * batch_size <= training_iters:
    batch_xs = extract_batch_size(X_train, step, batch_size)
    batch_ys = one_hot(extract_batch_size(y_train, step, batch_size))
    # Fit training using batch data
    _, loss, acc = sess.run(
        [optimizer, cost, accuracy],
        feed_dict={
            x: batch_xs,
            y: batch_ys
        }
    )
    train_losses.append(loss)
    train_accuracies.append(acc)
    # Evaluate network only at some steps for faster training:
    if (step*batch_size % display_iter == 0) or (step == 1) or (step * batch_size > training_iters):
        # To not spam console, show training accuracy/loss in this "if"
        print("Training iter #" + str(step*batch_size) + \
              ": Batch Loss = " + "{:.6f}".format(loss) + \
              ", Accuracy = {}".format(acc))
        # Evaluation on the test set (no learning made here - just evaluation for diagnosis)
        loss, acc = sess.run(
            [cost, accuracy],
            feed_dict={
                x: X_test,
                y: one_hot(y_test)
            }
        )
        test_losses.append(loss)
        test_accuracies.append(acc)
        print("PERFORMANCE ON TEST SET: " + \
              "Batch Loss = {}".format(loss) + \
              ", Accuracy = {}".format(acc))
    step += 1
print("Optimization Finished!")
Please help!
I feel you should convert your Y values to categorical (one-hot encoded); then it should work. So try converting your Y values to categorical before feeding them.
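A minimal sketch of that conversion, assuming the labels arrive as integer class indices 0/1 (this mirrors what the one_hot helper in the question already does for the test set):
import numpy as np

def to_categorical(y, n_classes=2):
    # Turn integer class labels of shape (N,) or (N, 1) into one-hot rows of shape (N, n_classes).
    y = np.asarray(y, dtype=np.int32).reshape(-1)
    return np.eye(n_classes, dtype=np.float32)[y]

# Example: a batch label of shape (1, 1) becomes shape (1, 2),
# which matches the placeholder y = tf.placeholder(tf.float32, [None, n_classes]).
batch_ys = to_categorical(np.array([[1]]))
print(batch_ys)   # [[0. 1.]]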
I just started using TensorFlow and I followed the tutorial example on the MNIST dataset. It went well; I got around 90% accuracy.
But after I replaced next_batch with my own version, the result was much worse than it used to be, usually around 50%.
Instead of using the data TensorFlow downloaded and parsed, I downloaded the dataset from this website and used NumPy to get what I want.
df = pd.read_csv('mnist_train.csv', header=None)
X = df.drop(0,1)
Y = df[0]
temp = np.zeros((Y.size, Y.max()+1))
temp[np.arange(Y.size),Y] = 1
np.save('X',X)
np.save('Y',temp)
I do the same thing for the test data. Then, following the tutorial, nothing else is changed:
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
X = np.load('X.npy')
Y = np.load('Y.npy')
X_test = np.load('X_test.npy')
Y_test = np.load('Y_test.npy')
BATCHES = 1000
W = tf.Variable(tf.truncated_normal([784,10], stddev=0.1))
# W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
Right here is my own get_mini_batch. I shuffle the original data's indices, then each time I take 100 data points out of it, which seems to be exactly what the example code does. The only difference is that I throw away some of the data at the tail.
pos = 0
idx = np.arange(X.shape[0])
np.random.shuffle(idx)
for _ in range(1000):
    batch_xs, batch_ys = X[idx[range(pos,pos+BATCHES)],:], Y[idx[range(pos,pos+BATCHES)],]
    if pos+BATCHES >= X.shape[0]:
        pos = 0
        idx = np.arange(X.shape[0])
        np.random.shuffle(idx)
    pos += BATCHES
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
print(sess.run(accuracy, feed_dict={x: X_test, y_: Y_test}))
It confuses me why my version is way worse than the tutorial one.
As lejilot said, you should normalize the data before pushing it into the neural network.
See this post.
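For concreteness, a minimal sketch of that normalization applied to the CSV-loaded pixels from the question (scaling 0-255 values to 0-1); everything else stays the same:
import numpy as np
import pandas as pd

df = pd.read_csv('mnist_train.csv', header=None)
X = df.drop(0, axis=1).values.astype(np.float32) / 255.0   # scale pixels from [0, 255] to [0, 1]
Y = df[0].values

# One-hot encode the labels as before.
temp = np.zeros((Y.size, Y.max() + 1), dtype=np.float32)
temp[np.arange(Y.size), Y] = 1

np.save('X', X)
np.save('Y', temp)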
I am new to TensorFlow and neural networks. I am trying to build a neural network that can classify images in the CIFAR-10 dataset.
Here is my code:
import tensorflow as tf
import pickle
import numpy as np
import random
image_size= 32*32*3 # because 3 channels
n_classes = 10
lay1_size = 50
batch_size = 100
def unpickle(filename):
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='latin1')
    x = data['data']
    y = data['labels']
    # shuffle the data
    z = list(zip(x, y))
    random.shuffle(z)
    x, y = zip(*z)
    x = x[:batch_size]
    y = y[:batch_size]
    # convert decimals to one hot arrays
    y = np.eye(n_classes)[[y]]
    return x, y
# set up network
def add_layer(inputs, in_size, out_size, activation_function=None):
    W = tf.Variable(tf.random_normal([in_size, out_size]), dtype=tf.float32)
    b = tf.Variable(tf.zeros([1, out_size]) + 0.1, dtype=tf.float32)
    Wx_plus_b = tf.matmul(inputs, W) + b
    if activation_function is None:
        output = Wx_plus_b
    else:
        output = activation_function(Wx_plus_b)
    return output
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result
xs = tf.placeholder(tf.float32, [None,image_size])
ys = tf.placeholder(tf.float32)
lay1 = add_layer(xs, image_size, lay1_size, activation_function=tf.nn.tanh)
lay2 = add_layer(lay1, lay1_size, lay1_size, activation_function=tf.nn.tanh)
prediction = add_layer(lay2, lay1_size, n_classes, activation_function=tf.nn.softmax)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
#train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# run network
sess = tf.Session()
sess.run(tf.initialize_all_variables())
x_test, y_test = unpickle('test_batch')
for i in range(1000):
    x_train, y_train = unpickle('data_batch_1')
    sess.run(train_step, feed_dict={xs: x_train, ys: y_train})
    if i % 50 == 0:
        print(compute_accuracy(x_test, y_test))
sess.close()
I am using two hidden layers with 50 nodes in each layer. I am running 1,000 cycles, where in each cycle I shuffle data in the dataset and pick the first 100 images of that shuffle to train on.
I am consistently getting ~0.1 accuracy; the model is not learning at all.
When I modify the code to use the MNIST dataset instead of the CIFAR-10 dataset I get ~0.87 accuracy.
I took code from an MNIST tutorial and am trying to modify it to classify CIFAR-10 data.
I can't figure out what's wrong here. How do I get my algorithm to learn?