I built a neural network with TensorFlow. It is a simple 3-layer neural network with the last layer being softmax.
I tried it on the standard adult income dataset (e.g. https://archive.ics.uci.edu/ml/datasets/adult) since it is publicly available, has a good amount of data (roughly 50k examples) and also provides separate test data.
As there are some categorical attributes, I converted them into one-hot encodings. For the neural network I used Xavier initialization and the Adam optimizer. As there are only two output classes (>50K and <=50K), the last softmax layer had only two neurons. After one-hot encoding expansion, the 14 attributes / columns expanded into 108 columns.
I experimented with different numbers of neurons in the first two hidden layers (from 5 to 25). I also experimented with the number of iterations (from 1000 to 20000).
The training accuracy wasn't affected much by the number of neurons. It went up a little with more iterations. However, I could not do any better than 82% :(
Am I missing something basic in my approach? Has anyone tried this (a neural network with this dataset)? If so, what are the expected results? Could the low accuracy be due to missing values? (I am planning to try filtering out all the missing values if there aren't many in the dataset.)
Any other ideas? Here is my TensorFlow neural network code in case there are any bugs in it, etc.
def create_placeholders(n_x, n_y):
    X = tf.placeholder(tf.float32, [n_x, None], name = "X")
    Y = tf.placeholder(tf.float32, [n_y, None], name = "Y")
    return X, Y
def initialize_parameters(num_features):
    tf.set_random_seed(1)            # so that your "random" numbers match ours
    layer_one_neurons = 5
    layer_two_neurons = 5
    layer_three_neurons = 2
    W1 = tf.get_variable("W1", [layer_one_neurons, num_features], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b1 = tf.get_variable("b1", [layer_one_neurons, 1], initializer = tf.zeros_initializer())
    W2 = tf.get_variable("W2", [layer_two_neurons, layer_one_neurons], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b2 = tf.get_variable("b2", [layer_two_neurons, 1], initializer = tf.zeros_initializer())
    W3 = tf.get_variable("W3", [layer_three_neurons, layer_two_neurons], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b3 = tf.get_variable("b3", [layer_three_neurons, 1], initializer = tf.zeros_initializer())
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}
    return parameters
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']
    Z1 = tf.add(tf.matmul(W1, X), b1)
    A1 = tf.nn.relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)
    A2 = tf.nn.relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)
    return Z3
def compute_cost(Z3, Y):
    """
    Computes the cost

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (2, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost - Tensor of the cost function
    """
    # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = labels))
    return cost
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, num_epochs = 1000, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size, number of training examples)
    Y_train -- training labels, of shape (output size, number of training examples)
    X_test -- test set, of shape (input size, number of test examples)
    Y_test -- test labels, of shape (output size, number of test examples)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()     # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)         # to keep consistent results
    seed = 3                      # to keep consistent results
    (n_x, m) = X_train.shape      # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]        # n_y : output size
    costs = []                    # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y)
    # Initialize parameters
    parameters = initialize_parameters(X_train.shape[0])
    # Forward propagation: Build the forward propagation in the tensorflow graph
    Z3 = forward_propagation(X, parameters)
    # Cost function: Add cost function to tensorflow graph
    cost = compute_cost(Z3, Y)
    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)
        # Do the training loop
        for epoch in range(num_epochs):
            _ , epoch_cost = sess.run([optimizer, cost], feed_dict={X: X_train, Y: Y_train})
            # Print the cost every 100 epochs
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        #print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

    return parameters
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
import pandas as pd
%matplotlib inline
np.random.seed(1)
df = pd.read_csv('adult.data', header = None)
X_train_orig = df.drop(df.columns[[14]], axis=1, inplace=False)
Y_train_orig = df[[14]]
X_train = pd.get_dummies(X_train_orig) # get one hot encoding
Y_train = pd.get_dummies(Y_train_orig) # get one hot encoding
parameters = model(X_train.T, Y_train.T, None, None, num_epochs = 10000)
Any suggestions for other publicly available datasets for trying this out?
I tried standard algorithms from scikit-learn on this dataset with default parameters and got the following accuracies:
Random Forest: 86%
SVM: 96%
kNN: 83%
MLP: 79%
I have uploaded my iPython notebook for this at: https://github.com/sameermahajan/ClassifiersWithIncomeData/blob/master/Scikit%2BLearn%2BClassifiers.ipynb
The best accuracy is with SVM, which might be expected from the explanation at: http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html Interestingly, SVM also took a lot of time to run, way more than any other method.
Looking at the MLPClassifier accuracy above, this may not be a good problem to solve with a neural network. My neural network wasn't that bad after all! Thanks for all the responses and your interest in this.
I didn't experiment on this dataset, but after looking at some papers and doing some research, it looks like your network is doing OK.
First, is your accuracy calculated from the training set or the test set? Having both will give you a good hint of how your network is performing.
I'm still a bit new to machine learning, but maybe I can give some help:
Looking at the data documentation here: https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names
And this paper: https://cseweb.ucsd.edu/classes/wi17/cse258-a/reports/a120.pdf
From those links, 85% accuracy on the training and test sets looks like a good score; you are not too far off.
Do you have some kind of cross-validation to check for overfitting in your network?
I don't have your code, so I can't tell whether this is a bug or a programming-related issue; sharing your code might be a good idea.
I think you would gain more accuracy by pre-processing your data a bit:
There are a lot of unknowns inside your data, and neural networks are very sensitive to mislabeling and bad data.
You should try to find and replace or remove the unknowns.
You could also try to identify the most useful features and drop the ones that are nearly useless.
Feature scaling / data normalization can also be quite important for neural networks. I didn't look much into the data, but maybe you can try to scale your data to [0, 1] if that isn't done already; a sketch of both ideas follows.
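A minimal sketch of the unknown-handling and the scaling, assuming the UCI file layout (unknowns are marked with '?', and the continuous columns sit at the indices below); adapt it to your own pipeline:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

df = pd.read_csv('adult.data', header=None, skipinitialspace=True)
df = df.replace('?', np.nan).dropna()      # drop rows that contain unknown values

continuous_cols = [0, 2, 4, 10, 11, 12]    # age, fnlwgt, education-num, capital-gain, capital-loss, hours-per-week
df[continuous_cols] = MinMaxScaler().fit_transform(df[continuous_cols])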
The paper I linked reports an improvement in performance when adding layers, up to 5 layers. Did you try adding more layers?
You can also add dropout if your network overfits, if you haven't already.
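If you want to try dropout in your graph code, here is a minimal sketch applied to the A1 activation from your forward_propagation (the keep probability and the placement are assumptions to tune):
keep_prob = tf.placeholder_with_default(1.0, shape=())   # feed e.g. 0.8 during training, leave at 1.0 for evaluation
A1 = tf.nn.relu(Z1)
A1 = tf.nn.dropout(A1, keep_prob)                        # randomly drops units and rescales the remaining ones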
I would also try other models that are generally good for these tasks, like SVM (Support Vector Machine), logistic regression, or even random forest, but looking at the results I'm not sure those will perform better than the artificial neural network.
I would also take a look at these links: https://www.kaggle.com/wenruliu/adult-income-dataset/feed
https://www.kaggle.com/wenruliu/income-prediction
There, some people are trying algorithms and giving tips for processing the data and tackling this subject.
Hope it helped.
Good luck,
Marc.
I think you are focusing too much on your network structure and forgetting that your results also depend largely on data quality. I tried a quick off-the-shelf random forest and it gave me results similar to yours (acc = 0.8275238).
I suggest you do some feature engineering (the Kaggle link provided by #Marc has some nice examples). Decide on a strategy for your NAs (look here), group values when you have many factor levels in categorical variables (e.g. countries grouped into continents), or discretise continuous variables (e.g. the age variable into levels such as old, mid_aged, young); a sketch follows.
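For example, a minimal sketch of discretising age with pandas (the column name and bin edges are arbitrary assumptions):
import pandas as pd

df['age_group'] = pd.cut(df['age'], bins=[0, 30, 55, 120], labels=['young', 'mid_aged', 'old'])
df = pd.get_dummies(df, columns=['age_group'])   # one-hot encode the new categorical column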
Play with your data, study your dataset, and try to apply domain expertise to remove redundant or overly narrow information. Once this is done, start tweaking your model. Additionally, you can consider doing as I did: use ensemble models (which are usually fast and pretty accurate with default values) like RF or XGB to check whether the results are consistent across all your models. Once you are sure you are on the right track, you can start tweaking the structure, layers, etc. and see if you can push your results even further.
Hope this helps.
Good luck!
Related
I want to train a network on the isolet dataset, consisting of 6238 samples with 300 features each.
This is my code so far:
import tensorflow as tf
import sklearn.preprocessing as prep
import numpy as np
import matplotlib.pyplot as plt
def main():
    X, C, Xtst, Ctst = load_isolet()

    #normalize
    #X = (X - np.mean(X, axis = 1)[:, np.newaxis]) / np.std(X, axis = 1)[:, np.newaxis]
    #Xtst = (Xtst - np.mean(Xtst, axis = 1)[:, np.newaxis]) / np.std(Xtst, axis = 1)[:, np.newaxis]
    scaler = prep.MinMaxScaler(feature_range=(0,1))
    scaledX = scaler.fit_transform(X)
    scaledXtst = scaler.transform(Xtst)

    # Build the tf.keras.Sequential model by stacking layers. Choose an optimizer and loss function for training:
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(X.shape[1], activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(26, activation='softmax')
    ])

    ES_callback = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta=1e-2, patience=10, verbose=1)
    initial_learning_rate = 0.01
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=100000, decay_rate=0.9999, staircase=True)
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

    history = model.fit(scaledX, C, epochs=100, callbacks=[ES_callback], batch_size = 32)

    plt.figure(1)
    plt.plot(range(len(history.history['loss'])), history.history['loss'])
    plt.plot(range(len(history.history['accuracy'])), history.history['accuracy'])
    plt.show()
Up to now, I have pretty much turned every knob I know:
different number of layers
different sizes of layers
different activation functions
different learning rates
different optimizers (we are supposed to test with 'adam' and 'stochastic gradient descent')
different batch sizes
different data preparations (the features range from -1 to 1; I tried normalizing along the feature axes, standardizing (z_i = (x_i - mean) / std(x_i)), and, as seen in the code above, scaling the values to [0, 1], since I guess 'relu' activation won't work well with negative input values)
Pretty much everything I tried gives weird outputs with extremely high loss values (depending on the learning rate) and very low accuracies during learning. The loss is increasing over epochs pretty much all of the time, but seems to be independent of the accuracy values.
For the code, I followed the tutorials I was given; however, something is very off, since I am supposed to find the best hyperparameters, but I'm not able to find any good ones whatsoever.
I'd be very glad to get some pointers on where I got the code wrong or where I need to preprocess the data differently.
Edit: Using loss='categorical_crossentropy' was given, so at least this one should be correct.
First of all:
Your convergence problems may be due to an "incorrect" loss function. tf.keras supports a variety of losses whose choice depends on the shape of your labels.
Try the different possibilities:
tf.keras.losses.CategoricalCrossentropy if your labels are one-hot vectors,
tf.keras.losses.SparseCategoricalCrossentropy if your labels are integer class ids like 1, 2, 3, ...,
or tf.keras.losses.BinaryCrossentropy if your labels are just 0, 1.
Honestly, this part of tf.keras is a bit tricky and some settings like that might need tuning.
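A small sketch of the distinction, reusing the model and optimizer from your question (a 26-unit softmax output is assumed):
# if C holds one-hot rows of shape (n_samples, 26):
model.compile(optimizer=optimizer,
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])

# if C holds integer class ids 0..25 of shape (n_samples,):
model.compile(optimizer=optimizer,
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])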
Second of all - this part:
scaler = prep.MinMaxScaler(feature_range=(0,1))
scaledX = scaler.fit_transform(X)
scaledXtst = scaler.fit_transform(Xtst)
Assuming Xtst is your test set, you want to scale it based on your training set, so the correct scaling would be just
scaledXtst = scaler.transform(Xtst)
Hope this helps!
I want to run some experiments with neural networks using PyTorch, so I tried a simple one as a warm-up exercise, and I cannot quite make sense of the results.
The exercise attempts to predict the rating of 1000 TPTP problems from various statistics about the problems, such as number of variables, maximum clause length, etc. The data file https://github.com/russellw/ml/blob/master/test.csv is quite straightforward: 1000 rows, with the final column being the rating. I started off with a few dozen input columns, with all the numbers scaled to the range 0-1; I progressively deleted features to see if the result still held, and it does, all the way down to one input column; the others are in previous versions in Git history.
I started off using separate training and test sets, but have set aside the test set for the moment, because the question of whether training performance generalizes to testing doesn't arise until training performance has been obtained in the first place.
Simple linear regression on this data set has a mean squared error of about 0.14.
I implemented a simple feedforward neural network, code in https://github.com/russellw/ml/blob/master/test_nn.py and copied below, that, after a couple hundred training epochs, also has a mean squared error of 0.14.
So I tried changing the number of hidden layers from 1 to 2 to 3, using a few different optimizers, tweaking the learning rate, switching the activation functions from relu to tanh to a mixture of both, increasing the number of epochs to 5000, increasing the number of hidden units to 1000. At this point, it should easily have had the ability to just memorize the entire data set. (At this point I'm not concerned about overfitting. I'm just trying to get the mean squared error on training data to be something other than 0.14.) Nothing made any difference. Still 0.14. I would say it must be stuck in a local optimum, but that's not supposed to happen when you've got a couple million weights; it's supposed to be practically impossible to be in a local optimum for all parameters simultaneously. And I do get slightly different sequences of numbers on each run. But it always converges to 0.14.
Now the obvious conclusion would be that 0.14 is as good as it gets for this problem, except that it stays the same even when the network has enough memory to just memorize all the data. But the clincher is that I also tried a random forest, https://github.com/russellw/ml/blob/master/test_rf.py
... and the random forest has a mean squared error of 0.01 on the original data set, degrading gracefully as features are deleted, still 0.05 on the data with just one feature.
Nowhere in the lore of machine learning is it said 'random forests vastly outperform neural nets', so I'm presumably doing something wrong, but I can't see what it is. Maybe it's something as simple as just missing a flag or something you need to set in PyTorch. I would appreciate it if someone could take a look.
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
# data
df = pd.read_csv("test.csv")
print(df)
print()
# separate the output column
y_name = df.columns[-1]
y_df = df[y_name]
X_df = df.drop(y_name, axis=1)
# numpy arrays
X_ar = np.array(X_df, dtype=np.float32)
y_ar = np.array(y_df, dtype=np.float32)
# torch tensors
X_tensor = torch.from_numpy(X_ar)
y_tensor = torch.from_numpy(y_ar)
# hyperparameters
in_features = X_ar.shape[1]
hidden_size = 100
out_features = 1
epochs = 500
# model
class Net(nn.Module):
    def __init__(self, hidden_size):
        super(Net, self).__init__()
        self.L0 = nn.Linear(in_features, hidden_size)
        self.N0 = nn.ReLU()
        self.L1 = nn.Linear(hidden_size, hidden_size)
        self.N1 = nn.Tanh()
        self.L2 = nn.Linear(hidden_size, hidden_size)
        self.N2 = nn.ReLU()
        self.L3 = nn.Linear(hidden_size, 1)

    def forward(self, x):
        x = self.L0(x)
        x = self.N0(x)
        x = self.L1(x)
        x = self.N1(x)
        x = self.L2(x)
        x = self.N2(x)
        x = self.L3(x)
        return x
model = Net(hidden_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
# train
print("training")
for epoch in range(1, epochs + 1):
    # forward
    output = model(X_tensor)
    cost = criterion(output, y_tensor)
    # backward
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
    # print progress
    if epoch % (epochs // 10) == 0:
        print(f"{epoch:6d} {cost.item():10f}")
print()
output = model(X_tensor)
cost = criterion(output, y_tensor)
print("mean squared error:", cost.item())
Can you please print the shape of your input?
I would say check these things first (a short sketch applying the first few points follows the list):
Check that your target y has shape (N, 1); I don't know if PyTorch throws an error in this case. You can use y.reshape(-1, 1) if it isn't 2-dimensional.
Your learning rate is high. Usually when using Adam the default value is good enough, or simply try lowering your learning rate; 0.1 is a high value to start with.
Place optimizer.zero_grad() as the first line inside the for loop.
Normalize/standardize your data (this is usually good for NNs).
Remove outliers from your data (my opinion: I think this doesn't affect random forests much, but it can affect NNs badly).
Use cross-validation (maybe skorch can help you here; it's a scikit-learn wrapper for PyTorch and easy to use if you know Keras).
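A minimal sketch of the first three points applied to your training loop (the learning rate value is just a starting assumption):
y_tensor = torch.from_numpy(y_ar).reshape(-1, 1)            # target shaped (N, 1) to match the model output
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)   # lower learning rate than 0.1

for epoch in range(1, epochs + 1):
    optimizer.zero_grad()                                    # clear gradients at the top of the loop
    output = model(X_tensor)
    cost = criterion(output, y_tensor)
    cost.backward()
    optimizer.step()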
Notice that a random forest regressor or any other regressor can outperform neural nets in some cases. There are some fields where neural nets are the heroes, like image classification or NLP, but you need to be aware that a simple regression algorithm can outperform them, usually when your data is not big enough.
I am a deep learning and Tensorflow beginner and I am trying to implement the algorithm in this paper using Tensorflow. This paper uses Matconvnet+Matlab to implement it, and I am curious if Tensorflow has the equivalent functions to achieve the same thing. The paper said:
The network parameters were initialized using the Xavier method [14]. We used the regression loss across four wavelet subbands under l2 penalty and the proposed network was trained by using the stochastic gradient descent (SGD). The regularization parameter (λ) was 0.0001 and the momentum was 0.9. The learning rate was set from 10^-1 to 10^-4 which was reduced in log scale at each epoch.
This paper uses the wavelet transform (WT) and a residual learning method (where the residual image = WT(HR) - WT(HR'), and HR' is used for training). The Xavier method suggests initializing the variables from a normal distribution with
stddev = sqrt(2 / (filter_size * filter_size * num_filters))
Q1. How should I initialize the variables? Is the code below correct?
weights = tf.Variable(tf.random_normal[img_size, img_size, 1, num_filters], stddev=stddev)
This paper does not explain how to construct the loss function in detail. I am unable to find an equivalent TensorFlow function to set the learning rate on a log scale (only exponential_decay). I understand MomentumOptimizer is equivalent to stochastic gradient descent with momentum.
Q2: Is it possible to set the learning rate in log scale?
Q3: How to create the loss function described above?
I followed this website to write the code below. Assume the model() function returns the network mentioned in this paper and lambda = 0.0001,
inputs = tf.placeholder(tf.float32, shape=[None, patch_size, patch_size, num_channels])
labels = tf.placeholder(tf.float32, [None, patch_size, patch_size, num_channels])
# get the model output and weights for each conv
pred, weights = model()
# define loss function
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=pred)
regularizers = 0.0
for weight in weights:
    regularizers += tf.nn.l2_loss(weight)
loss = tf.reduce_mean(loss + 0.0001 * regularizers)
learning_rate = tf.train.exponential_decay(???) # Not sure if we can have custom learning rate for log scale
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step)
NOTE: As I am a deep learning/Tensorflow beginner, I copy-paste code here and there so please feel free to correct it if you can ;)
Q1. How should I initialize the variables? Is the code below correct?
Use tf.get_variable or switch to slim (it does the initialization automatically for you). Example:
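A minimal sketch with the contrib Xavier initializer (the 3x3 filter shape is an assumption; adjust it to your architecture):
weights = tf.get_variable("weights",
                          shape=[filter_size, filter_size, 1, num_filters],
                          initializer=tf.contrib.layers.xavier_initializer())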
Q2: Is it possible to set the learning rate in log scale?
You can but do you need it? This is not the first thing that you need to solve in this network. Please check #3
However, just for reference, use the following notation.
learning_rate_node = tf.train.exponential_decay(learning_rate=0.001, decay_steps=10000, decay_rate=0.98, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_node).minimize(loss)
Q3: How to create the loss function described above?
First, you have not included the "pred" to "image" conversion in this message (based on the paper, you need to apply subtraction and IDWT to obtain the final image).
There is one problem here: logits have to be calculated based on your label data, i.e. if you use the marked data as "Y : Label", you need to write
pred = model()
pred = tf.matmul(pred, weights) + biases
logits = tf.nn.softmax(pred)
loss = tf.reduce_mean(tf.abs(logits - labels))
This will give you the output of Y : Label to be used
If your dataset's labeled images are the denoised ones, then you need to follow this one:
pred = model()
pred = tf.matmul(image, weights) + biases
logits = tf.nn.softmax(pred)
image = apply_IDWT("X : input", logits) # this will apply IDWT(x_label - y_label)
loss = tf.reduce_mean(tf.abs(image - labels))
Logits are the output of your network. You will use this as the result to calculate the rest. Instead of matmul, you can add a conv2d layer here without batch normalization or an activation function, and set the output feature count to 4. Example:
pred = model()
pred = slim.conv2d(pred, 4, [3, 3], activation_fn=None, padding='SAME', scope='output')
logits = tf.nn.softmax(pred)
image = apply_IDWT("X : input", logits) # this will apply IDWT(x_label - y_label)
loss = tf.reduce_mean(tf.abs(logits - labels))
This loss function will give you basic training capabilities. However, this is the L1 distance and it may suffer from some issues (check). Consider the following situation.
Let's say you have the following array as output, [10, 10, 10, 0, 0], and you are trying to achieve [10, 10, 10, 10, 10]. In this case, your loss is 20 (10 + 10). However, you have 3/5 success. Also, it may indicate some overfit.
For the same target, consider the output [6, 6, 6, 6, 6]. It still has a loss of 20 (4 + 4 + 4 + 4 + 4). However, whenever you apply a threshold of 5, you can achieve 5/5 success. Hence, this is the case that we want.
If you use L2 loss, for the first case, you will have 10^2 + 10^2 = 200 as loss output. For the second case, you will get 4^2 * 5 = 80.
Hence, the optimizer will try to move away from case #1 as quickly as possible to achieve global success rather than perfect success on some outputs and complete failure on the others. You can apply a loss function like this for that:
tf.reduce_mean(tf.nn.l2_loss(logits - image))
Alternatively, you can check the cross-entropy loss function (it applies softmax internally, so do not apply softmax twice):
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, image))
Q1. How should I initialize the variables? Is the code below correct?
That's correct (although it's missing an opening parenthesis). You could also look into tf.get_variable if the variables are going to be reused.
Q2: Is it possible to set the learning rate in log scale?
Exponential decay decreases the learning rate at every step. I think what you want is tf.train.piecewise_constant, with boundaries set at each epoch, for example:
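A minimal sketch, assuming steps_per_epoch is known and four epochs going from 1e-1 down to 1e-4 on a log scale:
global_step = tf.train.get_or_create_global_step()
boundaries = [steps_per_epoch, 2 * steps_per_epoch, 3 * steps_per_epoch]
values = [1e-1, 1e-2, 1e-3, 1e-4]          # one learning rate per epoch
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
train_op = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step=global_step)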
EDIT: Look at the other answer, use the staircase=True argument!
Q3: How to create the loss function described above?
Your loss function looks correct.
Other answers are very detailed and helpful. Here is a code example that uses a placeholder to decay the learning rate on a log scale. HTH.
import tensorflow as tf
import numpy as np
# data simulation
N = 10000
D = 10
x = np.random.rand(N, D)
w = np.random.rand(D,1)
y = np.dot(x, w)
print y.shape
#modeling
batch_size = 100
tni = tf.truncated_normal_initializer()
X = tf.placeholder(tf.float32, [batch_size, D])
Y = tf.placeholder(tf.float32, [batch_size,1])
W = tf.get_variable("w", shape=[D,1], initializer=tni)
B = tf.zeros([1])
lr = tf.placeholder(tf.float32)
pred = tf.add(tf.matmul(X,W), B)
print pred.shape
mse = tf.reduce_sum(tf.losses.mean_squared_error(Y, pred))
opt = tf.train.MomentumOptimizer(lr, 0.9)
train_op = opt.minimize(mse)
learning_rate = 0.0001
do_train = True
acc_err = 0.0
sess = tf.Session()
sess.run(tf.global_variables_initializer())
while do_train:
    for i in range (100000):
        if i > 0 and i % N == 0:
            # epoch done, decrease learning rate by 2
            learning_rate /= 2
            print "Epoch completed. LR =", learning_rate
        idx = i/batch_size + i%batch_size
        f = {X:x[idx:idx+batch_size,:], Y:y[idx:idx+batch_size,:], lr: learning_rate}
        _, err = sess.run([train_op, mse], feed_dict = f)
        acc_err += err
        if i%5000 == 0:
            print "Average error = {}".format(acc_err/5000)
            acc_err = 0.0
By following the tutorials on TensorFlow and reading some basic material about neural networks, I have modeled a neural network using Python and the TensorFlow library.
As of now, my .csv file data is as follows:
AT V AP RH PE
14.96 41.76 1024.07 73.17 463.26
25.18 62.96 1020.04 59.08 444.37
5.11 39.4 1012.16 92.14 488.56
20.86 57.32 1010.24 76.64 446.48
10.82 37.5 1009.23 96.62 473.9
26.27 59.44 1012.23 58.77 443.67
15.89 43.96 1014.02 75.24 467.35
9.48 44.71 1019.12 66.43 478.42
14.64 45 1021.78 41.25 475.98
.....................................
As of now, I have designed my neural network to work with multiple inputs and multiple outputs. In the above data, I am considering the first three columns as my inputs and the next two columns as my outputs. So, once I have trained on the data, if I pass the inputs 14.64, 45, 1021.78, I want my neural network to predict the output values 41.25 and 475.98.
Here is my current code:
import tensorflow as tf
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
rng = np.random
# Parameters
learning_rate = 0.01
training_epochs = 5000
display_step = 1000
batch_size = 100
# Read data from CSV
df = pd.read_csv("H:\MiniThessis\Sample.csv")
# In[173]:
# Seperating out dependent & independent variable
train_x = df[['AT','V','AP']]
train_y = df[['RH','PE']]
trainx = train_x.as_matrix().astype(np.float32)
trainy = train_y.as_matrix().astype(np.float32)
n_input = 3
n_classes = 2
n_hidden_1 = 20
n_hidden_2 = 20
n_samples = len(trainx)
# tf Graph Input
#Inserts a placeholder for a tensor that will be always fed.
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
# Set model weights
W_h1 = tf.Variable(tf.random_normal([n_input, n_hidden_1]))
W_h2 = tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]))
W_out = tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
b_h1 = tf.Variable(tf.zeros([n_hidden_1]))
b_h2 = tf.Variable(tf.zeros([n_hidden_2]))
b_out = tf.Variable(tf.zeros([n_classes]))
# In[175]:
# Construct a linear model
layer_1 = tf.add(tf.matmul(x, W_h1), b_h1)
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, W_h2), b_h2)
layer_2 = tf.nn.relu(layer_2)
out_layer = tf.matmul(layer_2, W_out) + b_out
# In[176]:
# Mean squared error
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=y))
cost = tf.reduce_mean(tf.pow(out_layer-y, 2))/(2*n_samples)
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=y))
# Gradient descent
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# In[177]:
# Initializing the variables
init = tf.global_variables_initializer()
# In[181]:
initialval = 0
finalval = 100
batchcount = int(n_samples/100)
remainder = n_samples%100
print(remainder)
if remainder!=0:
    batchcount = batchcount + 1
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        avg_cost = 0.
        for batchIdx in range(batchcount):
            if remainder != 0 and batchIdx==batchcount-1:
                finalval = finalval-(100-remainder)
            subtrainx = trainx[initialval:finalval]
            subtrainy = trainy[initialval:finalval]
            _, c = sess.run([optimizer, cost], feed_dict={x: subtrainx, y: subtrainy})
            initialval = initialval+100
            finalval = finalval+100
            avg_cost += c/batchcount
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", \
                  "{:.9f}".format(avg_cost))
    #print("Optimization Finished!")
    #training_cost = sess.run(cost, feed_dict={x: trainx,y: trainy})
    #print(training_cost)
    best = sess.run([out_layer], feed_dict={x: np.array([[14.96,41.76,1024.07]])})
    print(best)
The architecture of my neural network is as follows:
1) The numbers of input nodes (n_input) and output nodes (n_classes) are 3 and 2 respectively.
2) As of now, I am considering two hidden layers, each having 20 nodes.
I need help regarding the following points:
1) How do I select the parameters "training_epochs", "learning_rate", and "batch_size" here, so that I can get better accuracy?
2) I am still not sure about the architecture of my neural network. How many hidden layers is it recommended to use? And how many nodes in each hidden layer?
3) Suppose, in my data, I want to use the first two columns as my inputs and the next three columns as outputs; what changes do I need to make? Do I also need to change my complete architecture?
4) Also, I am not sure about my cost function. Which one is better to use for better accuracy?
5) Also, please let me know if I am missing some important parameter that is worth considering!
Thanks in advance!
The questions you are asking are quite general, and if a universal answer to them were discovered, then data scientists would cease to exist, since the process of constructing machine learning systems would then be automated.
1, 2, 4. For the learning rate and epoch count the rules of thumb are, respectively, "the less the better" and "the more the better". In practice, however, you also need your machine to finish training before the heat death of the universe sets in.
Usually, you look at the values of the error function, which is your network's score on the dataset you train it on. At first the network learns from the data and the score gets better with each epoch. After a while, the network has learned all it can from the dataset and the score stops improving. After that there's no point in continuing. You may also quit earlier if the error is small enough for your purposes.
The learning rate can also be chosen on the basis of the evolution of the error. The higher the rate is, the faster the network will finish learning, but if it is too high, the network can overshoot the optimal value of the parameters and get worse and worse over time. So you need to find a compromise between the speed of learning and the accuracy. It is not uncommon to reduce the learning rate as the epoch counter increases.
You've also used the word "accuracy", but you first need to define what it means in terms of your problem. (It is tightly related to the cost function, actually.)
The problem you've described falls into the class of linear/nonlinear regression (as opposed to logistic regression); for these problems the mean squared error (which is used in your code) is customary. Cross-entropy criteria are usually more suited for classification problems. If you have some domain expertise about your data, you may of course devise specialized cost functions for your network.
There is no single answer about the best structure of the network. Its complexity depends on the function you are trying to approximate. If the connection between input and output data is linear, then you don't need anything more than a single neuron.
Generally, the task of deciding on the structure of a network is an optimization problem itself. So, you first set aside some test data with answers, then train your network on the rest of the data, then see how well it performs on the test data, which it has not seen before. You test the network, modify it, train it, and test again. Based on the performance of the network on both the training and test sets you decide how to alter it next.
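A minimal sketch of such a split with scikit-learn, using trainx and trainy from your code (the 80/20 ratio is just a common default):
from sklearn.model_selection import train_test_split

train_x, test_x, train_y, test_y = train_test_split(trainx, trainy, test_size=0.2, random_state=42)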
3. You will need to change the number of inputs and outputs. And you will have to train the network anew.
Also, these and many more basic questions are covered in many courses on the internet; consider taking one. One such course can be found here.
I have generated some input data in a CSV where 'awesomeness' is 'age * 10'. It looks like this:
age, awesomeness
67, 670
38, 380
32, 320
69, 690
40, 400
It should be trivial to write a tensorflow model that can predict 'awesomeness' from 'age', but I can't make it work.
When I run training, the output I get is:
accuracy: 0.0 <----------------------------------- What!??
accuracy/baseline_label_mean: 443.8
accuracy/threshold_0.500000_mean: 0.0
auc: 0.0
global_step: 6000
labels/actual_label_mean: 443.8
labels/prediction_mean: 1.0
loss: -2.88475e+09
precision/positive_threshold_0.500000_mean: 1.0
recall/positive_threshold_0.500000_mean: 1.0
Please note that this is obviously a completely contrived example, but that is because I was getting the same result with a more complex meaningful model with a much larger data set; 0% accuracy.
This is my attempt at the most minimal possible reproducible test case that I can make which exhibits the same behaviour.
Here's what I'm doing, based on the census example for the DNNClassifier from tflearn:
COLUMNS = ["age", "awesomeness"]
CONTINUOUS_COLUMNS = ["age"]
OUTPUT_COLUMN = "awesomeness"
def build_estimator(model_dir):
    """Build an estimator."""
    age = tf.contrib.layers.real_valued_column("age")
    deep_columns = [age]
    m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
                                       feature_columns=deep_columns,
                                       hidden_units=[50, 10])
    return m

def input_fn(df):
    """Input builder function."""
    feature_cols = {k: tf.constant(df[k].values, shape=[df[k].size, 1]) for k in CONTINUOUS_COLUMNS}
    output = tf.constant(df[OUTPUT_COLUMN].values, shape=[df[OUTPUT_COLUMN].size, 1])
    return feature_cols, output

def train_and_eval(model_dir, train_steps):
    """Train and evaluate the model."""
    train_file_name, test_file_name = training_data()
    df_train = pd.read_csv(...)  # omitted for clarity
    df_test = pd.read_csv(...)
    m = build_estimator(model_dir)
    m.fit(input_fn=lambda: input_fn(df_train), steps=train_steps)
    results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
    for key in sorted(results):
        print("%s: %s" % (key, results[key]))

def training_data():
    """Return path to the training and test data"""
    training_datafile = path.join(path.dirname(__file__), 'data', 'data.training')
    test_datafile = path.join(path.dirname(__file__), 'data', 'data.test')
    return training_datafile, test_datafile
model_folder = 'scripts/model' # Where to store the model
train_steps = 2000 # How many iterations to run while training
train_and_eval(model_folder, train_steps)
A couple of notes:
The original example tutorial this is based on is here https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/wide_n_deep_tutorial.py
Notice I am using the DNNClassifier, not the LinearClassifier as I want specifically to deal with continuous input variables.
A lot of examples just use 'premade' data sets which are known to work with examples; my data set has been manually generated and is absolutely not random.
I have verified the csv loader is loading the data correctly as int64 values.
Training and test data are generated identically, but have different values in them; however, using data.training as the test data still returns 0% accuracy, so clearly something isn't working; this isn't just over-fitting.
First of all, you are describing a regression task, not a classification task. Therefore, both DNNClassifier and LinearClassifier would be the wrong thing to use. That also makes accuracy the wrong quantity to use to tell whether your model works or not. I suggest you read up on these two different contexts, e.g. in the book "The Elements of Statistical Learning".
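If you want to stay with the estimator API, a minimal sketch of swapping in the regressor variant, tf.contrib.learn.DNNRegressor (a sketch only, not tested against your data):
def build_estimator(model_dir):
    """Build a regression estimator instead of a classifier."""
    age = tf.contrib.layers.real_valued_column("age")
    return tf.contrib.learn.DNNRegressor(model_dir=model_dir,
                                         feature_columns=[age],
                                         hidden_units=[50, 10])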
But here is a short answer to your problem. Say you have a linear model
awesomeness_predicted = slope * age
where slope is the parameter you want to learn from the data. Let's say you have data age[0], ..., age[N] and the corresponding awesomeness values a_data[0], ..., a_data[N]. In order to quantify how well your model works, we are going to use the mean squared error, that is
error = sum((a_data[i] - a_predicted[i])**2 for i in range(N))
What you want to do now is start with a random guess for slope and gradually improve it using gradient descent. Here is a full working example in pure TensorFlow:
import tensorflow as tf
import numpy as np
DTYPE = tf.float32
## Generate Data
age = np.array([67, 38, 32, 69, 40])
awesomeness = 10 * age
## Generate model
# define the parameter of the model
slope = tf.Variable(initial_value=tf.random_normal(shape=(1,), dtype=DTYPE))
# define the data inputs to the model as variable size tensors
x = tf.placeholder(DTYPE, shape=(None,))
y_data = tf.placeholder(DTYPE, shape=(None,))
# specify the model
y_pred = slope * x
# use mean squared error as loss function
loss = tf.reduce_mean(tf.square(y_data - y_pred))
target = tf.train.AdamOptimizer().minimize(loss)
## Train Model
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(100000):
        _, training_loss = sess.run([target, loss],
                                    feed_dict={x: age, y_data: awesomeness})
    print("Training loss: ", training_loss)
    print("Found slope=", sess.run(slope))
There are a few things I would like to say.
Assuming you load the data correctly:
- This looks like a regression task and you are using a classifier. I'm not saying it doesn't work at all, but this way you are giving a label to each entry of age, and training on the whole batch each epoch is very unstable.
- You are getting a huge value for the loss; your gradients are exploding. With this toy dataset you probably need to tune hyperparameters like the number of hidden neurons, the learning rate, and the number of epochs. Try to log the loss value for each epoch and see if that may be the problem.
- Last suggestion: make your data work with a simpler model suited for your task, like a regression model, and then scale up.
See also https://github.com/tflearn/tflearn/blob/master/examples/basics/multiple_regression.py for using tflearn to solve this.
""" Multiple Regression/Multi target Regression Example
The input features have 10 dimensions, and target features are 2 dimension.
"""
from __future__ import absolute_import, division, print_function
import tflearn
import numpy as np
# Regression data- 10 training instances
#10 input features per instance.
X=np.random.rand(10,10).tolist()
#2 output features per instance
Y=np.random.rand(10,2).tolist()
# Multiple Regression graph, 10-d input layer
input_ = tflearn.input_data(shape=[None,10])
#10-d fully connected layer
r1 = tflearn.fully_connected(input_,10)
#2-d fully connected layer for output
r1 = tflearn.fully_connected(r1,2)
r1 = tflearn.regression(r1, optimizer='sgd', loss='mean_square',
metric='R2', learning_rate=0.01)
m = tflearn.DNN(r1)
m.fit(X,Y, n_epoch=100, show_metric=True, snapshot_epoch=False)
#Predict for 1 instance
testinstance=np.random.rand(1,10).tolist()
print("\nInput features: ",testinstance)
print("\n Predicted output: ")
print(m.predict(testinstance))