Using an MLP to associate integers (inputs) with histograms (outputs) using TensorFlow - python

I'm struggling to solve this issue and I believe it is due to my data. I'm thinking about this as a few-to-many regression problem, but there could be a better approach in TensorFlow.
Training Data
I have some data generated from a video sequence. For each frame of video I have a distribution of x,y positions for each cluster. There are 157,110 frames and 200,000 clusters. The frames and clusters are the inputs, which are integers and I think could be considered labels (I'll be using another network to learn the sequences of clusters later on). As each histogram is related to both a frame and clusterID, the input is not "one hot". The histograms (outputs) have 19+8 (x+y) bins where each count is rarely above 10, and could be normalized.
A subset of the training data is available here: The first two columns are the frame and clusterID (inputs) and the remaining 19+8 columns are the histograms (outputs).
What is the best network to learn to generate the appropriate histogram for a given frame/clusterID pair?
The following code is my current attempt using an MLP. It does not converge; in fact, the cost does not decrease at all. Is there something wrong in my implementation, or my choice of MLP, or a lack of scaling in my input data?
#!/usr/bin/python
# This program uses tensorflow to learn cluster probabilities and associate them with frame and cluster IDs

# Arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("clusterProbabilityfile", help="CSV file containing cluster probabilities")
parser.add_argument("trainingIterations", type=int, help="Number of training iterations (epochs)")
args = parser.parse_args()

# Imports for ML
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import dtypes

# Imports for loading CSV file
from tensorflow.python.platform import gfile
import csv

# Global vars
numInputUnits = 2
numOutputUnits = 19+8
numHiddenUnits = (numOutputUnits-numInputUnits)/2
workingDirectory = args.clusterProbabilityfile.split('/')[0]+"/"
columnSplit = 2  # Column number that splits inputs from outputs

# Shuffle training set
def shuffleTrainingSet(trainingSet):
    trainingIndecies = np.arange(len(trainingSet.data))  # assumes len(data) == len(target)
    np.random.shuffle(trainingIndecies)  # shuffle indices
    data = trainingSet.data[trainingIndecies]
    target = trainingSet.target[trainingIndecies]
    training_set = tf.contrib.learn.datasets.base.Dataset(data=data, target=target)
    return training_set

# Load training data from CSV file, convert to numpy arrays and construct Dataset
# Modified from tf.contrib.learn.datasets.base.load_csv_without_header
# Should these be randomized???
with gfile.Open(args.clusterProbabilityfile) as csv_file:
    data_file = csv.reader(csv_file)
    data, target = [], []
    for row in data_file:
        target.append(row[columnSplit+1:])  # All elements past the split column.
        data.append(row[:columnSplit])  # All elements before and including the split column.
    target = np.array(target, dtype=int)
    data = np.array(data, dtype=int)
training_set = tf.contrib.learn.datasets.base.Dataset(data=data, target=target)
training_set = shuffleTrainingSet(training_set)

# Construct computation graph
# MLP approach (from https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/multilayer_perceptron.py)
# Single hidden layer!
inputVec = tf.placeholder(tf.float32, [None, numInputUnits])
outputVec = tf.placeholder(tf.float32, [None, numOutputUnits])
# Weights
hiddenWeights = tf.Variable(tf.random_normal([numInputUnits, numHiddenUnits]))  # inputUnits -> hiddenUnits
outputWeights = tf.Variable(tf.random_normal([numHiddenUnits, numOutputUnits]))  # hiddenUnits -> outputUnits
# Biases
hiddenBiases = tf.Variable(tf.random_normal([numHiddenUnits]))
outputBiases = tf.Variable(tf.random_normal([numOutputUnits]))
# Construct MLP from layers
hiddenLayer = tf.add(tf.matmul(inputVec, hiddenWeights), hiddenBiases)  # input * weight + bias = hidden
hiddenLayer = tf.nn.relu(hiddenLayer)  # ReLU activation function for hidden layer.
outputLayer = tf.add(tf.matmul(hiddenLayer, outputWeights), outputBiases)  # hidden * weight + bias = output
# Loss and optimizer
#cross_entropy = -(outputVec * tf.log(outputLayer) + (1 - outputVec) * tf.log(1 - outputLayer))
#cost = tf.reduce_mean(cross_entropy)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(outputLayer, outputVec))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

# Compute graph
sess = tf.Session()
sess.run(tf.initialize_all_variables())
for epoch in range(args.trainingIterations):
    training_set = shuffleTrainingSet(training_set)  # Reshuffle for each epoch.
    epochCost = sess.run(cost, feed_dict={inputVec: training_set.data, outputVec: training_set.target})
    print("{:d}\t{:f}".format(epoch, epochCost))

# Evaluate model
correct_prediction = tf.equal(tf.argmax(outputLayer, 1), tf.argmax(outputVec, 1))  # compare output layer with target output vector.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Cost:", sess.run(cost, feed_dict={inputVec: training_set.data, outputVec: training_set.target}))
print("Accuracy:", sess.run(accuracy, feed_dict={inputVec: training_set.data, outputVec: training_set.target}))

Related

RuntimeError: shape '[265, 265]' is invalid for input of size 265

I am trying to train a model using Gaussian Process regression in Python, and I have some problems in predicting. The training set is of size 265, and after having trained the model, I am trying to predict on the test set.
I get the error "RuntimeError: shape '[265, 265]' is invalid for input of size 265" and I don't really understand why.
I also get a vector as the loss while training (instead of a scalar). I solved the loss problem by doing loss.mean() (even if I still don't understand why the loss is returned as a vector), but I have not managed to solve the other problem.
265 is the size of the training set (and not of the test set), so I imagine the error is related to that and not to the test set that I give in input.
Thank you very much for your help! I am including all my code; the problem is in the predict function. PS: I am sorry, but I don't know how to upload the training and test data here.
'''
import os
import typing
#from sklearn.gaussian_process.kernels import *
import numpy as np
#from sklearn.gaussian_process import GaussianProcessRegressor
#from sklearn.model_selection import cross_val_score
#from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from matplotlib import cm
import gpytorch
import matplotlib.pyplot as plt
from matplotlib import rcParams
import numpy as np
import torch
import pandas as pd


class Model(object):  # were we allowed to change here?
    """
    Model for this task.
    You need to implement the fit_model and predict methods
    without changing their signatures, but are allowed to create additional methods.
    """
    def __init__(self):  # are we allowed to change here?
        """
        Initialize your model here.
        We already provide a random number generator for reproducibility.
        """
        self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
        # self.mean_module = gpytorch.means.ZeroMean()  # prior mean
        # self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())  # covariance (which kernel to use?)
        self.model = None
        self.rng = np.random.default_rng(seed=0)  # it was already present
        # TODO: Add custom initialization for your model here if necessary

    def predict(self, x: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Predict the pollution concentration for a given set of locations.
        :param x: Locations as a 2d NumPy float array of shape (NUM_SAMPLES, 2)
            # TODO: for us it is 1-d!!!
        :return:
            Tuple of three 1d NumPy float arrays, each of shape (NUM_SAMPLES,),
            containing your predictions, the GP posterior mean, and the GP posterior stddev (in that order)
        """
        # TODO: Use your GP to estimate the posterior mean and stddev for each location here
        gp_mean = np.zeros(x.shape[0], dtype=float)
        gp_std = np.zeros(x.shape[0], dtype=float)
        x = torch.Tensor(x)
        # Turn the model into evaluation mode
        self.model.eval()
        self.likelihood.eval()
        # The gpytorch.settings.fast_pred_var flag activates LOVE (for fast variances)
        # See https://arxiv.org/abs/1803.06058
        with torch.no_grad(), gpytorch.settings.fast_pred_var():
            # Obtain the predictive mean and covariance matrix
            #print(x.size())
            f_preds = self.model(x)  # TODO: check the input!!!
            print(type(f_preds))
            gp_mean = np.array(f_preds.mean)
            gp_std = torch.sqrt(f_preds.variance)  # check if it is right
            gp_std = np.array(gp_std)
        # Make predictions by feeding model through likelihood
        # observed_pred = self.likelihood(f_preds)  # I think it is useless
        # TODO: Use the GP posterior to form your predictions here
        predictions = gp_mean
        # check if we want to change the predictions for the asymmetric cost
        '''for i in range(0, len(predictions)):
            if (predictions[i] + 3 * gp_std[i] > 35.5) and (predictions[i] < 35.5):
                predictions[i] = 35.51
            elif ((predictions[i] - gp_std[i]) > 35.5):
                predictions[i] = predictions[i] - gp_std[i]'''
        return predictions, gp_mean, gp_std

    def fit_model(self, train_x: np.ndarray, train_y: np.ndarray):
        """
        Fit your model on the given training data.
        :param train_x: Training features as a 2d NumPy float array of shape (NUM_SAMPLES, 2)  # TODO: 1-d for us!!!
        :param train_y: Training pollution concentrations as a 1d NumPy float array of shape (NUM_SAMPLES,)
        """
        # TODO: Fit your model here
        # Put the model into training mode
        # initialize likelihood and model (necessary?)
        train_x = torch.Tensor(train_x)
        train_y = torch.Tensor(train_y)
        # torch.Tensor takes numpy matrices as input and turns them into torch tensors;
        # pytorch already provides backpropagation by default
        # clustering
        # samplesize=5000
        # train_x = train_x[0:samplesize, :]
        # train_y = train_y[0:samplesize]
        #
        self.model = ExactGPModel(train_x, train_y, self.likelihood)
        self.model.train()
        self.likelihood.train()
        # understand how to manage these last 4 rows
        # Use the SGD optimizer
        optimizer = torch.optim.Adam(
            [{'params': self.model.parameters()}],  # Includes GaussianLikelihood parameters
            lr=0.1)  # if instead of "Adam" we put "SGD" it uses stochastic gradient descent! Or minibatch gradient descent
        # TODO: choose which optimizer to use
        # we can always use stochastic, minibatch or standard (batch)
        # we can use gridsearch, change parameters like lr, beta1, beta2, ... in order to have a better approximation
        # "Loss" for GPs - the marginal log likelihood
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model)
        training_iter = 1200  # number of epochs
        for i in range(training_iter):
            # Zero gradients from previous iteration
            optimizer.zero_grad()
            # Output from model
            output = self.model(train_x)
            # Calc loss and backprop gradients
            loss = -mll(output, train_y)
            loss = torch.mean(loss)  # TODO: understand how to deal with this loss, we shouldn't do the mean!!!
            #print(loss)
            #print(type(loss))
            loss.backward()
            print('Iter %d/%d - Loss: %.3f   lengthscale: %.3f   noise: %.3f' % (
                i + 1, training_iter, loss.item(),
                self.model.covar_module.base_kernel.lengthscale.item(),
                self.model.likelihood.noise.item()))
            optimizer.step()


# Auxiliary class
class ExactGPModel(gpytorch.models.ExactGP):
    # every object of ExactGPModel will have every module of models.ExactGP
    def __init__(self, train_x, train_y, likelihood):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ZeroMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(nu=1 / 2))

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)


# Load the training dataset and test features
trainfilename = "TrainingData.txt"
testfilename = "TestingData.txt"
'''train_set = pd.read_csv(trainfilename)
test_set = pd.read_csv(testfilename)
# Convert Pandas DataFrame to NumpyArray
train_set = train_set.to_numpy()
test_set = test_set.to_numpy()
meant = np.mean(train_set[:,0:1])
sdt = np.std(train_set[:,0:1])
train_set[:,0:1] = (train_set[:,0:1]-meant)/sdt
train_set = torch.from_numpy(train_set)
meant = np.mean(test_set[:,0:1])
sdt = np.std(test_set[:,0:1])
test_set[:,0:1] = (test_set[:,0:1]-meant)/sdt
test_set = torch.from_numpy(test_set)
train_x = train_set[:,0:1]
train_y = train_set[:,1:2]
test_x = test_set[:,0:1]'''
train_set = np.loadtxt(trainfilename, delimiter=',', skiprows=1)
test_x = np.loadtxt(testfilename, delimiter=',', skiprows=1)
train_x = train_set[:,0:1]
train_y = train_set[:,1:2]
#print(type(train_x))  # the type looks fine

# Fit the model
print('Fitting model')
model = Model()
model.fit_model(train_x, train_y)  # TODO: how to give the input here???

# Predict on the test features
print('Predicting on test features')
#print(test_x.size())
print(test_x.shape)
predicted_y = model.predict(test_x)  # TODO: how to give the input here???
print(predicted_y)
# TODO: RuntimeError: shape '[265, 265]' is invalid for input of size 265
# TODO: I think it does not get the covariance matrix
'''
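One detail that may matter here (an assumption on my part, not something confirmed in the post): train_y = train_set[:,1:2] keeps the targets as a 2-D array of shape (265, 1), while GPyTorch's ExactGP expects 1-D targets of shape (265,). A 2-D target is a common cause of both a non-scalar marginal log-likelihood (the "vector loss") and shape mismatches such as '[265, 265]' at prediction time. A minimal sketch of that change:

# Assumption/sketch only: GPyTorch's ExactGP expects 1-D training targets.
# Slicing with [:, 1] instead of [:, 1:2] keeps train_y as shape (N,).
import numpy as np

train_set = np.loadtxt("TrainingData.txt", delimiter=',', skiprows=1)
train_x = train_set[:, 0:1]   # features stay 2-D: (N, 1)
train_y = train_set[:, 1]     # targets become 1-D: (N,)

model = Model()
model.fit_model(train_x, train_y)   # -mll(output, train_y) should now be a scalar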

keeping example index correspondence with tf.keras.predict and tf.data.Dataset

I'm using the tf.keras API in TensorFlow2. I have 100,000 images or so that are saved as TFRecords (128 images per record). Each record has an input image, target image, and frame index. I can't find a clean way to keep the frame index with the prediction.
Here is an example, except I build a dataset with NumPy arrays instead of reading from TFRecords:
import tensorflow as tf
from tensorflow import keras
import numpy as np
# build dummy tf.data.Dataset
x = np.random.random(10000).astype(np.float32)
y = x + np.random.random(10000).astype(np.float32) * 0.1
idx = np.arange(10000, dtype=np.uint16)
np.random.shuffle(idx) # frames are random in my TFRecord files
ds = tf.data.Dataset.from_tensor_slices((x, y, idx))
# pretend ds returned from TFRecord
ds = ds.map(lambda f0, f1, f2: (f0, f1)) # strip off idx
ds = ds.batch(32)
# build and train model
x = keras.Input(shape=(1,))
y_hat = keras.layers.Dense(1)(x) # i.e. linear regression
model = keras.Model(x, y_hat)
model.compile('sgd', 'mse')
history = model.fit(ds, epochs=5)
# predict 1 batch
model.predict(ds, steps=1)
Short of reading through the dataset again to extract the indices (which is prone to error), is there a clean way to keep prediction correspondence with image index? In TF1.x it was straightforward. But I'd like to take advantage of clean Keras compile(), fit(), predict() API in TF2.
OK, I was overthinking this; it's pretty easy actually. Just add the index to the dataset when you are making predictions, and pull the indices out as you iterate through the batches:
import tensorflow as tf
from tensorflow import keras
import numpy as np

def build_dataset(mode):
    np.random.seed(1)
    x = np.random.random(10000).astype(np.float32)
    y = x + np.random.random(10000).astype(np.float32) * 0.1
    idx = np.arange(10000, dtype=np.uint16)
    if mode == 'train':
        ds = tf.data.Dataset.from_tensor_slices((x, y))
        ds = ds.shuffle(128)
    else:
        ds = tf.data.Dataset.from_tensor_slices((x, idx))
    ds = ds.batch(32)
    return ds

# build and train simple linear regression model
x_tf = keras.Input(shape=(1,))
yhat_tf = keras.layers.Dense(1)(x_tf)
model = keras.Model(x_tf, yhat_tf)
model.compile(optimizer='sgd', loss='mse')
ds = build_dataset('train')
history = model.fit(ds, epochs=5)

# predict 1 batch
ds = build_dataset('predict')
for batch in ds:
    x_tf, indices_tf = batch
    yhat_np = model.predict(x_tf)
    break
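As a small extension of the answer above (my own addition, not part of the original post), the same loop can accumulate every prediction together with its frame index instead of stopping after one batch:

import numpy as np

# Collect (index, prediction) pairs across the whole prediction dataset.
ds = build_dataset('predict')
all_idx, all_yhat = [], []
for x_batch, idx_batch in ds:
    yhat_batch = model.predict(x_batch)   # same call as in the loop above
    all_idx.append(idx_batch.numpy())
    all_yhat.append(np.ravel(yhat_batch))
all_idx = np.concatenate(all_idx)
all_yhat = np.concatenate(all_yhat)
# all_yhat[i] is the prediction for the frame whose index is all_idx[i]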

Efficient example implementation of GPU-training of a simple feed-forward NN in TensorFlow? Maybe with tf.data?

I just started using the GPU version of TensorFlow hoping that it would speed up the training of my feed-forward neural networks. I am able to train on my GPU (GTX1080ti), but unfortunately it is not notably faster than doing the same training on my CPU (i7-8700K), at least not the way I've currently implemented it. During training, the GPU appears to barely be utilized at all, which makes me suspect that the bottleneck in my implementation is how the data is copied from the host to the device using feed_dict.
I’ve heard that TensorFlow has something called the “tf.data” pipeline which is supposed to make it easier and faster to feed data to GPUs etc. However I have not been able to find any simple examples where this concept is implemented into multilayer perceptron training as a replacement for feed_dict.
Is anyone aware of such an example and can point me to it? Preferably as simple as possible since I’m new to TensorFlow in general. Or is there something else I should change in my current implementation to make it more efficient? I’m pasting the code I have here:
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
tf.reset_default_graph()
import time

# Function for iris dataset.
def get_iris_data():
    iris = datasets.load_iris()
    data = iris["data"]
    target = iris["target"]
    # Convert to one-hot vectors
    num_labels = len(np.unique(target))
    all_Y = np.eye(num_labels)[target]
    return train_test_split(data, all_Y, test_size=0.33, random_state=89)

# Function which initializes tensorflow weights & biases for feed-forward NN.
def InitWeights(LayerSizes):
    with tf.device('/gpu:0'):
        # Make tf placeholders for network inputs and outputs.
        X = tf.placeholder(shape=(None, LayerSizes[0]),
                           dtype=tf.float32,
                           name='InputData')
        y = tf.placeholder(shape=(None, LayerSizes[-1]),
                           dtype=tf.float32,
                           name='OutputData')
        # Initialize weights and biases.
        W = {}; b = {}
        for ii in range(len(LayerSizes)-1):
            layername = f'layer%s' % ii
            with tf.variable_scope(layername):
                ny = LayerSizes[ii]
                nx = LayerSizes[ii+1]
                # Weights (initialized with xavier initialization).
                W['Weights_'+layername] = tf.get_variable(
                    name='Weights_'+layername,
                    shape=(ny, nx),
                    initializer=tf.contrib.layers.xavier_initializer(),
                    dtype=tf.float32
                )
                # Bias (initialized with xavier initialization).
                b['Bias_'+layername] = tf.get_variable(
                    name='Bias_'+layername,
                    shape=(nx),
                    initializer=tf.contrib.layers.xavier_initializer(),
                    dtype=tf.float32
                )
    return W, b, X, y

# Function for forward propagation of NN.
def FeedForward(X, W, b):
    with tf.device('/gpu:0'):
        # Initialize 'a' of first layer to the placeholder of the network input.
        a = X
        # Loop all layers of the network.
        for ii in range(len(W)):
            # Use name of each layer as index.
            layername = f'layer%s' % ii
            ## Weighted sum: z = input*W + b
            z = tf.add(tf.matmul(a, W['Weights_'+layername], name='WeightedSum_z_'+layername), b['Bias_'+layername])
            ## Passed through activation fcn: a = h(z)
            if ii == len(W)-1:
                a = z
            else:
                a = tf.nn.relu(z, name='activation_a_'+layername)
    return a

if __name__ == "__main__":
    # Import data
    train_X, test_X, train_y, test_y = get_iris_data()
    # Define network size [ninputs-by-256-by-outputs]
    LayerSizes = [4, 256, 3]
    # Initialize weights and biases.
    W, b, X, y = InitWeights(LayerSizes)
    # Define loss function to optimize.
    yhat = FeedForward(X, W, b)
    loss = tf.reduce_sum(tf.square(y - yhat), reduction_indices=[0])
    # Define optimizer to use when minimizing loss function.
    all_variables = tf.trainable_variables()
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001)
    train_op = optimizer.minimize(loss, var_list=all_variables)
    # Start tf session and initialize variables.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # Train 10000 minibatches and time how long it takes.
    t0 = time.time()
    for i in range(10000):
        ObservationsToUse = np.random.choice(len(train_X), 32)
        X_minibatch = train_X[ObservationsToUse, :]
        y_minibatch = train_y[ObservationsToUse, :]
        sess.run(train_op, feed_dict={X: X_minibatch, y: y_minibatch})
    t1 = time.time()
    print('Training took %0.2f seconds' % (t1-t0))
    sess.close()
The speed might be low because:
You are creating placeholders. The NumPy data has to be fed into the placeholders on every step, and only then is it converted into tensors of the graph.
By using tf.data.Dataset, you can create a direct pipeline which makes the data flow directly into the graph without the need for placeholders. It is fast, scalable, and has a number of functions to play around with.
with np.load("/var/data/training_data.npy") as data:
    features = data["features"]
    labels = data["labels"]

# Assume that each row of `features` corresponds to the same row as `labels`.
assert features.shape[0] == labels.shape[0]
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
Some useful functions:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(32)  # Creating batches
dataset = dataset.repeat(num_epochs)  # repeat the dataset 'N' times
iterator = dataset.make_one_shot_iterator()  # Create an iterator to retrieve batches of data
X, Y = iterator.get_next()
Here, 32 is the batch size.
In your case,
dataset = tf.data.Dataset.from_tensor_slices((data, targets))
Hence, there is no need for placeholders. Directly run:
session.run( train_op ) # no feed_dict!!
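Pulling the pieces together, here is a rough sketch of how the asker's script could be rewired to use tf.data. This is my own illustration, assuming the InitWeights/FeedForward functions and the iris arrays (train_X, train_y, LayerSizes) from the question; the buffer size and step count are arbitrary.

# Sketch: feed the network from a tf.data pipeline instead of placeholders.
dataset = tf.data.Dataset.from_tensor_slices(
    (train_X.astype(np.float32), train_y.astype(np.float32)))
dataset = dataset.shuffle(buffer_size=1000).batch(32).repeat()
iterator = dataset.make_one_shot_iterator()
X_batch, y_batch = iterator.get_next()        # tensors, not placeholders

W, b, _, _ = InitWeights(LayerSizes)          # returned placeholders go unused
yhat = FeedForward(X_batch, W, b)
loss = tf.reduce_sum(tf.square(y_batch - yhat))
train_op = tf.train.GradientDescentOptimizer(0.0001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10000):
        sess.run(train_op)                    # no feed_dict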

Tensorflow and reading binary data properly

I am trying to properly read in my own binary data to Tensorflow based on Fixed length records section of this tutorial, and by looking at the read_cifar10 function here. Mind you I am new to tensorflow, so my understanding may be off.
My Data
My files are binary with float32 type. The first 32-bit sample is the label, and the remaining 256 samples are the data. I want to reshape the data at the end to a [2, 128] matrix.
My Code So far:
import tensorflow as tf
import os

def read_data(filename_queue):
    item_type = tf.float32
    label_items = 1
    data_items = 256
    label_bytes = label_items * item_type.size
    data_bytes = data_items * item_type.size
    record_bytes = label_bytes + data_bytes
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    key, value = reader.read(filename_queue)
    record_data = tf.decode_raw(value, item_type)
    # labels = tf.cast(tf.strided_slice(record_data, [0], [label_items]), tf.int32)
    label = tf.strided_slice(record_data, [0], [label_items])
    data0 = tf.strided_slice(record_data, [label_items], [label_items + data_items])
    data = tf.reshape(data0, [2, data_items/2])
    return data, label

if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # Set GPU device
    datafiles = ['train_0000.dat', 'train_0001.dat']
    num_epochs = 2
    filename_queue = tf.train.string_input_producer(datafiles, num_epochs=num_epochs, shuffle=True)
    data, label = read_data(filename_queue)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        (x, y) = read_data(filename_queue)
        print(y.eval())
This code hangs at the print(y.eval()) call, but I fear I have much bigger issues than that.
Question:
When I execute this, I get a data and a label tensor returned. The problem is I don't quite understand how to actually read the data from the tensor. For example, I understand the autoencoder example here, however that has a mnist.train.next_batch(batch_size) function that is called to read the next batch. Do I need to write that for my function, or is it handled by something internal to my read_data() function? If I need to write that function, what does it look like?
Are there any other obvious things I'm missing? My goal in using this method is to reduce I/O overhead, and not store all of the data in memory, since my files are quite large.
Thanks in advance.
Yes. You are pretty much done. At this point you need to:
1) Write your neural network model, which is supposed to take your data and return a label.
2) Write your cost function C, which takes the network prediction and the true label and gives you a cost.
3) Choose an optimizer.
4) Put everything together:
opt = tf.train.AdamOptimizer(learning_rate=0.001)
datafiles = ['train_0000.dat', 'train_0001.dat']
num_epochs = 2

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    filename_queue = tf.train.string_input_producer(datafiles, num_epochs=num_epochs, shuffle=True)
    data, label = read_data(filename_queue)
    example_batch, label_batch = tf.train.shuffle_batch(
        [data, label], batch_size=128)
    y_pred = model(data)
    loss = C(label, y_pred)
After which you iterate and minimize the loss with:
opt.minimize(loss)
See also tf.train.string_input_producer behavior in a loop for related information.
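For completeness, here is a rough sketch of what the full training loop could look like. This is my own illustration rather than part of the answer: model() and C() are the hypothetical network and cost function from steps 1) and 2), the capacity numbers are arbitrary, and the tf.train.Coordinator/start_queue_runners calls are the standard TF 1.x way to drive a string_input_producer queue.

datafiles = ['train_0000.dat', 'train_0001.dat']
filename_queue = tf.train.string_input_producer(datafiles, num_epochs=2, shuffle=True)
data, label = read_data(filename_queue)   # from the question

# shuffle_batch requires capacity/min_after_dequeue; these values are illustrative.
example_batch, label_batch = tf.train.shuffle_batch(
    [data, label], batch_size=128, capacity=2000, min_after_dequeue=1000)

y_pred = model(example_batch)              # hypothetical network
loss = C(label_batch, y_pred)              # hypothetical cost function
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            _, batch_loss = sess.run([train_op, loss])
    except tf.errors.OutOfRangeError:
        pass  # num_epochs of data exhausted
    finally:
        coord.request_stop()
        coord.join(threads)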

where and how to put the filename in this tensorflow code?

I've copied some TensorFlow code from Sirajology's GitHub. It should load a .csv into a single layer neural net.
My question is, how and where do I put the .csv file into the code?
Also, I don't understand if the code will automatically split the .csv into train and test data, or if I need to do that with some different code before it is fed to the neural net?
I've been spending a lot of time with python and tensorflow and understand some basic concepts but am still a total newb. Any help is appreciated! Thanks!!!
#I have eliminated all code that is obviously irrelevant to the question
tf.app.flags.DEFINE_string('train', None,
                           'File containing the training data (labels & features).')
tf.app.flags.DEFINE_string('test', None,
                           'File containing the test data (labels & features).')
tf.app.flags.DEFINE_integer('num_epochs', 1,
                            'Number of examples to separate from the training '
                            'data for the validation set.')
tf.app.flags.DEFINE_boolean('verbose', False, 'Produce verbose output.')
FLAGS = tf.app.flags.FLAGS

# Extract numpy representations of the labels and features given rows consisting of:
#   label, feat_0, feat_1, ..., feat_n
def extract_data(filename):
    # Arrays to hold the labels and feature vectors.
    labels = []
    fvecs = []
    # Iterate over the rows, splitting the label from the features. Convert labels
    # to integers and features to floats.
    for line in file(filename):
        row = line.split(",")
        labels.append(int(row[0]))
        fvecs.append([float(x) for x in row[1:]])
    # Convert the array of float arrays into a numpy float matrix.
    fvecs_np = np.matrix(fvecs).astype(np.float32)
    # Convert the array of int labels into a numpy array.
    labels_np = np.array(labels).astype(dtype=np.uint8)
    # Convert the int numpy array into a one-hot matrix.
    labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
    # Return a pair of the feature matrix and the one-hot label matrix.
    return fvecs_np, labels_onehot

def main(argv=None):
    # Be verbose?
    verbose = FLAGS.verbose
    # Get the data.
    train_data_filename = FLAGS.train
    test_data_filename = FLAGS.test
    # Extract it into numpy matrices.
    train_data, train_labels = extract_data(train_data_filename)
    test_data, test_labels = extract_data(test_data_filename)
    # Get the shape of the training data.
    train_size, num_features = train_data.shape
    # Get the number of epochs for training.
    num_epochs = FLAGS.num_epochs
    # This is where training samples and labels are fed to the graph.
    # These placeholder nodes will be fed a batch of training data at each
    # training step using the {feed_dict} argument to the Run() call below.
    x = tf.placeholder("float", shape=[None, num_features])
    y_ = tf.placeholder("float", shape=[None, NUM_LABELS])
    # For the test data, hold the entire dataset in one constant node.
    test_data_node = tf.constant(test_data)
    # Define and initialize the network.
    # These are the weights that inform how much each feature contributes to
    # the classification.
    W = tf.Variable(tf.zeros([num_features, NUM_LABELS]))
    b = tf.Variable(tf.zeros([NUM_LABELS]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    # Optimization.
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
    # Evaluation.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Create a local session to run this computation.
    with tf.Session() as s:
        # Run all the initializers to prepare the trainable parameters.
        tf.initialize_all_variables().run()
        if verbose:
            print('Initialized!')
            print
            print('Training.')
        # Iterate and train.
        for step in xrange(num_epochs * train_size // BATCH_SIZE):
            if verbose:
                print(step,)
            offset = (step * BATCH_SIZE) % train_size
            batch_data = train_data[offset:(offset + BATCH_SIZE), :]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            train_step.run(feed_dict={x: batch_data, y_: batch_labels})
            if verbose and offset >= train_size - BATCH_SIZE:
                print
        # Give very detailed output.
        if verbose:
            print
            print('Weight matrix.')
            print(s.run(W))
            print
            print('Bias vector.')
            print(s.run(b))
            print
            print("Applying model to first test instance.")
            first = test_data[:1]
            print("Point =", first)
            print("Wx+b = ", s.run(tf.matmul(first, W) + b))
            print("softmax(Wx+b) = ", s.run(tf.nn.softmax(tf.matmul(first, W) + b)))
            print
        print("Accuracy:", accuracy.eval(feed_dict={x: test_data, y_: test_labels}))

if __name__ == '__main__':
    tf.app.run()
It's expecting to receive them as arguments on the command line.
The lines below define those flags:
tf.app.flags.DEFINE_string('train', None,
                           'File containing the training data (labels & features).')
tf.app.flags.DEFINE_string('test', None,
                           'File containing the test data (labels & features).')
tf.app.flags.DEFINE_integer('num_epochs', 1,
                            'Number of examples to separate from the training '
                            'data for the validation set.')
So, you just have to run it as:
python YourScript.py --train FileName.csv --test TestName.csv --num_epochs 5 --verbose True
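On the second part of the question: the script does not split a single CSV for you; it expects two separate files for --train and --test. A quick sketch of one way to create them beforehand (my own suggestion, using pandas and scikit-learn; the input file name is a placeholder):

import pandas as pd
from sklearn.model_selection import train_test_split

# Hypothetical input file with one label column followed by feature columns.
df = pd.read_csv('AllData.csv', header=None)
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)
train_df.to_csv('FileName.csv', index=False, header=False)
test_df.to_csv('TestName.csv', index=False, header=False)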
