training accuracy drops in tensorflow - python

I was trying to create a model for character recognition.
This model worked fine with a 28*28 dataset and characters 0-9, but its training accuracy drops when I change to 64*64 images and characters spanning 0-9, a-z, A-Z.
During training the accuracy climbs to about 0.3 and then stays there. I tried training with a different dataset as well, but the same thing happens.
Changing the learning rate to 0.001 does not help either.
Can anyone tell me what the issue is?
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import random as ran
import os
import tensorflow as tf

def TRAIN_SIZE(num):
    images = np.load("data/train/images64.npy").reshape([2852, 4096])
    labels = np.load("data/train/labels.npy")
    print('Total Training Images in Dataset = ' + str(images.shape))
    print('--------------------------------------------------')
    x_train = images[:num, :]
    print('x_train Examples Loaded = ' + str(x_train.shape))
    y_train = labels[:num, :]
    print('y_train Examples Loaded = ' + str(y_train.shape))
    print('')
    return x_train, y_train

def TEST_SIZE(num):
    images = np.load("data/test/images64.npy").reshape([558, 4096])
    labels = np.load("data/test/labels.npy")
    print('Total testing Images in Dataset = ' + str(images.shape))
    print('--------------------------------------------------')
    x_test = images[:num, :]
    print('x_test Examples Loaded = ' + str(x_test.shape))
    y_test = labels[:num, :]
    print('y_test Examples Loaded = ' + str(y_test.shape))
    print('')
    return x_test, y_test

def display_digit(num):
    # print(y_train[num])
    label = y_train[num].argmax(axis=0)
    image = x_train[num].reshape([64, 64])
    # plt.axis("off")
    plt.title('Example: %d Label: %d' % (num, label))
    plt.imshow(image, cmap=plt.get_cmap('gray_r'))
    plt.show()

def display_mult_flat(start, stop):
    images = x_train[start].reshape([1, 4096])
    for i in range(start + 1, stop):
        images = np.concatenate((images, x_train[i].reshape([1, 4096])))
    plt.imshow(images, cmap=plt.get_cmap('gray_r'))
    plt.show()

def get_char(a):
    if(a < 10):
        return a
    elif(a >= 10 and a < 36):
        return chr(a + 55)
    else:
        return chr(a + 61)

x_train, y_train = TRAIN_SIZE(2850)
x_test, y_test = TRAIN_SIZE(1900)

x = tf.placeholder(tf.float32, shape=[None, 4096])
y_ = tf.placeholder(tf.float32, shape=[None, 62])
W = tf.Variable(tf.zeros([4096, 62]))
b = tf.Variable(tf.zeros([62]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

with tf.Session() as sess:
    # x_test = x_test[1400:,:]
    # y_test = y_test[1400:,:]
    x_test, y_test = TEST_SIZE(400)
    LEARNING_RATE = 0.2
    TRAIN_STEPS = 1000
    sess.run(tf.global_variables_initializer())
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    training = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    for i in range(TRAIN_STEPS + 1):
        sess.run(training, feed_dict={x: x_train, y_: y_train})
        if i % 100 == 0:
            print('Training Step:' + str(i) +
                  ' Accuracy = ' + str(sess.run(accuracy, feed_dict={x: x_test, y_: y_test})) +
                  ' Loss = ' + str(sess.run(cross_entropy, {x: x_train, y_: y_train})))
    savedPath = tf.train.Saver().save(sess, "/tmp/model.ckpt")
    print("Model saved at: ", savedPath)

You are trying to classify 62 different digits and characters, but you use a single fully connected layer to do it. Your model simply does not have enough parameters for that task; in other words, you are underfitting the data. So either expand your network by adding parameters (layers) and/or use a CNN, which generally performs well on image classification tasks.
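For example, here is a minimal sketch of a small convolutional model, assuming TF 1.x and reusing the question's x (flattened 64*64 images) and y_ (62-way one-hot labels) placeholders; the tf.layers API and the layer sizes are illustrative choices, not tuned values:

# Hypothetical sketch: reshape the flat 4096-vector back into a 64x64 image and
# stack two conv/pool blocks plus a dense layer before the 62-way output.
x_image = tf.reshape(x, [-1, 64, 64, 1])
conv1 = tf.layers.conv2d(x_image, filters=32, kernel_size=5, padding='same', activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(conv1, pool_size=2, strides=2)
conv2 = tf.layers.conv2d(pool1, filters=64, kernel_size=5, padding='same', activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(conv2, pool_size=2, strides=2)
flat = tf.layers.flatten(pool2)
dense = tf.layers.dense(flat, units=512, activation=tf.nn.relu)
logits = tf.layers.dense(dense, units=62)  # raw scores; no softmax here
# the loss applies the softmax internally, which is numerically more stable
# than taking tf.log of a softmax output by hand
cross_entropy = tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=y_, logits=logits))
training = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)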

Try a different CNN model, such as Inception v1/v2/v3 or AlexNet.

Related

PyTorch minibatch training very slow

When training my model on the adult income data set with minibatches, training is very slow, regardless of whether I use PyTorch's DataLoader or a basic manual implementation of minibatching.
Is there a problem with my code, or is there another way to speed up training on the adult income data set? I want to use one-hot encoding and cross-entropy loss + softmax. Do I have to use a different loss function or remove the softmax layer?
import pandas as pd
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.model_selection import train_test_split
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset, TensorDataset
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.filterwarnings('ignore')

device = torch.device("cpu")

class Model(nn.Module):
    def __init__(self, input_dim):
        super(Model, self).__init__()
        self.layer1 = nn.Linear(input_dim, 12)
        self.layer2 = nn.Linear(12, 2)

    def forward(self, x):
        x = F.sigmoid(self.layer1(x))
        x = F.softmax(self.layer2(x))  # To check with the loss function
        return x

# load dataset
filename = './datasets/adult-all.csv'
dataframe = read_csv(filename, header=None, na_values='?')
# drop rows with missing values
dataframe = dataframe.dropna()
# summarize the class distribution
target = dataframe.values[:, -1]
# split into inputs and outputs
last_ix = len(dataframe.columns) - 1
X_, y = dataframe.drop(last_ix, axis=1), dataframe[last_ix]
# select categorical and numerical features
cat_ix = X_.select_dtypes(include=['object', 'bool']).columns
num_ix = X_.select_dtypes(include=['int64', 'float64']).columns
# label encode the target variable to have the classes 0 and 1
y = LabelEncoder().fit_transform(y)
# one-hot encoding of categorical features
df_cat = pd.get_dummies(X_[cat_ix])
# binning of numerical features
x = X_.drop(columns=cat_ix, axis=1)
est = KBinsDiscretizer(n_bins=3, encode='onehot-dense', strategy='uniform')
df_num = est.fit_transform(x)
X = pd.concat([df_cat.reset_index(drop=True), pd.DataFrame(df_num).reset_index(drop=True)], axis=1)
# split training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_tr = Variable(torch.tensor(X_train.values, dtype=torch.float))
X_te = Variable(torch.tensor(X_test.values, dtype=torch.float))
y_tr = Variable(torch.tensor(y_train, dtype=torch.long))
y_te = Variable(torch.tensor(y_test, dtype=torch.long))

def binary_cross_entropy_one_hot(input, target):
    return torch.nn.CrossEntropyLoss()(input, target)

def _accuracy(y_pred, y_true):
    classes = torch.argmax(y_pred, dim=1)
    labels = y_true
    accuracy = torch.mean((classes == labels).float())
    return accuracy

model = Model(X.shape[1])
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
epochs = 1000
accuracy = 0.0
minibatch = True

# training loop
train_loss = []
for epoch in range(epochs):
    if minibatch:
        batch_size = 128  # or whatever
        permutation = torch.randperm(X_tr.size()[0])
        for i in range(0, X_tr.size()[0], batch_size):
            optimizer.zero_grad()
            indices = permutation[i:i + batch_size]
            batch_x, batch_y = X_tr[indices], y_tr[indices]
            # in case you wanted a semi-full example
            outputs = model.forward(batch_x)
            loss = binary_cross_entropy_one_hot(outputs, batch_y)
            loss.backward()
            optimizer.step()
        if epoch % 100 == 0:
            print(f'epoch: {epoch:2} loss: {loss:10.8f}')
        # train_ds = TensorDataset(X_tr, y_tr)
        # train_dl = DataLoader(train_ds, batch_size=256, shuffle=True)
        # batch_loss = 0.0
        # batch_accuracy = 0.0
        # for nb, (x_batch, y_batch) in enumerate(train_dl):  # manually set number of batches?
        #     optimizer.zero_grad()
        #     y_pred_train = model(x_batch)
        #     loss = binary_cross_entropy_one_hot(y_pred_train, y_batch)
        #     loss.backward()
        #     optimizer.step()
        #     batch_loss += loss.item()
        #     batch_accuracy += _accuracy(y_pred_train, y_batch)
        # train_loss.append(batch_loss / (nb + 1))
        # accuracy = batch_accuracy / (nb + 1)
        # if epoch % 100 == 0:
        #     print(f'epoch: {epoch:2} loss: {train_loss[epoch]:10.8f}')
    else:
        optimizer.zero_grad()
        y_pred = model(X_tr)
        # computing the loss function
        loss = binary_cross_entropy_one_hot(y_pred, y_tr)
        if epoch % 100 == 0:
            print(f'epoch: {epoch:2} loss: {loss.item():10.8f}')
        loss.backward()
        optimizer.step()
        accuracy = _accuracy(y_pred, y_tr)

# evaluation on test data
with torch.no_grad():
    model.eval()
    y_pred = model(X_te)
    test_loss = binary_cross_entropy_one_hot(y_pred, y_te)
    test_acc = _accuracy(y_pred, y_te)
    print("Loss on test data: {:.4}".format(test_loss))
    print("Accuracy on test data: {:.4}".format(test_acc))
Time would depend on your input_dim, the size of your dataset, and the number of updates per epoch (dataset size // batch size). From what you've shared, I'm not exactly sure what the issue is or whether there actually is a bottleneck. However, here are a couple of things I would point out, which might help you (in no particular order):
There is no need to wrap your data with torch.autograd.Variable. It has been deprecated and is no longer needed; autograd automatically works with torch.Tensors that have requires_grad set to True.
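For instance, the tensors from the question's code can be created directly (a minimal sketch reusing the question's names):

# plain tensors are enough for inputs and targets; requires_grad is only
# needed for tensors you want gradients with respect to
X_tr = torch.tensor(X_train.values, dtype=torch.float)
y_tr = torch.tensor(y_train, dtype=torch.long)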
If you are using torch.nn.CrossEntropyLoss, you shouldn't apply F.softmax to your model's output, because CrossEntropyLoss combines nn.LogSoftmax() and nn.NLLLoss(). There is also no need to instantiate the module every time you want to call it:
criterion = torch.nn.CrossEntropyLoss()

def binary_cross_entropy_one_hot(input, target):
    return criterion(input, target)
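The corresponding forward pass would then return raw logits and leave the log-softmax to the loss. A sketch based on the question's Model class (torch.sigmoid is used here because F.sigmoid is deprecated):

def forward(self, x):
    x = torch.sigmoid(self.layer1(x))
    return self.layer2(x)  # raw logits; CrossEntropyLoss applies log-softmax itself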
I see you are redefining your data loader on each epoch. Is that really what you want? If not, you can just define it outside the training loop:
train_ds = TensorDataset(X_tr, y_tr)
train_dl = DataLoader(train_ds, batch_size=256, shuffle=True)

for epoch in range(epochs):
    for x, y in train_dl:
        # ...
I would call .item() on your accuracy (when calling _accuracy) so that it is not kept attached to the computation graph and its memory can be released.
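For example, in the commented-out DataLoader loop from the question's code:

# .item() converts the 0-d tensor into a plain Python float, detaching the
# accuracy value from the autograd graph so the graph can be freed
batch_accuracy += _accuracy(y_pred_train, y_batch).item()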

How Can I Extract Predictions from A Softmax Layer on Tensorflow

I'm trying to extract predictions, use them to calculate accuracy/precision/recall/F1, and get the prediction probabilities. I know I have 10 output classes, so I can't compute precision per se, but I will be doing all of this in other models, and I'd like to be able to extract the prediction probabilities. My model is as follows. I've checked GitHub and Stack Overflow, but I have yet to find a way to extract those properties; most of the answers come close but never answer what I need. I've used small epoch numbers here in order to check the model quickly and keep the output less crowded.
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split

mnist = fetch_mldata('MNIST original', data_home="data/mnist/")
lb = LabelBinarizer().fit(mnist.target)
X_train, X_test, y_train, y_test = train_test_split(mnist.data, lb.transform(mnist.target),
                                                    train_size=0.9, test_size=0.1)

X = tf.placeholder(tf.float32, shape=(None, 784))
y = tf.placeholder(tf.int64, shape=(None, 10))

lOne = fully_connected(inputs=X, num_outputs=100, activation_fn=tf.nn.elu)
logits = fully_connected(inputs=lOne, num_outputs=10, activation_fn=tf.nn.softmax)

pred = logits
acc = tf.metrics.accuracy(labels=y, predictions=pred)

loss = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=y)
trainOP = tf.train.AdamOptimizer(0.001).minimize(loss)

import numpy as np
bSize = 100
batches = int(np.floor(X_train.shape[0]/bSize)+1)

def batcher(dSet, bNum):
    return(dSet[bSize*(bNum-1):bSize*(bNum)])

epochs = 2
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(0, epochs):
        for batch in range(1, batches):
            X_batch = batcher(X_train, batch)
            y_batch = batcher(y_train, batch)
            sess.run(trainOP, feed_dict={X: X_batch, y: y_batch})
        lossVal = sess.run([loss], feed_dict={X: X_test, y: y_test})
        print(lossVal)
    sess.close()
The code shared in the question covers training, but not "using" (inferring with) the resulting model.
Two issues:
The trained model is not serialized, so future runs will run on an untrained model and predict whatever their initialization tells them to. Hence the comment on the question suggesting to save the trained model and restore it when predicting.
The logits are the output of a SoftMax function. A common way to get a class from logits is to select the highest value in the tensor (here a vector).
With TensorFlow, the last point can be done with tf.argmax ("Returns the index with the largest value across axes of a tensor."):
tf.argmax(input=logits, axis=1)
All in all, the question's code only partially covers the MNIST tutorial from the TensorFlow team. Perhaps you will find more pointers there if you get stuck with this code.
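Putting both points together, a minimal sketch (reusing the question's pred, logits, X, and X_test; the checkpoint path is only an illustrative example):

predicted_class = tf.argmax(logits, axis=1)   # index of the most probable class
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ... run the training loop from the question here ...
    saver.save(sess, "models/mnist_model.ckpt")          # serialize the trained weights
    probs, classes = sess.run([pred, predicted_class],   # per-class probabilities + hard labels
                              feed_dict={X: X_test})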
I'm writing this in case anyone stumbles upon this particular case. I built a network following basic MNIST examples, used tf.nn.softmax in the final layer, and expected to get results from that layer. It looks like I need to apply the softmax function again to get the results, as in yPred = tf.nn.softmax(logits), with logits being the name of the output layer. I'm adding the fixed code below.
I could add a line to save the model, load it later, and make predictions with the saved model; since this is just an example of building the model, I've omitted the saving part.
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split

mnist = fetch_mldata('MNIST original', data_home="data/mnist/")
lb = LabelBinarizer().fit(mnist.target)
X_train, X_test, y_train, y_test = train_test_split(mnist.data, lb.transform(mnist.target),
                                                    train_size=0.9, test_size=0.1,
                                                    stratify=mnist.target, random_state=42)

X = tf.placeholder(tf.float32, shape=(None, 784))
y = tf.placeholder(tf.int64, shape=(None, 10))

lOne = fully_connected(inputs=X, num_outputs=100, activation_fn=tf.nn.elu)
lTwo = fully_connected(inputs=lOne, num_outputs=100, activation_fn=tf.nn.elu)
logits = fully_connected(inputs=lTwo, num_outputs=10, activation_fn=tf.nn.softmax)

pred = tf.nn.softmax(logits)
acc_bool = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
acc_Num = tf.cast(acc_bool, tf.float32)
acc_Mean = tf.reduce_mean(acc_Num)

loss = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=y)
trainOP = tf.train.AdamOptimizer(0.001).minimize(loss)

import numpy as np
bSize = 1024
batches = int(np.floor(X_train.shape[0]/bSize)+1)

def batcher(dSet, bNum):
    return(dSet[bSize*(bNum-1):bSize*(bNum)])

epochs = 250
init = tf.global_variables_initializer()
trainA = []
testA = []

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(0, epochs):
        for batch in range(1, batches):
            X_batch = batcher(X_train, batch)
            y_batch = batcher(y_train, batch)
            sess.run(trainOP, feed_dict={X: X_batch, y: y_batch})
        if epoch % 25 == 1:
            trainLoss, trainAcc = sess.run([loss, acc_Mean], feed_dict={X: X_train, y: y_train})
            testLoss, testAcc = sess.run([loss, acc_Mean], feed_dict={X: X_test, y: y_test})
            yPred = sess.run(pred, feed_dict={X: X_test[0].reshape(1, -1), y: y_test[0].reshape(1, -1)})
            print(yPred)
    sess.close()

Tensorflow network parameters not updating

Iris dataset classification, network parameters not updating
Hey, I tried to build a classifier with a logistic regression network, but my parameters are not updating: my weights, bias, output, and cost stay the same. Can somebody help me? I have no idea why the parameters are not updating or how to solve this. Thank you!
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split

df = pd.read_csv('/Users/Laurens-Wissels/Desktop/iris.csv')
x = np.array(df[["sepal_length","sepal_width","petal_length","petal_width"]])
scaler_model = MinMaxScaler()
x = scaler_model.fit_transform(x)
y = df["species"]

def yvalue(y):
    if y == "setosa":
        return [1,0,0]
    elif y == "versicolor":
        return [0,1,0]
    else:
        return [0,0,1]

y = y.apply(yvalue)
y = y.reshape(150,1)

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
print(y_train)

n_features = 4
n_species = 1
traing_epochs = 2000
learning_rate = 0.0001
n_samples = 105
display_step = 50

X = tf.placeholder(tf.float32, [105, n_features])
Y = tf.placeholder(tf.float32, [105, 1])
W = tf.Variable(tf.random_normal([n_features, n_species]))
b = tf.Variable(tf.random_normal([1]))

_y = tf.add(tf.matmul(X, W), b)
output = tf.nn.softmax(_y)
cost = tf.reduce_mean(tf.pow(Y - output, 2)) / (2 * n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

plotData = []
for i in range(traing_epochs):
    sess.run(optimizer, feed_dict={X: x_train, Y: y_train})  # Take a gradient descent step using our inputs and labels
    sess.run(output, feed_dict={X: x_train, Y: y_train})
    # That's all! The rest of the cell just outputs debug messages.
    # Display logs per epoch step
    if (i) % display_step == 0:
        cc = sess.run(cost, feed_dict={X: x_train, Y: y_train})
        print("_y:", _y)
        print("output:", output)
        print("w:", sess.run(W, feed_dict={X: x_train, Y: y_train}))
        print("Training step:", '%04d' % (i), "cost=", sess.run(cost, feed_dict={X: x_train, Y: y_train}))  # , "W=", sess.run(W), "b=", sess.run(b)
        print("-------------------------------------")
        plotData.append(sess.run(cost, feed_dict={X: x_train, Y: y_train}))

print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: x_train, Y: y_train})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
plt.plot(plotData)
plt.show()
tf.nn.softmax divides by the sum of the exponentiated elements (see the expression in the docs). If you only have one element in the dimension being summed over (last by default):
print(_y.shape)
(105, 1)
Then you end up with exp(x) / sum(exp(x)), which is a constant 1. So the gradient is 0 and therefore no training.
You could switch to tf.nn.sigmoid.
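A quick way to see this in isolation (a standalone sketch, TF 1.x):

import numpy as np
import tensorflow as tf

# softmax over an axis of length 1: exp(x) / exp(x) == 1 for every row,
# so the output carries no information and the gradients through it are zero
logits = tf.constant(np.random.randn(5, 1), dtype=tf.float32)
with tf.Session() as sess:
    print(sess.run(tf.nn.softmax(logits)))  # prints a column of ones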

Neural Network with 1500+ features cannot fit in TensorFlow

There are 1875 features in the data, correlated with people's loan records. Some of them have been used for a scorecard (KS = 27).
I wanted to use these features in a neural network to determine whether a person is good or bad. However, it turned out that everyone was classified as good (or everyone as bad), without any selectivity, even though I applied a method for imbalanced data (see '#imbalance data' in the code below).
Is there some problem in my code (the activation function?)? Can someone give me some tips? Thanks in advance!
from sas7bdat import SAS7BDAT
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix

f = SAS7BDAT('data_10212102.sas7bdat')
data = f.to_data_frame()

# drop some time features
drop_cols = [col for col in data.columns if 'TIME' in col]
data = data.drop(drop_cols, axis=1)

data.loc[data.TARGET == 0, 'Good'] = 1
data.loc[data.TARGET == 1, 'Good'] = 0
data = data.rename(columns={'TARGET': 'Bad'})

validation = data.ix[data.SETID == 3, :]
train_test_data = data.loc[~data.index.isin(validation.index)]
X_train = train_test_data.ix[train_test_data.SETID == 1, :]
X_test = train_test_data.ix[train_test_data.SETID == 2, :]
X_train = shuffle(X_train)
X_test = shuffle(X_test)
X_validation = shuffle(validation)

Y_train = X_train.Bad
Y_train = pd.concat([Y_train, X_train.Good], axis=1)
Y_test = X_test.Bad
Y_test = pd.concat([Y_test, X_test.Good], axis=1)
Y_validation = X_validation.Bad
Y_validation = pd.concat([Y_validation, X_validation.Good], axis=1)

ratio = len(X_train) / len(X_train.ix[X_train.Bad == 1, :])

X_train = X_train.drop(['Good', 'Bad'], axis=1)
X_test = X_test.drop(['Good', 'Bad'], axis=1)
X_validation = X_validation.drop(['Good', 'Bad'], axis=1)

# imbalance data
Y_train.Bad *= ratio
Y_test.Bad *= ratio
Y_validation.Bad *= ratio

# parameters
learning_rate = 0.001
training_epochs = 2000
batch_size = 512
display_step = 500
n_samples = X_train.shape[0]
n_features = 1845
n_class = 2

x = tf.placeholder(tf.float32, [None, n_features])
y = tf.placeholder(tf.float32, [None, n_class])

n_units = 2048
n_layers = 7
W = {}
b = {}
for i in range(n_layers):
    if i == 0:
        W[i] = tf.Variable(tf.random_normal([n_features, n_units]))
        b[i] = tf.Variable(tf.random_normal([n_units]))
        pred = tf.nn.sigmoid(tf.matmul(x, W[i]) + b[i])
    elif 0 < i < n_layers - 1:
        W[i] = tf.Variable(tf.random_normal([n_units, n_units]))
        b[i] = tf.Variable(tf.random_normal([n_units]))
        pred = tf.nn.sigmoid(tf.matmul(pred, W[i]) + b[i])
    else:
        W[i] = tf.Variable(tf.random_normal([n_units, n_class]))
        b[i] = tf.Variable(tf.random_normal([n_class]))
        pred = tf.nn.softmax(tf.matmul(pred, W[i]) + b[i])

cost = -tf.reduce_sum(y * tf.log(pred))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: X_train[i * batch_size : (i+1) * batch_size],
                                       y: Y_train[i * batch_size : (i+1) * batch_size]})
            avg_cost = c / total_batch
        plt.plot(epoch+1, avg_cost, 'co')
        if (epoch+1) % display_step == 0:
            print("Epoch:", "%04d" % (epoch+1), "avg_cost=", avg_cost)
            print("Training Accuracy:", accuracy.eval({x: X_train, y: Y_train}))
            print("Testing Accuracy:", accuracy.eval({x: X_test, y: Y_test}))
            print("Validating Accuracy:", accuracy.eval({x: X_validation, y: Y_validation}))
            train_prob = pred.eval(feed_dict={x: X_train})
            train_predict = tf.argmax(train_prob, 1).eval()
            print("train:", confusion_matrix(1 - Y_train['Good'], train_predict))
            test_prob = pred.eval(feed_dict={x: X_test})
            test_predict = tf.argmax(test_prob, 1).eval()
            print("test:", confusion_matrix(1 - Y_test['Good'], test_predict))
            validation_prob = pred.eval(feed_dict={x: X_validation})
            validation_predict = tf.argmax(validation_prob, 1).eval()
            print("validation:", confusion_matrix(1 - Y_validation['Good'], validation_predict))
    print("Optimization Finished!")
    # Save the variables to disk
    save_path = saver.save(sess, "./model_v0.ckpt")
    print("Model saved in file: %s" % save_path)

plt.xlabel("Epoch")
plt.ylabel("Cost")
plt.show()

SKLearn Multiclass Classifier

I have written the following code to import data vectors from a file and test the performance of an SVM classifier (using sklearn and Python).
However, the classifier's performance is lower than that of other classifiers (a neural net, for example, gives 98% accuracy on the test data, but this gives 92% at best). In my experience, an SVM should produce better results on this kind of data.
Am I possibly doing something wrong?
import numpy as np

def buildData(featureCols, testRatio):
    f = open("car-eval-data-1.csv")
    data = np.loadtxt(fname=f, delimiter=',')
    X = data[:, :featureCols]  # select columns 0:featureCols-1
    y = data[:, featureCols]   # select column featureCols
    n_points = y.size
    print "Imported " + str(n_points) + " lines."
    ### split into train/test sets
    split = int((1-testRatio) * n_points)
    X_train = X[0:split, :]
    X_test = X[split:, :]
    y_train = y[0:split]
    y_test = y[split:]
    return X_train, y_train, X_test, y_test

def buildClassifier(features_train, labels_train):
    from sklearn import svm
    #clf = svm.SVC(kernel='linear', C=1.0, gamma=0.1)
    #clf = svm.SVC(kernel='poly', degree=3, C=1.0, gamma=0.1)
    clf = svm.SVC(kernel='rbf', C=1.0, gamma=0.1)
    clf.fit(features_train, labels_train)
    return clf

def checkAccuracy(clf, features, labels):
    from sklearn.metrics import accuracy_score
    pred = clf.predict(features)
    accuracy = accuracy_score(pred, labels)
    return accuracy

features_train, labels_train, features_test, labels_test = buildData(6, 0.3)
clf = buildClassifier(features_train, labels_train)
trainAccuracy = checkAccuracy(clf, features_train, labels_train)
testAccuracy = checkAccuracy(clf, features_test, labels_test)
print "Training Items: " + str(labels_train.size) + ", Test Items: " + str(labels_test.size)
print "Training Accuracy: " + str(trainAccuracy)
print "Test Accuracy: " + str(testAccuracy)
i = 0
while i < labels_test.size:
    pred = clf.predict(features_test[i])
    print "F(" + str(i) + ") : " + str(features_test[i]) + " label= " + str(labels_test[i]) + " pred= " + str(pred)
    i = i + 1
How is it possible to do multi-class classification if it does not do it by default?
p.s. my data is of the following format (last column is the class):
2,2,2,2,2,1,0
2,2,2,2,1,2,0
0,2,2,5,2,2,3
2,2,2,4,2,2,1
2,2,2,4,2,0,0
2,2,2,4,2,1,1
2,2,2,4,1,2,1
0,2,2,5,2,2,3
I found the problem after a long time, and I am posting it in case someone else needs it.
The problem was that the data import function didn't shuffle the data. If the data is sorted in some way, there is the risk that you train the classifier on one part of the data and test it on totally different data. In the neural net case, MATLAB was used, which automatically shuffles the input data.
def buildData(filename, featureCols, testRatio):
    f = open(filename)
    data = np.loadtxt(fname=f, delimiter=',')
    np.random.shuffle(data)    # randomize the order
    X = data[:, :featureCols]  # select columns 0:featureCols-1
    y = data[:, featureCols]   # select column featureCols
    n_points = y.size
    print "Imported " + str(n_points) + " lines."
    ### split into train/test sets
    split = int((1-testRatio) * n_points)
    X_train = X[0:split, :]
    X_test = X[split:, :]
    y_train = y[0:split]
    y_test = y[split:]
    return X_train, y_train, X_test, y_test
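As a side note (an alternative sketch, not part of the original fix): sklearn's train_test_split shuffles by default, so it also avoids the ordering problem:

from sklearn.model_selection import train_test_split

# shuffle=True is the default, so a sorted input file is no longer a risk
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)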
