PyTorch minibatch training very slow - python

When I train my model on the adult income data set with minibatches, training is very slow, regardless of whether I use PyTorch's DataLoader or a basic manual minibatch implementation.
Is there a problem with my code, or is there another way to speed up training for the adult income data set? I want to use one-hot encoding and cross-entropy loss plus softmax. Do I have to use a different loss function or remove the softmax layer?
import pandas as pd
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.model_selection import train_test_split
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset, TensorDataset
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.filterwarnings('ignore')
device = torch.device("cpu")
class Model(nn.Module):
    def __init__(self, input_dim):
        super(Model, self).__init__()
        self.layer1 = nn.Linear(input_dim, 12)
        self.layer2 = nn.Linear(12, 2)

    def forward(self, x):
        x = F.sigmoid(self.layer1(x))
        x = F.softmax(self.layer2(x))  # To check with the loss function
        return x
# load dataset
filename = './datasets/adult-all.csv'
dataframe = read_csv(filename, header=None, na_values='?')
# drop rows with missing
dataframe = dataframe.dropna()
# summarize the class distribution
target = dataframe.values[:, -1]
# split into inputs and outputs
last_ix = len(dataframe.columns) - 1
X_, y = dataframe.drop(last_ix, axis=1), dataframe[last_ix]
# select categorical and numerical features
cat_ix = X_.select_dtypes(include=['object', 'bool']).columns
num_ix = X_.select_dtypes(include=['int64', 'float64']).columns
# label encode the target variable to have the classes 0 and 1
y = LabelEncoder().fit_transform(y)
# one-hot encoding of categorical features
df_cat = pd.get_dummies(X_[cat_ix])
# binning of numerical features
x = X_.drop(columns=cat_ix, axis=1)
est = KBinsDiscretizer(n_bins=3, encode='onehot-dense', strategy='uniform')
df_num = est.fit_transform(x)
X = pd.concat([df_cat.reset_index(drop=True), pd.DataFrame(df_num).reset_index(drop=True)], axis=1)
# split training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_tr = Variable(torch.tensor(X_train.values, dtype=torch.float))
X_te = Variable(torch.tensor(X_test.values, dtype=torch.float))
y_tr = Variable(torch.tensor(y_train, dtype=torch.long))
y_te = Variable(torch.tensor(y_test, dtype=torch.long))
def binary_cross_entropy_one_hot(input, target):
    return torch.nn.CrossEntropyLoss()(input, target)

def _accuracy(y_pred, y_true):
    classes = torch.argmax(y_pred, dim=1)
    labels = y_true
    accuracy = torch.mean((classes == labels).float())
    return accuracy
model = Model(X.shape[1])
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
epochs = 1000
accuracy = 0.0
minibatch = True
# training loop
train_loss = []
for epoch in range(epochs):
    if minibatch:
        batch_size = 128  # or whatever
        permutation = torch.randperm(X_tr.size()[0])
        for i in range(0, X_tr.size()[0], batch_size):
            optimizer.zero_grad()
            indices = permutation[i:i + batch_size]
            batch_x, batch_y = X_tr[indices], y_tr[indices]
            # in case you wanted a semi-full example
            outputs = model.forward(batch_x)
            loss = binary_cross_entropy_one_hot(outputs, batch_y)
            loss.backward()
            optimizer.step()
        if epoch % 100 == 0:
            print(f'epoch: {epoch:2} loss: {loss:10.8f}')
        # train_ds = TensorDataset(X_tr, y_tr)
        # train_dl = DataLoader(train_ds, batch_size=256, shuffle=True)
        # batch_loss = 0.0
        # batch_accuracy = 0.0
        # for nb, (x_batch, y_batch) in enumerate(train_dl):  # manually set number of batches?
        #     optimizer.zero_grad()
        #     y_pred_train = model(x_batch)
        #     loss = binary_cross_entropy_one_hot(y_pred_train, y_batch)
        #     loss.backward()
        #     optimizer.step()
        #     batch_loss += loss.item()
        #     batch_accuracy += _accuracy(y_pred_train, y_batch)
        # train_loss.append(batch_loss / (nb + 1))
        # accuracy = batch_accuracy / (nb + 1)
        # if epoch % 100 == 0:
        #     print(f'epoch: {epoch:2} loss: {train_loss[epoch]:10.8f}')
    else:
        optimizer.zero_grad()
        y_pred = model(X_tr)
        # computing the loss function
        loss = binary_cross_entropy_one_hot(y_pred, y_tr)
        if epoch % 100 == 0:
            print(f'epoch: {epoch:2} loss: {loss.item():10.8f}')
        loss.backward()
        optimizer.step()
        accuracy = _accuracy(y_pred, y_tr)

# evaluation on test data
with torch.no_grad():
    model.eval()
    y_pred = model(X_te)
    test_loss = binary_cross_entropy_one_hot(y_pred, y_te)
    test_acc = _accuracy(y_pred, y_te)
    print("Loss on test data: {:.4}".format(test_loss))
    print("Accuracy on test data: {:.4}".format(test_acc))

Training time depends on your input_dim, the size of your dataset, and the number of updates per epoch (dataset size divided by batch size). From what you've shared, I'm not exactly sure what the issue is or whether there is actually any bottleneck. However, here are a couple of things I would point out, which might help you (in no particular order):
No need to wrap your data with torch.autograd.Variable. It has been deprecated and is no longer needed; autograd automatically supports torch.Tensors with requires_grad set to True.
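For instance, the four tensor assignments above could simply be (a minimal sketch of the same lines without Variable):
X_tr = torch.tensor(X_train.values, dtype=torch.float)
X_te = torch.tensor(X_test.values, dtype=torch.float)
y_tr = torch.tensor(y_train, dtype=torch.long)
y_te = torch.tensor(y_test, dtype=torch.long)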
If you are using torch.nn.CrossEntropyLoss, you shouldn't apply F.softmax to your model's output, because CrossEntropyLoss already combines nn.LogSoftmax() and nn.NLLLoss(). There is also no need to instantiate the loss module every time you want to call it:
criterion = torch.nn.CrossEntropyLoss()

def binary_cross_entropy_one_hot(input, target):
    return criterion(input, target)
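The forward pass would then return raw logits, e.g. a sketch of the Model's forward above with the softmax removed (and torch.sigmoid in place of the deprecated F.sigmoid):
def forward(self, x):
    x = torch.sigmoid(self.layer1(x))
    return self.layer2(x)  # raw logits; CrossEntropyLoss applies log-softmax internally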
I see you are redefining your data loader on each epoch. Is that really what you want? If not, you can define it once, outside the training loop:
train_ds = TensorDataset(X_tr, y_tr)
train_dl = DataLoader(train_ds, batch_size=256, shuffle=True)

for epoch in range(epochs):
    for x, y in train_dl:
        # ...
I would call .item() on your accuracy (when calling _accuracy) so it is detached from the computation graph and its memory can be released.
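In the commented-out DataLoader branch above, that would be, for example:
batch_accuracy += _accuracy(y_pred_train, y_batch).item()  # plain Python float, detached from the graph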

Related

How to halt keras model training according to val_accuracy when there is only 1 epoch

I am training a keras model to perform some simple categorisation tasks. In my case, the model needs to learn how to make a judgement based on a task cue and a given task stimulus. The task stimulus is an array of 5 randomly generated numbers. To train the model, I don't need more than one epoch, as learning a specific stimulus is not the goal, so I set the number of epochs to 1. However, I don't want the model to reach 100% accuracy on the validation dataset, but 80%.
To achieve this goal, I used a callback function to stop training when the training accuracy reached 80%. But then I found that the accuracy on the validation dataset is much better than the training accuracy (see here). As I only have one epoch, how should I set up the callback function to make sure the model has 80% accuracy on the validation dataset? Thanks in advance!
Here are codes:
import numpy as np
import random
import tensorflow as tf
from tensorflow import keras
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
# random seed
from numpy.random import seed
seed(1234)
random.seed(10)
# set up a simple categorization task
def tasksets(train_num, test_num, task_num):
    x_train, y_train, rules_train = train_sequence(train_num, task_num)
    x_test, y_test, rules_test = train_sequence(test_num, task_num)
    return x_train, x_test, rules_train, y_train, y_test, rules_test

# generating training sequence
def train_sequence(trial_num, task_num):
    x = np.zeros((trial_num, task_num), dtype=np.float64)
    y = np.zeros(trial_num, dtype=np.float64)
    rules = np.zeros(trial_num, dtype=np.float64)
    rulepool = []
    for r in range(task_num):
        rulepool = rulepool + [r] * int(trial_num / task_num)
    random.shuffle(rulepool)
    for i in range(trial_num):
        for t in range(task_num):
            x[i, t] = random.random()  # multi-dimensional stimuli
        rule_idx = rulepool.pop(random.randint(0, len(rulepool) - 1))
        rules[i] = rule_idx
        if x[i, rule_idx] <= 0.5:
            answer = 0  # no
        elif x[i, rule_idx] > 0.5:
            answer = 1  # yes
        y[i] = answer
    x = np.reshape(x, (trial_num, task_num))
    y = tf.one_hot(y, 2)
    rules = np.reshape(rules, (trial_num))
    rules = tf.one_hot(rules, depth=task_num)
    return x, y, rules

def build_network(task_num, learning_rate: float = 0.001):
    # nsteps = 1
    input_dims = task_num
    inputs_stimuli = Input(shape=(input_dims), name="stimulus")
    inputs_rule = Input(shape=(input_dims), name="rule")
    hid1 = Dense(6, activation='relu', name="stimulus_representation")(inputs_stimuli)
    hid2 = Dense(6, activation='relu', name="rule_representation")(inputs_rule)  # back to dense
    fuse = keras.layers.concatenate([hid1, hid2])  # combine multiple inputs
    decision = Dense(100, activation="relu", name="decision")(fuse)  # back to dense
    output = Dense(2, activation="softmax", name="output")(decision)
    model = Model(inputs=[inputs_stimuli, inputs_rule], outputs=output)
    loss = tf.keras.losses.CategoricalCrossentropy()
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss=loss, metrics=["accuracy"])
    return model

# Instantiate a callback object
accuracy_threshold = 0.80

class myCallback(tf.keras.callbacks.Callback):
    def on_train_batch_end(self, batch, logs={}):
        keys = list(logs.keys())
        print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
        if logs.get('accuracy') > accuracy_threshold:
            print("\nReached %2.2f%% accuracy, so stopping training!!" % (accuracy_threshold * 100))
            self.model.stop_training = True

callbacks = myCallback()
# the model is trained until it reaches 80% accuracy in the test
check = 0
test_threshold = 1
task_num = 5
batch_size = 8
model = build_network(task_num)
x_train, x_test, rule_train, y_train, y_test, rule_test = tasksets(10000 * task_num, 100 * task_num, task_num)
results = model.fit([x_train, rule_train], y_train, epochs=1,
                    batch_size=batch_size, callbacks=callbacks,
                    validation_data=([x_test, rule_test], y_test))

I use pytorch to train a model to classify iris, but my acc was about 0.4

I have tried many improvements, like increasing epochs, using better loss functions and optimizers, deepening the network, and shuffling the dataset, but still to no avail. This problem has been bothering me for a long time; thanks for your help. Below is my code.
load and process dataset(updated)
def Iris_Reader(dataset):
    train_data, test_data, train_label, test_label = train_test_split(dataset.data, dataset.target, test_size=0.4)
    # scaler = StandardScaler()
    # train_data = scaler.fit_transform(train_data)
    # test_data = scaler.transform(test_data)
    return torch.FloatTensor(train_data), torch.LongTensor(train_label), torch.FloatTensor(test_data), torch.LongTensor(test_label)
Define the classifier
class Classifier(nn.Module):
    def __init__(self):
        super().__init__()
        # 4*3*3 network
        self.model = nn.Sequential(
            nn.Linear(4, 3),
            nn.ReLU(),
            nn.Linear(3, 3),
        )
        # SGD
        self.optimiser = torch.optim.SGD(self.parameters(), lr=0.1)
        # MSE LOSS_FUNCTION
        self.loss_fn = nn.CrossEntropyLoss()
        self.counter = 0
        self.progress = []

    def forward(self, input):
        return self.model(input)

    def train(self, input, target):
        output = self.forward(input)
        loss = self.loss_fn(output, target)
        self.counter += 1
        self.progress.append(loss.item())
        self.optimiser.zero_grad()
        loss.backward()
        self.optimiser.step()

    # plot loss
    def plot_loss(self):
        plt.figure(dpi=100)
        plt.ylim([0, 1.0])
        plt.yticks([0, 0.25, 0.5, 1.0])
        plt.scatter(x=[i for i in range(len(self.progress))], y=self.progress, marker='.', alpha=0.2)
        plt.grid('on')
        plt.show()
TRAIN
C = Classifier()
epochs = 10
dataset = datasets.load_iris()
for epoch in range(epochs):
    train_data, train_label, _, _ = Iris_Reader(dataset)
    for i, j in zip(train_data, train_label):
        C.train(i, j)
TEST
score = 0
num = 0
# for epoch in range(epochs):
_, _, test_data, test_label = Iris_Reader(dataset)
for i, j in zip(test_data, test_label):
    output = C.forward(i).detach().argmax()
    if output == j:
        # print(C.forward(i).detach(), j)
        score += 1
    num += 1
print(score, num, round(score/num, 3))
OUTPUT: 53 60 0.883
There's a bunch of problems here:
First, you seem to shuffle data and labels independently, rendering the dataset useless.
Also, you recreate the dataset inside the loop on every epoch, wasting CPU time pointlessly.
Overall, the dataset creation can be shortened to something like this:
def Iris_Reader(dataset):
    train_data, test_data, train_label, test_label = sklearn.model_selection.train_test_split(dataset.data, dataset.target, test_size=0.2)
    return torch.FloatTensor(train_data), torch.LongTensor(train_label), torch.FloatTensor(test_data), torch.LongTensor(test_label)
and should be taken outside the loop.
Next, MSELoss() is suited for regression. For classification, CrossEntropyLoss() is the default choice.
Using sigmoid as the activation in an intermediate layer is not the best choice, especially with a small number of epochs; ReLU should converge much better.
Last but not least, your loss chart would look much cleaner if the values were averaged per epoch.
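A minimal sketch of per-epoch averaging (assuming the question's Classifier instance C and a DataLoader called train_dl built from the training tensors, which is not in the original code):
for epoch in range(epochs):
    epoch_loss = 0.0
    for x_batch, y_batch in train_dl:
        C.optimiser.zero_grad()
        loss = C.loss_fn(C(x_batch), y_batch)
        loss.backward()
        C.optimiser.step()
        epoch_loss += loss.item()
    C.progress.append(epoch_loss / len(train_dl))  # one averaged point per epoch instead of one per sample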
Update: here is an implementation that ensures the target has the same size as the network output, with additional feature scaling:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def Iris_Reader(dataset):
    label = nn.functional.one_hot(torch.LongTensor(dataset.target), num_classes=3).float()
    train_data, test_data, train_label, test_label = train_test_split(dataset.data, label, test_size=0.2)
    scaler = StandardScaler()
    train_data = scaler.fit_transform(train_data)
    test_data = scaler.transform(test_data)
    return torch.FloatTensor(train_data), train_label, torch.FloatTensor(test_data), test_label
Oh, and you should probably also remove the final Sigmoid(), since CrossEntropyLoss() applies log-softmax internally anyway.
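With that reader, a quick usage sketch (my own illustration, reusing the question's Classifier; note that CrossEntropyLoss accepts one-hot float targets like these only in PyTorch 1.10+):
dataset = datasets.load_iris()
train_data, train_label, test_data, test_label = Iris_Reader(dataset)
C = Classifier()
for epoch in range(100):
    C.train(train_data, train_label)  # full-batch update; target shape (N, 3) matches the output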

Why does tensorflow show inaccurate loss?

I'm using Tensorflow to train a network to predict the third item in a list of numbers.
When I train, the network appears to train quite well and do well on both the training and test set. However, when I evaluate its performance myself, it seems to be doing quite poorly.
For example, at the end of training, Tensorflow says that the validation loss is 2.1 x 10^(-5). However, when I compute it myself, I get 0.17 x 10^0. What am I doing wrong?
Here's code that can be run on Google Colab:
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
def create_dataset(k=5, n=2, example_amount=200):
    '''Create a dataset of numbers where the goal is to always output the nth number'''
    # UPGRADE: this could be done better with numpy to just generate all the examples at once
    example_amount = 1000
    x = []
    y = []
    ans = [x, y]
    for i in range(example_amount):
        example_x = np.random.rand(k)
        example_y = example_x[n]
        x.append(example_x)
        y.append(example_y)
    return ans

def tensorize(tensor_like) -> tf.Tensor:
    '''Turn stuff into tensors'''
    return tf.convert_to_tensor(tensor_like, dtype=tf.float32)

def split_dataset(dataset, train_split=0.8, random_state=42):
    '''
    Takes in a list (or tuple) where index 0 contains the inputs and index 1 contains the outputs
    outputs x_train, x_test, y_train, y_test, train_indexes, test_indexes all as tf.Tensor
    '''
    indices = np.arange(len(dataset[0]))
    return tuple([tensorize(data) for data in train_test_split(dataset[0], dataset[1], indices, train_size=train_split, random_state=random_state)])
# how many numbers in each example
K = 5
# the index of the solution
N = 2
# how many examples
EXAMPLE_AMOUNT = 20000
# what percentage of the examples are in the training set
TRAIN_SPLIT = 0.5
# how long to train for
epochs = 50
dataset = create_dataset(K, N, EXAMPLE_AMOUNT)
x_train, x_test, y_train, y_test, train_indexes, test_indexes = split_dataset(dataset, train_split=TRAIN_SPLIT)
model_input = tf.keras.layers.Input(shape=(K,), name="input")
model_dense1 = tf.keras.layers.Dense(10, name="dense1")(model_input)
model_dense2 = tf.keras.layers.Dense(10, name="dense2")(model_dense1)
model_output = tf.keras.layers.Dense(1, name="output")(model_dense2)
model = tf.keras.Model(inputs=model_input, outputs=model_output)
model.compile(optimizer=tf.keras.optimizers.Adam(), loss="mse")
history = model.fit(x=x_train, y=y_train, validation_data=(x_test, y_test), epochs=epochs)
# the validation loss as Tensorflow computes it
print(history.history["val_loss"][-1]) # 2.1036579710198566e-05
# the validation loss as I compute it
val_loss = tf.math.reduce_mean(tf.keras.losses.MSE(y_test, model.predict(x_test))).numpy()
print(val_loss) # 0.1655631
What you're missing is the shape of y_test:
y_test.numpy().shape
(500,) <-- this is what causes the behaviour: MSE broadcasts the (500,) labels against the (500, 1) predictions instead of comparing them element-wise.
Simply reshape it like:
val_loss = tf.math.reduce_mean(tf.keras.losses.MSE(y_test.numpy().reshape(-1,1), model.predict(x_test))).numpy()
print(val_loss) # 1.1548506e-05
Also:
history.history["val_loss"][-1] # 1.1548506336112041e-05
Or you can flatten() both arrays when calculating it:
val_loss = tf.math.reduce_mean(tf.keras.losses.MSE(y_test.numpy().flatten(), model.predict(x_test).flatten())).numpy()
print(val_loss) # 1.1548506e-05
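To see the broadcasting effect in isolation (a toy example, unrelated to the model above):
import tensorflow as tf
a = tf.constant([1.0, 2.0, 3.0])        # shape (3,), like y_test
b = tf.constant([[1.0], [2.0], [3.0]])  # shape (3, 1), like model.predict(x_test)
print(tf.keras.losses.MSE(a, b).shape)                    # (3,) -- each prediction is compared against the whole label vector
print(tf.keras.losses.MSE(a, tf.reshape(b, [-1])).shape)  # () -- a single scalar, as intended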

Confusion matrix and test accuracy for PyTorch Transfer Learning tutorial

Following the PyTorch transfer learning tutorial, I am interested in reporting only train and test accuracy as well as a confusion matrix (say, using sklearn's confusion_matrix). How can I do that? The current tutorial only reports train/val accuracy, and I am having a hard time figuring out how to incorporate the sklearn confusion_matrix code there. Link to the original tutorial: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
%matplotlib inline
from graphviz import Digraph
import torch
from torch.autograd import Variable
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion()
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
data_dir = "images"
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                              shuffle=True, num_workers=4)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def visualize_model(model, num_images=6):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images // 2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 9)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=25)
visualize_model(model_ft)
Answer given by ptrblck of PyTorch community. Thanks a lot!
nb_classes = 9
confusion_matrix = torch.zeros(nb_classes, nb_classes)
with torch.no_grad():
    for i, (inputs, classes) in enumerate(dataloaders['val']):
        inputs = inputs.to(device)
        classes = classes.to(device)
        outputs = model_ft(inputs)
        _, preds = torch.max(outputs, 1)
        for t, p in zip(classes.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
print(confusion_matrix)
To get the per-class accuracy:
print(confusion_matrix.diag()/confusion_matrix.sum(1))
Here is a slightly modified (more direct) approach using sklearn's confusion_matrix:
from sklearn.metrics import confusion_matrix
nb_classes = 9
# Initialize the prediction and label lists(tensors)
predlist=torch.zeros(0,dtype=torch.long, device='cpu')
lbllist=torch.zeros(0,dtype=torch.long, device='cpu')
with torch.no_grad():
    for i, (inputs, classes) in enumerate(dataloaders['val']):
        inputs = inputs.to(device)
        classes = classes.to(device)
        outputs = model_ft(inputs)
        _, preds = torch.max(outputs, 1)
        # Append batch prediction results
        predlist = torch.cat([predlist, preds.view(-1).cpu()])
        lbllist = torch.cat([lbllist, classes.view(-1).cpu()])
# Confusion matrix
conf_mat=confusion_matrix(lbllist.numpy(), predlist.numpy())
print(conf_mat)
# Per-class accuracy
class_accuracy=100*conf_mat.diagonal()/conf_mat.sum(1)
print(class_accuracy)
Following the answer above... here is a version with some visualization:
nb_classes = 9
confusion_matrix = np.zeros((nb_classes, nb_classes))
with torch.no_grad():
    for i, (inputs, classes) in enumerate(test_loader):
        inputs = inputs.to(DEVICE)
        classes = classes.to(DEVICE)
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        for t, p in zip(classes.view(-1), preds.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
plt.figure(figsize=(15,10))
class_names = list(label2class.values())
df_cm = pd.DataFrame(confusion_matrix, index=class_names, columns=class_names).astype(int)
heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right',fontsize=15)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right',fontsize=15)
plt.ylabel('True label')
plt.xlabel('Predicted label');  # trailing semicolon just suppresses the text output in a notebook
Another simple way to get accuracy is to use sklearn's accuracy_score.
Here's an example:
from sklearn.metrics import accuracy_score
y_pred = y_pred.data.numpy()
accuracy = accuracy_score(labels, np.argmax(y_pred, axis=1))
First you need to get the data out of the variable.
"y_pred" holds the predictions from your model, and labels are of course your labels.
np.argmax returns the index of the largest value in the array. We want the largest value because it corresponds to the highest-probability class when using softmax for multi-class classification. accuracy_score then returns the fraction of matches between labels and y_pred.
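A toy illustration with made-up numbers, just to show the mechanics:
import numpy as np
from sklearn.metrics import accuracy_score
y_pred = np.array([[0.1, 0.7, 0.2],   # softmax outputs for three samples
                   [0.8, 0.1, 0.1],
                   [0.2, 0.3, 0.5]])
labels = np.array([1, 0, 2])
print(np.argmax(y_pred, axis=1))                          # [1 0 2] -- highest-probability class per row
print(accuracy_score(labels, np.argmax(y_pred, axis=1)))  # 1.0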
I used the following to convert the torch tensors to an int defining the predicted class.
x = [torch.max(tensor).item() for tensor in x_data]
y = [torch.max(tensor).item() for tensor in y_data]
I hope this helps! I'm still a noob, so please be gentle...

How Can I Extract Predictions from A Softmax Layer on Tensorflow

I'm trying to extract predictions, use the predictions to calculate accuracy/precision/recall/F1, and get the prediction probabilities. I know I have 10 output classes, so I can't calculate precision per se, but I will be doing all of this in other models; moreover, I'd like to be able to extract the prediction probabilities. My model is as follows. I've checked GitHub and StackOverflow, but I have yet to find a way to extract those properties. Most of the answers come close but never answer what I need. I've used some low epoch numbers there to check the model quickly and keep the output screen less crowded.
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
mnist = fetch_mldata('MNIST original', data_home="data/mnist/")
lb = LabelBinarizer().fit(mnist.target)
X_train, X_test, y_train, y_test = train_test_split(mnist.data, lb.transform(mnist.target), train_size=0.9, test_size=0.1)
X = tf.placeholder(tf.float32, shape=(None, 784))
y = tf.placeholder(tf.int64, shape=(None, 10))
lOne = fully_connected(inputs=X, num_outputs=100, activation_fn=tf.nn.elu)
logits = fully_connected(inputs=lOne, num_outputs=10, activation_fn=tf.nn.softmax)
pred = logits
acc = tf.metrics.accuracy(labels=y, predictions=pred)
loss = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=y)
trainOP = tf.train.AdamOptimizer(0.001).minimize(loss)
import numpy as np
bSize = 100
batches = int(np.floor(X_train.shape[0]/bSize)+1)
def batcher(dSet, bNum):
    return dSet[bSize*(bNum-1):bSize*(bNum)]

epochs = 2
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(0, epochs):
        for batch in range(1, batches):
            X_batch = batcher(X_train, batch)
            y_batch = batcher(y_train, batch)
            sess.run(trainOP, feed_dict={X: X_batch, y: y_batch})
    lossVal = sess.run([loss], feed_dict={X: X_test, y: y_test})
    print(lossVal)

sess.close()
The code shared in the question covers training, but not "using" (inferring with) the resulting model.
Two issues:
The trained model is not serialized, so future runs will run on an untrained model and predict whatever their initialization tells them to. Hence the comment on the question suggesting to save the trained model and restore it when predicting.
The logits are the output of a SoftMax function. A common way to get a class from logits is to select the highest value in the tensor (here a vector).
With TensorFlow, the last point can be done with tf.argmax ("Returns the index with the largest value across axes of a tensor."):
tf.argmax(input=logits, axis=1)
All in all, the question's code only partially covers the MNIST tutorial from the TensorFlow team. Perhaps there are more pointers there if you get stuck with this code.
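For completeness, a minimal inference sketch in the question's TF1 style (assuming the X placeholder and logits from the graph above; in practice you would restore saved weights rather than reinitialise):
predicted_class = tf.argmax(input=logits, axis=1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # placeholder for restoring trained weights
    preds = sess.run(predicted_class, feed_dict={X: X_test[:5]})
    print(preds)  # predicted class indices for the first five test samples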
I'm writing this in case anyone stumbles upon the same case. I built a network following basic MNIST examples, used tf.nn.softmax in the final layer, and expected to get results from that layer. It looks like I needed to apply the softmax function again to get the results from such a layer, i.e. yPred = tf.nn.softmax(logits), with logits being the name of the output layer. I'm adding the fixed code below.
I could add a line to save the model, then load it later and make predictions on the saved model. Since this is just an example of me building the model, I've omitted the saving part.
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
mnist = fetch_mldata('MNIST original', data_home="data/mnist/")
lb = LabelBinarizer().fit(mnist.target)
X_train, X_test, y_train, y_test = train_test_split(mnist.data, lb.transform(mnist.target), train_size=0.9, test_size=0.1, stratify = mnist.target, random_state=42)
X = tf.placeholder(tf.float32, shape=(None, 784))
y = tf.placeholder(tf.int64, shape=(None, 10))
lOne = fully_connected(inputs=X, num_outputs=100, activation_fn=tf.nn.elu)
lTwo = fully_connected(inputs=lOne, num_outputs=100, activation_fn=tf.nn.elu)
logits = fully_connected(inputs=lTwo, num_outputs=10, activation_fn=tf.nn.softmax)
pred = tf.nn.softmax(logits)
acc_bool = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
acc_Num = tf.cast(acc_bool, tf.float32)
acc_Mean = tf.reduce_mean(acc_Num)
loss = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=y)
trainOP = tf.train.AdamOptimizer(0.001).minimize(loss)
import numpy as np
bSize = 1024
batches = int(np.floor(X_train.shape[0]/bSize)+1)
def batcher(dSet, bNum):
    return dSet[bSize*(bNum-1):bSize*(bNum)]

epochs = 250
init = tf.global_variables_initializer()
trainA = []
testA = []

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(0, epochs):
        for batch in range(1, batches):
            X_batch = batcher(X_train, batch)
            y_batch = batcher(y_train, batch)
            sess.run(trainOP, feed_dict={X: X_batch, y: y_batch})
        if epoch % 25 == 1:
            trainLoss, trainAcc = sess.run([loss, acc_Mean], feed_dict={X: X_train, y: y_train})
            testLoss, testAcc = sess.run([loss, acc_Mean], feed_dict={X: X_test, y: y_test})
            yPred = sess.run(pred, feed_dict={X: X_test[0].reshape(1, -1), y: y_test[0].reshape(1, -1)})
            print(yPred)

sess.close()
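As for the saving part mentioned above, a minimal tf.train.Saver sketch (my addition; the checkpoint path is a placeholder):
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    # ... training loop as above ...
    saver.save(sess, "./mnist_model.ckpt")     # save the trained weights
with tf.Session() as sess:
    saver.restore(sess, "./mnist_model.ckpt")  # restore instead of re-initialising
    yPred = sess.run(pred, feed_dict={X: X_test[0].reshape(1, -1)})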
