The following code
from torch.utils.data import DataLoader
from defect_segmentation.data_loading.DatasetSingleImage import dataset_single_image_default
import numpy as np
if __name__ == "__main__":
import matplotlib.pyplot as plt
def main():
dataset = dataset_single_image_default()
batch_size = 16
shuffle = True
num_workers = 0
loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
for i_batch, sample_batched in enumerate(loader):
fig, axs = plt.subplots(int(np.sqrt(batch_size)), batch_size // int(np.sqrt(batch_size)))
fig.suptitle(f"i_batch = {i_batch}")
for i_sample, ax in zip(range(sample_batched.shape[0]), axs.flat):
ax.set_title(f"Sample #{i_sample}")
ax.axis("off")
ax.imshow(sample_batched[i_sample, :, :])
plt.pause(0.001)
main()
works and outputs figures like so
This is fine.
The problem is that by about the 10th figure, populating each new figure becomes very slow. I don't know what could be causing this.
For completeness, here is the code for creating the dataset (striding over a single image):
from torch.utils.data import Dataset
from Utils.ConfigProvider import ConfigProvider
import cv2
import os
from overrides import overrides # pip install overrides
class DatasetSingleImage(Dataset):
def __init__(self, image_path: str, sample_shape: tuple, strides: tuple):
self._path = image_path
assert os.path.isfile(self._path)
self._im = cv2.imread(self._path)
self._shape = self._im.shape
self._rows, self._cols = self._shape[0], self._shape[1]
self._sample_shape = sample_shape
self._sample_rows, self._sample_cols = self._sample_shape[0], self._sample_shape[1]
self._strides = strides
self._stride_rows, self._stride_cols = self._strides[0], self._strides[1]
# self._rows_start_range = range(0, self._rows, self._stride_rows)
# self._cols_start_range = range(0, self._cols, self._stride_cols)
self._rows_tuples_range = \
[(c, min(c + self._sample_rows, self._rows)) for c in range(0, self._rows - self._sample_rows, self._stride_rows)]
self._cols_tuples_range = \
[(r, min(r + self._sample_cols, self._cols)) for r in range(0, self._cols - self._sample_cols, self._stride_cols)]
self._n_strides_rows = len(self._rows_tuples_range)
self._n_strides_cols = len(self._cols_tuples_range)
self._total_strides = self._n_strides_rows * self._n_strides_cols
def __len__(self):
return self._total_strides
@overrides  # pip install overrides
def __getitem__(self, ind):
row_ind = ind // self._n_strides_cols
col_ind = ind % self._n_strides_cols
sample_x = self._rows_tuples_range[row_ind]
sample_y = self._cols_tuples_range[col_ind]
sample = self._im[sample_x[0]:sample_x[1], sample_y[0]:sample_y[1]]
assert sample.shape[:2] == self._sample_shape
return sample
def dataset_single_image_default():
path = ConfigProvider.config().data.defective_inspected_path1
sample_shape = (50, 50)
strides = (25, 25)
dataset = DatasetSingleImage(path, sample_shape, strides)
return dataset
What is making the plotting slow, and how can I fix it?
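For what it's worth, one pattern that avoids accumulating open figures (a sketch only, reusing the names from the loop above; not a confirmed diagnosis) is to create the figure once and clear the axes on every batch:
n_rows = int(np.sqrt(batch_size))
n_cols = batch_size // n_rows
fig, axs = plt.subplots(n_rows, n_cols)
for i_batch, sample_batched in enumerate(loader):
    fig.suptitle(f"i_batch = {i_batch}")
    for i_sample, ax in zip(range(sample_batched.shape[0]), axs.flat):
        ax.clear()  # drop the previous batch's image instead of opening a new figure
        ax.set_title(f"Sample #{i_sample}")
        ax.axis("off")
        ax.imshow(sample_batched[i_sample, :, :])
    plt.pause(0.001)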
Related: https://github.com/safwankdb/Neural-Style-Transfer
I created a model file by running the code in the link above, then converted it to ONNX so I could run it with C++ and OpenCV.
But while the input of the ONNX file I exported is (1, 3, 512, 512), the output comes out as (1, 512, 28, 28). My export code:
VGG.eval()
torch.save(VGG, 'torchmodel.pth')
dummy_input = Variable(torch.randn(1, 3, 512, 512, device='cuda:1'))
input_names = ['input']
output_names = ['output']
onnxfile='style.onnx'
torch.onnx.export(VGG,dummy_input,onnxfile,verbose=False,input_names=input_names,opset_version=11, output_names=output_names)
I tried export parameters suggested on a few sites, but it didn't work.
Does anyone have any idea what might be going wrong here?
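For context, the module being exported here is tv.models.vgg19(pretrained=True).features (see the original code below), i.e. only the convolutional backbone, so its output is a feature map rather than an image. A quick sanity check before export (just a sketch, not a fix) makes this visible:
VGG.eval()
with torch.no_grad():
    out = VGG(dummy_input)
print(out.shape)  # a conv feature map such as torch.Size([1, 512, H, W]), not an RGB image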
Original code:
# -*- coding: utf-8 -*-
"""StyleTransfer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/15JKaqmpVNr8NhURJWgbkvIl1sd0aKS3o
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision as tv
from PIL import Image
import imageio
import numpy as np
from matplotlib import pyplot as plt
to_tensor = tv.transforms.Compose([
tv.transforms.Resize((512,512)),
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[1, 1, 1]),
])
unload = tv.transforms.Compose([
tv.transforms.Normalize(mean=[-0.485,-0.456,-0.406],
std=[1,1,1]),
tv.transforms.Lambda(lambda x: x.clamp(0,1))
])
to_image = tv.transforms.ToPILImage()
style_img = 'udnie.jpg'
input_img = 'chicago.jpg'
style_img = Image.open(style_img)
input_img = Image.open(input_img)
style_img = to_tensor(style_img).cuda()
input_img = to_tensor(input_img).cuda()
def get_features(module, x, y):
# print('here')
features.append(y)
def gram_matrix(x):
b, c, h, w = x.size()
F = x.view(b,c,h*w)
G = torch.bmm(F, F.transpose(1,2))/(h*w)
return G
VGG = tv.models.vgg19(pretrained=True).features
VGG.cuda()
for i, layer in enumerate(VGG):
if i in [0,5,10,19,21,28]:
VGG[i].register_forward_hook(get_features)
elif isinstance(layer, nn.MaxPool2d):
VGG[i] = nn.AvgPool2d(kernel_size=2)
VGG.eval()
for p in VGG.parameters():
p.requires_grad = False
features = []
VGG(input_img.unsqueeze(0))
c_target = features[4].detach()
features = []
VGG(style_img.unsqueeze(0))
f_targets = features[:4]+features[5:]
gram_targets = [gram_matrix(i).detach() for i in f_targets]
alpha = 1
beta = 1e3
iterations = 200
image = input_img.clone().unsqueeze(0)
# image = torch.randn(1,3,512,512).cuda()
images = []
optimizer = optim.LBFGS([
image.requires_grad_()], lr=1)
mse_loss = nn.MSELoss(reduction='mean')
l_c = []
l_s = []
counter = 0
for itr in range(iterations):
features = []
def closure():
optimizer.zero_grad()
VGG(image)
t_features = features[-6:]
content = t_features[4]
style_features = t_features[:4]+t_features[5:]
t_features = []
gram_styles = [gram_matrix(i) for i in style_features]
c_loss = alpha * mse_loss(content, c_target)
s_loss = 0
for i in range(5):
n_c = gram_styles[i].shape[0]
s_loss += beta * mse_loss(gram_styles[i],gram_targets[i])/(n_c**2)
total_loss = c_loss+s_loss
l_c.append(c_loss)
l_s.append(s_loss)
total_loss.backward()
return total_loss
optimizer.step(closure)
print('Step {}: S_loss: {:.8f} C_loss: {:.8f}'.format(itr, l_s[-1], l_c[-1]))
if itr%1 == 0:
temp = unload(image[0].cpu().detach())
temp = to_image(temp)
temp = np.array(temp)
images.append(temp)
imageio.mimsave('progress.gif', images)
plt.clf()
plt.plot(l_c, label='Content Loss')
plt.legend()
plt.savefig('loss1.png')
plt.clf()
plt.plot(l_s, label='Style Loss')
plt.legend()
plt.savefig('loss2.png')
plt.imsave('last.jpg',images[-1])
I've got this code that is supposed to make a heatmap, but with circles instead of squares/rectangles. So far I'm testing it with placeholder colors, and it looks like this:
import matplotlib.pyplot as plt
import matplotlib.colors as mcl
import numpy as np
import pandas as pd
from typing import List, TypeVar
T = TypeVar("T")  # generic element type used by l_flatten
from random import uniform
def l_flatten(l: List[T]) -> List[T]:
return [j for i in l for j in i]
def get_luminance(color: str) -> float:
# taken from Seaborn's utils
rgb = mcl.colorConverter.to_rgba_array(color)[:, :3]
rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)
lum = rgb.dot([.2126, .7152, .0722])
try:
lum = lum.item()
except ValueError:
pass
return lum
class CircleHeatmap:
def __init__(self,
ax: plt.Axes,
df: pd.DataFrame,
colors: List[str],
annot_show: bool,
annot_size: float,
circle_size: float,
x_labels: List[str],
x_labels_size: float,
x_labels_color: str,
y_labels: List[str],
y_labels_size: float,
y_labels_color: str) -> None:
# pass user-provided variables
self.ax = ax
self.df = df
self.colors = colors
self.annot_show = annot_show
self.annot_size = annot_size
self.circle_size = circle_size
self.x_labels = x_labels
self.x_labels_size = x_labels_size
self.x_labels_color = x_labels_color
self.y_labels = y_labels
self.y_labels_size = y_labels_size
self.y_labels_color = y_labels_color
# pass technical variables
self.y_size, self.x_size = self.df.shape
self.x_arr, self.y_arr = np.meshgrid(np.arange(self.x_size),
np.arange(self.y_size))
self.x_arr, self.y_arr = ((self.x_arr + 0.5).flat,
(self.y_arr + 0.5).flat)
self.x_len, self.y_len = [np.linspace(0, len(i), len(i) + 1)[:-1] + 0.5
for i in (self.x_labels, self.y_labels)]
self.df_values = l_flatten(self.df.values.tolist())
def plot(self) -> None:
self.ax.scatter(self.x_arr, self.y_arr,
s = self.circle_size ** 2,
c = self.colors)
def labels(self) -> None:
self.ax.set_xticks(self.x_len)
self.ax.set_yticks(self.y_len)
self.ax.set_xticklabels(self.x_labels, fontsize = self.x_labels_size,
color = self.x_labels_color)
self.ax.set_yticklabels(self.y_labels, fontsize = self.y_labels_size,
color = self.y_labels_color)
def main() -> None:
fig, ax = plt.subplots(figsize = (20, 30))
df = pd.DataFrame([[uniform(0, 1) for j in range(20)] for i in range(30)])
colors = ["#EC4E20", "#FF9505", "#016FB9"] * 200
heatmap = CircleHeatmap(ax = ax,
df = df,
colors = colors,
annot_show = False,
annot_size = 16,
circle_size = 45,
x_labels = [i for i in range(20)],
x_labels_size = 20,
x_labels_color = "black",
y_labels = [i for i in range(30)],
y_labels_size = 20,
y_labels_color = "black")
heatmap.plot()
heatmap.labels()
for i in ["top", "bottom", "right", "left"]:
ax.spines[i].set_visible(False)
plt.savefig("test2.png")
if __name__ == "__main__":
main()
As a result, I get something like this. My question is: how can I move the ticks and the labels on the x-axis up a little bit, preferably with an option to control them with a variable?
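For reference, one way to make that offset tunable (a sketch against the labels() method above; the x_pad/y_pad parameter names are mine) is matplotlib's tick_params, which controls the distance between the axis and its tick labels:
def labels(self, x_pad: float = 10.0, y_pad: float = 10.0) -> None:
    self.ax.set_xticks(self.x_len)
    self.ax.set_yticks(self.y_len)
    self.ax.set_xticklabels(self.x_labels, fontsize=self.x_labels_size,
                            color=self.x_labels_color)
    self.ax.set_yticklabels(self.y_labels, fontsize=self.y_labels_size,
                            color=self.y_labels_color)
    # pad is in points; a smaller (or negative) x_pad nudges the bottom labels upward,
    # while self.ax.xaxis.tick_top() would move ticks and labels to the top edge instead
    self.ax.tick_params(axis="x", pad=x_pad)
    self.ax.tick_params(axis="y", pad=y_pad)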
I got a similar result and at first only commented on the settings, since you already seemed familiar with them. A fix turned out to be needed after all, so I'm responding with the corrected code. I'm not sure I added it in the best place, but the following code can help.
def labels(self) -> None:
self.ax.set_xticks(self.x_len)
self.ax.set_yticks(self.y_len)
self.ax.spines['bottom'].set_position(('data', 0))
self.ax.spines['left'].set_position(('data', 0))
self.ax.set_xticklabels(self.x_labels, fontsize = self.x_labels_size,
color = self.x_labels_color)
self.ax.set_yticklabels(self.y_labels, fontsize = self.y_labels_size,
color = self.y_labels_color)
I'm creating a heatmap for a CNN as per this tutorial.
In the last part:
def create_patiens_cam(case, plane):
patient_id = case['id']
mri = case['mri']
folder_path = f'./CAMS/{plane}/{patient_id}/'
if os.path.isdir(folder_path):
shutil.rmtree(folder_path)
os.makedirs(folder_path)
os.makedirs(folder_path + 'slices/')
os.makedirs(folder_path + 'cams/')
params = list(mrnet.parameters())
weight_softmax = np.squeeze(params[-2].cpu().data.numpy())
num_slices = mri.shape[1]
global feature_blobs
feature_blobs = []
mri = mri.to(device)
logit = mrnet(mri)
size_upsample = (256, 256)
feature_conv = feature_blobs[0]
h_x = F.softmax(logit, dim=1).data.squeeze(0)
probs, idx = h_x.sort(0, True)
probs = probs.cpu().numpy()
idx = idx.cpu().numpy()
slice_cams = returnCAM(feature_blobs[-1], weight_softmax, idx[:1])
for s in tqdm_notebook(range(num_slices), leave=False):
slice_pil = (transforms
.ToPILImage()(mri.cpu()[0][s] / 255))
slice_pil.save(folder_path + f'slices/{s}.png',
dpi=(300, 300))
img = mri[0][s].cpu().numpy()
img = img.transpose(1, 2, 0)
heatmap = (cv2
.cvtColor(cv2.applyColorMap(
cv2.resize(slice_cams[s], (256, 256)),
cv2.COLORMAP_JET),
cv2.COLOR_BGR2RGB)
)
result = heatmap * 0.3 + img * 0.5
pil_img_cam = Image.fromarray(np.uint8(result))
pil_img_cam.save(folder_path + f'cams/{s}.png', dpi=(300, 300))
I have created a folder "CAMS" in my 'mrnet' folder. However, when running this last code (in a Jupyter notebook) I get no errors, but no PNGs are created. Does anyone have an idea what could be wrong, or where I could look to find out, given that I get no errors?
FULL CODE:
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 13 21:54:40 2021
@author: GlaDOS
"""
import os
import io
import requests
from PIL import Image
from torchvision import models, transforms
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
import cv2
import pdb
from matplotlib import pyplot as plt
import sys
sys.path.append('C:/Users/GlaDOS/mrnet')
import shutil
import torch
import model
from dataloader import MRDataset
from tqdm import tqdm_notebook
task = 'acl'
plane = 'sagittal'
prefix = 'sag'
model_name = [name for name in os.listdir('C:/Users/GlaDOS/mrnet/models/')
if (task in name) and
(plane in name) and
(prefix in name)][0]
is_cuda = torch.cuda.is_available()
device = torch.device("cuda" if is_cuda else "cpu")
mrnet = torch.load(f'C:/Users/GlaDOS/mrnet/models/{model_name}')
mrnet = mrnet.to(device)
_ = mrnet.eval()
dataset = MRDataset('C:/Users/GlaDOS/mrnet/data/',
task,
plane,
transform=None,
train=False)
loader = torch.utils.data.DataLoader(dataset,
batch_size=1,
shuffle=False,
num_workers=0,
drop_last=False)
def returnCAM(feature_conv, weight_softmax, class_idx):
size_upsample = (256, 256)
bz, nc, h, w = feature_conv.shape
slice_cams = []
for s in range(bz):
for idx in class_idx:
cam = weight_softmax[idx].dot(feature_conv[s].reshape((nc, h*w)))
cam = cam.reshape(h, w)
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255 * cam_img)
slice_cams.append(cv2.resize(cam_img, size_upsample))
return slice_cams
patients = []
for i, (image, label, _) in tqdm_notebook(enumerate(loader), total=len(loader)):
patient_data = {}
patient_data['mri'] = image
patient_data['label'] = label[0][0][1].item()
patient_data['id'] = '0' * (4 - len(str(i))) + str(i)
patients.append(patient_data)
acl = list(filter(lambda d: d['label'] == 1, patients))
def create_patiens_cam(case, plane):
patient_id = case['id']
mri = case['mri']
folder_path = f'C:/Users/GlaDOS/mrnet/cams/{plane}/{patient_id}/'
if os.path.isdir(folder_path):
shutil.rmtree(folder_path)
os.makedirs(folder_path)
os.makedirs(folder_path + 'slices/')
os.makedirs(folder_path + 'cams/')
params = list(mrnet.parameters())
weight_softmax = np.squeeze(params[-2].cpu().data.numpy())
num_slices = mri.shape[1]
global feature_blobs
feature_blobs = []
mri = mri.to(device)
logit = mrnet(mri)
size_upsample = (256, 256)
feature_conv = feature_blobs[0]
h_x = F.softmax(logit, dim=1).data.squeeze(0)
probs, idx = h_x.sort(0, True)
probs = probs.cpu().numpy()
idx = idx.cpu().numpy()
slice_cams = returnCAM(feature_blobs[-1], weight_softmax, idx[:1])
for s in tqdm_notebook(range(num_slices), leave=False):
slice_pil = (transforms
.ToPILImage()(mri.cpu()[0][s] / 255))
slice_pil.save(folder_path + f'slices/{s}.png',
dpi=(300, 300))
img = mri[0][s].cpu().numpy()
img = img.transpose(1, 2, 0)
heatmap = (cv2
.cvtColor(cv2.applyColorMap(
cv2.resize(slice_cams[s], (256, 256)),
cv2.COLORMAP_JET),
cv2.COLOR_BGR2RGB)
)
result = heatmap * 0.3 + img * 0.5
pil_img_cam = Image.fromarray(np.uint8(result))
pil_img_cam.save(folder_path + f'cams/{s}.png', dpi=(300, 300))
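One thing that stands out in the listing above: create_patiens_cam is defined but never called, which would explain files not appearing without any error being raised. Assuming that is indeed the missing piece, a minimal sketch of invoking it over the filtered cases (reusing acl and plane from above) is:
for case in tqdm_notebook(acl, leave=False):
    create_patiens_cam(case, plane)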
Use seaborn:
import seaborn as sns
sns_plot = sns.heatmap(df)  # create the plot first; heatmap returns a matplotlib Axes
sns_plot.figure.savefig('output.png')
I want to use TensorFlow's ImageDataGenerator.flow_from_directory() to load my dataset, but my outputs are regression targets rather than classes. So I used class_mode=None so that no labels are assigned to the data, but now I need to attach my training labels (which I have as a list) and I don't know how. Is there a way around this?
Example code:
labels = [0.75, 21.60, 10.12] # example labels
# load dataset from directory
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
train_data = image_generator.flow_from_directory(batch_size=batch_size, directory=train_x_dir, target_size=(224, 224), class_mode=None, shuffle=False)
# assign labels to training examples
# ???
Since I got no direct answer, I assume this can't be done in TF 2.3.
So I referred to a thread mentioned by AerysS, specifically to the answer from user timehaven, and used his code to generate batches from a pandas DataFrame with Keras' load_img and img_to_array. The code was written for Python 2.7, so I made a few changes to port it, and it works for me on Python 3.6.8.
data_generator.py
from __future__ import print_function
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import numpy as np
import pandas as pd
import bcolz
import threading
import os
import sys
import glob
import shutil
bcolz_lock = threading.Lock()
# old_blosc_nthreads = bcolz.blosc_set_nthreads(1)
# assert bcolz.blosc_set_nthreads(1) == 1
def safe_bcolz_open(fname, idx=None, debug=False):
with bcolz_lock:
if idx is None:
X2 = bcolz.open(fname)
else:
X2 = bcolz.open(fname)[idx]
if debug:
df_debug = pd.DataFrame(X2, index=idx)
assert X2.shape[0] == len(idx)
assert X2.shape == df_debug.shape
df_debug = df_debug.astype(int)
test_idx = (df_debug.subtract(df_debug.index.values, axis=0) == 0).all(axis=1)
assert test_idx.all(), df_debug[~test_idx]
return X2
class threadsafe_iter:
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
assert self.lock is not bcolz_lock
def __iter__(self):
return self
def next(self):
with self.lock:
return self.it.next()
def __next__(self):
with self.lock:
return next(self.it)
def threadsafe_generator(f):
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
@threadsafe_generator
def generator_from_df(df, batch_size, target_size, features=None,
debug_merged=False):
if features is not None:
assert os.path.exists(features)
assert safe_bcolz_open(features).shape[0] == df.shape[0], "Features rows must match df!"
nbatches, n_skipped_per_epoch = divmod(df.shape[0], batch_size)
count = 1
epoch = 0
# New epoch.
while 1:
df = df.sample(frac=1) # frac=1 is same as shuffling df.
epoch += 1
i, j = 0, batch_size
# Mini-batches within epoch.
mini_batches_completed = 0
for _ in range(nbatches):
sub = df.iloc[i:j]
try:
X = np.array([(2 * (img_to_array(load_img(f, target_size=target_size)) / 255.0 - 0.5)) for f in sub.imgpath])
Y = sub.target.values
if features is None:
mini_batches_completed += 1
yield X, Y
else:
X2 = safe_bcolz_open(features, sub.index.values, debug=debug_merged)
mini_batches_completed += 1
yield [X, X2], Y
except IOError as err:
count -= 1
i = j
j += batch_size
count += 1
train.py
from glob import glob
import os
import pandas as pd
from data_generator import generator_from_df
def construct_dataframe(img_path, labels_path):
data = {}
data['imgpath'] = glob(os.path.join(img_path, '*.png'))
data['target'] = load_labels(labels_path)
return pd.DataFrame(data)
train_df = construct_dataframe(train_x_dir, train_y_dir)
train_generator = generator_from_df(train_df, batch_size, (img_size, img_size))
# load and compile model
# ...
model.fit(train_generator, ...)
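As an aside, newer tf.keras versions can also attach arbitrary regression targets directly via flow_from_dataframe with class_mode='raw'. A hedged sketch (I have not verified this on TF 2.3; the filename/label column names and example filenames are mine, and train_x_dir/batch_size are reused from the question):
import pandas as pd
import tensorflow as tf

df = pd.DataFrame({"filename": ["img_0.png", "img_1.png", "img_2.png"],
                   "label": [0.75, 21.60, 10.12]})
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
train_data = image_generator.flow_from_dataframe(df,
                                                 directory=train_x_dir,
                                                 x_col="filename",
                                                 y_col="label",
                                                 target_size=(224, 224),
                                                 batch_size=batch_size,
                                                 class_mode="raw")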
I'm following this tutorial.
I'm at the last part where we combine the models in a regression.
I'm coding this in jupyter as follows:
import shutil
import os
import time
from datetime import datetime
import argparse
import pandas
import numpy as np
from tqdm import tqdm
from tqdm import tqdm_notebook
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchsample.transforms import RandomRotate, RandomTranslate, RandomFlip, ToTensor, Compose, RandomAffine
from torchvision import transforms
import torch.nn.functional as F
from tensorboardX import SummaryWriter
import dataloader
from dataloader import MRDataset
import model
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
def extract_predictions(task, plane, train=True):
assert task in ['acl', 'meniscus', 'abnormal']
assert plane in ['axial', 'coronal', 'sagittal']
models = os.listdir('models/')
model_name = list(filter(lambda name: task in name and plane in name, models))[0]
model_path = f'models/{model_name}'
mrnet = torch.load(model_path)
_ = mrnet.eval()
train_dataset = MRDataset('data/',
task,
plane,
transform=None,
train=train,
)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=1,
shuffle=False,
num_workers=10,
drop_last=False)
predictions = []
labels = []
with torch.no_grad():
for image, label, _ in tqdm_notebook(train_loader):
logit = mrnet(image.cuda())
prediction = torch.sigmoid(logit)
predictions.append(prediction.item())
labels.append(label.item())
return predictions, labels
task = 'acl'
results = {}
for plane in ['axial', 'coronal', 'sagittal']:
predictions, labels = extract_predictions(task, plane)
results['labels'] = labels
results[plane] = predictions
X = np.zeros((len(predictions), 3))
X[:, 0] = results['axial']
X[:, 1] = results['coronal']
X[:, 2] = results['sagittal']
y = np.array(labels)
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X, y)
task = 'acl'
results_val = {}
for plane in ['axial', 'coronal', 'sagittal']:
predictions, labels = extract_predictions(task, plane, train=False)
results_val['labels'] = labels
results_val[plane] = predictions
y_pred = logreg.predict_proba(X_val)[:, 1]
metrics.roc_auc_score(y_val, y_pred)
However I get this error:
ValueError Traceback (most recent call last)
<ipython-input-2-979acb314bc5> in <module>
3
4 for plane in ['axial', 'coronal', 'sagittal']:
----> 5 predictions, labels = extract_predictions(task, plane)
6 results['labels'] = labels
7 results[plane] = predictions
<ipython-input-1-647731b6b5c8> in extract_predictions(task, plane, train)
54 logit = mrnet(image.cuda())
55 prediction = torch.sigmoid(logit)
---> 56 predictions.append(prediction.item())
57 labels.append(label.item())
58
ValueError: only one element tensors can be converted to Python scalars
Here's the MRDataset code in case:
class MRDataset(data.Dataset):
def __init__(self, root_dir, task, plane, train=True, transform=None, weights=None):
super().__init__()
self.task = task
self.plane = plane
self.root_dir = root_dir
self.train = train
if self.train:
self.folder_path = self.root_dir + 'train/{0}/'.format(plane)
self.records = pd.read_csv(
self.root_dir + 'train-{0}.csv'.format(task), header=None, names=['id', 'label'])
else:
transform = None
self.folder_path = self.root_dir + 'valid/{0}/'.format(plane)
self.records = pd.read_csv(
self.root_dir + 'valid-{0}.csv'.format(task), header=None, names=['id', 'label'])
self.records['id'] = self.records['id'].map(
lambda i: '0' * (4 - len(str(i))) + str(i))
self.paths = [self.folder_path + filename +
'.npy' for filename in self.records['id'].tolist()]
self.labels = self.records['label'].tolist()
self.transform = transform
if weights is None:
pos = np.sum(self.labels)
neg = len(self.labels) - pos
self.weights = torch.FloatTensor([1, neg / pos])
else:
self.weights = torch.FloatTensor(weights)
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
array = np.load(self.paths[index])
label = self.labels[index]
if label == 1:
label = torch.FloatTensor([[0, 1]])
elif label == 0:
label = torch.FloatTensor([[1, 0]])
if self.transform:
array = self.transform(array)
else:
array = np.stack((array,)*3, axis=1)
array = torch.FloatTensor(array)
# if label.item() == 1:
# weight = np.array([self.weights[1]])
# weight = torch.FloatTensor(weight)
# else:
# weight = np.array([self.weights[0]])
# weight = torch.FloatTensor(weight)
return array, label, self.weights
I've only trained my models for 1 or 2 epochs for each plane of the MRI instead of 35 as in the tutorial; I'm not sure if that has anything to do with it. Other than that, I'm stuck as to what this could be. I also removed normalize=False from the options for train_dataset because it kept giving me an error, and I read that it could be removed, but I'm not so sure about that either.
Only a tensor that contains a single value can be converted to a Python scalar with item(). Try printing the contents of prediction; I imagine it is a vector of probabilities indicating which label is most likely. Using argmax on prediction will give you the actual predicted label (assuming your labels are 0..n).
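Concretely, a sketch of what that could look like inside the prediction loop above (assuming mrnet emits two logits, as the two-element labels produced by MRDataset suggest):
logit = mrnet(image.cuda())
prediction = torch.sigmoid(logit)                    # shape (1, 2) here, not a single value
predictions.append(prediction.argmax(dim=1).item())  # hard label: 0 or 1
# or keep the positive-class probability instead: prediction[0, 1].item()
# the label tensor is also two-element, so reduce it the same way
labels.append(label.argmax().item())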