Custom transformation on Subset of CIFAR10 - PyTorch - python

I am trying to apply a custom transformation to part of the CIFAR10 dataset that superimposes an image over the dataset's images. I was able to download the data and divide it into subsets using the following code:
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
traindata = datasets.CIFAR10('./data', train=True, download=True,
                             transform=transform_train)
partitions = 5
traindata_split = torch.utils.data.random_split(
    traindata,
    [int(traindata.data.shape[0] / partitions) for _ in range(partitions)])
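For what it's worth, a sketch of an equivalent way to build the split lengths that makes random_split's constraint explicit (the lengths must sum to the dataset size; 50,000 CIFAR10 training images over 5 partitions gives exactly 10,000 each):
lengths = [len(traindata) // partitions] * partitions
assert sum(lengths) == len(traindata)
traindata_split = torch.utils.data.random_split(traindata, lengths)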
Then I wanted to modify part of the splits, so I created the following class and transform to use as follows:
class MyDataset(Dataset):  # https://discuss.pytorch.org/t/torch-utils-data-dataset-random-split/32209/3
    def __init__(self, subset, transform=None):
        self.subset = subset
        self.transform = transform

    def __getitem__(self, index):
        x, y = self.subset[index]
        if self.transform:
            x = self.transform(x)
        return x, y

    def __len__(self):
        return len(self.subset)
and
class ImageSuperImpose(object):
    """ Image input as PIL and output as PIL
    To be used as part of torchvision.transforms
    Args: p, a threshold value to control image thinning
    """
    def __init__(self, p=0):
        self.p = p

    def __call__(self, image):
        img = cv2.imread('img.jpg')
        img = img.astype('float32') / 255
        imgSm = cv2.resize(img, (32, 32))
        np_arr = image.cpu().detach().numpy().T
        sample = cv2.addWeighted(np_arr, 1, imgSm, 1, 0)
        sample = sample.T
        t = torch.from_numpy(sample)
        return sample
transform_train2 = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    ImageSuperImpose(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
datasetA = MyDataset(
    traindata_split[0], transform=transform_train2
)
test_loader = torch.utils.data.DataLoader(datasetA, batch_size=128, shuffle=True)
But when I tried to train the model on the subset I got the following error:
RuntimeError: The size of tensor a (32) must match the size of tensor b (3) at non-singleton dimension 0
**UPDATE**
Here is the full error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-20-7428084b03be> in <module>()
----> 1 train(model, opt, test_loader, 3)
9 frames
<ipython-input-14-fcb03e1d7685> in client_update(client_model, optimizer, train_loader, epoch)
5 client_model.train()
6 for e in range(epoch):
----> 7 for batch_idx, (data, target) in enumerate(train_loader):
8 data, target = data.to(device), target.to(device)
9 optimizer.zero_grad()
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
433 if self._sampler_iter is None:
434 self._reset()
--> 435 data = self._next_data()
436 self._num_yielded += 1
437 if self._dataset_kind == _DatasetKind.Iterable and \
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _next_data(self)
473 def _next_data(self):
474 index = self._next_index() # may raise StopIteration
--> 475 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
476 if self._pin_memory:
477 data = _utils.pin_memory.pin_memory(data)
/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in <listcomp>(.0)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
<ipython-input-7-1bde43acaff0> in __getitem__(self, index)
7 x, y = self.subset[index]
8 if self.transform:
----> 9 x = self.transform(x)
10 return x, y
11
/usr/local/lib/python3.6/dist-packages/torchvision/transforms/transforms.py in __call__(self, img)
65 def __call__(self, img):
66 for t in self.transforms:
---> 67 img = t(img)
68 return img
69
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.6/dist-packages/torchvision/transforms/transforms.py in forward(self, tensor)
224 Tensor: Normalized Tensor image.
225 """
--> 226 return F.normalize(tensor, self.mean, self.std, self.inplace)
227
228 def __repr__(self):
/usr/local/lib/python3.6/dist-packages/torchvision/transforms/functional.py in normalize(tensor, mean, std, inplace)
282 if std.ndim == 1:
283 std = std.view(-1, 1, 1)
--> 284 tensor.sub_(mean).div_(std)
285 return tensor
286
RuntimeError: The size of tensor a (32) must match the size of tensor b (3) at non-singleton dimension 0
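The mismatch (32 vs. 3 at non-singleton dimension 0) suggests that Normalize is receiving a tensor whose first dimension is not the 3 color channels, i.e. the custom transform is not handing a CxHxW image down the pipeline. For reference, a minimal sketch of a superimposing transform that stays PIL-in/PIL-out, so the subsequent ToTensor and Normalize see the format they expect (this assumes the overlay file img.jpg from the question and adds an alpha blending weight, which the original code fixed at 1):
import cv2
import numpy as np
from PIL import Image

class ImageSuperImpose(object):
    """Blend a fixed overlay into the input image. PIL in, PIL out."""
    def __init__(self, overlay_path='img.jpg', alpha=0.5):
        overlay = cv2.cvtColor(cv2.imread(overlay_path), cv2.COLOR_BGR2RGB)
        self.overlay = cv2.resize(overlay, (32, 32)).astype('float32') / 255
        self.alpha = alpha

    def __call__(self, image):
        np_img = np.asarray(image).astype('float32') / 255      # HWC, RGB
        blended = cv2.addWeighted(np_img, 1 - self.alpha, self.overlay, self.alpha, 0)
        blended = (np.clip(blended, 0, 1) * 255).astype('uint8')
        return Image.fromarray(blended)  # ToTensor then yields the expected CxHxW tensor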

Related

BiLSTM forward() - RuntimeError: shape '[-1, 38]' is invalid for input of size 1

Goal: implement bidirectionality in LSTM.
I'm new to Deep Learning and chose pytorch-lightning for minimal coding. Progress has been made, thanks to responses from prior posts.
forward() now needs to facilitate nn.LSTM(... bidirectional=True).
I'm basing my latest amendments on this discuss.pytorch.org response.
Error
The error is based on a mismatch of shapes.
Which data needs to be reshaped for which layers?
I'm far out of my depth.
RuntimeError: shape '[-1, 38]' is invalid for input of size 1
Code
from argparse import ArgumentParser

import torchmetrics
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F


class LSTMClassifier(nn.Module):
    def __init__(self,
                 num_classes,
                 batch_size=10,
                 embedding_dim=100,
                 hidden_dim=50,
                 vocab_size=128):
        super(LSTMClassifier, self).__init__()

        initrange = 0.1

        self.num_labels = num_classes
        n = len(self.num_labels)
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = 1

        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.word_embeddings.weight.data.uniform_(-initrange, initrange)
        self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_dim,
                            num_layers=self.num_layers, batch_first=True,
                            bidirectional=True)  # !
        # self.classifier = nn.Linear(hidden_dim, self.num_labels[0])
        self.classifier = nn.Linear(2 * hidden_dim, self.num_labels[0])  # !

    def repackage_hidden(h):
        """Wraps hidden states in new Tensors, to detach them from their history."""
        if isinstance(h, torch.Tensor):
            return h.detach()
        else:
            return tuple(repackage_hidden(v) for v in h)

    def forward(self, sentence, labels=None):
        embeds = self.word_embeddings(sentence)
        # lstm_out, _ = self.lstm(embeds)  # lstm_out - 2 tensors, _ - hidden layer
        lstm_out, hidden = self.lstm(embeds)

        # Calculate number of directions
        self.num_directions = 2 if self.lstm.bidirectional == True else 1

        # Extract last hidden state
        # final_state = hidden.view(self.num_layers, self.num_directions, self.batch_size, self.hidden_dim)[-1]
        final_state = hidden[0].view(self.num_layers, self.num_directions,
                                     self.batch_size, self.hidden_dim)[-1]

        # Handle directions
        final_hidden_state = None
        if self.num_directions == 1:
            final_hidden_state = final_state.squeeze(0)
        elif self.num_directions == 2:
            h_1, h_2 = final_state[0], final_state[1]
            # final_hidden_state = h_1 + h_2  # Add both states (requires changes to the input size of first linear layer + attention layer)
            final_hidden_state = torch.cat((h_1, h_2), 1)  # Concatenate both states

        print("len(final_hidden_state)", len(final_hidden_state))
        print("len(labels)", len(labels))
        print("final_hidden_state.shape", final_hidden_state.shape)
        print("labels", labels)

        self.linear_dims = [0]

        # Define set of fully connected layers (Linear Layer + Activation Layer) * #layers
        self.linears = nn.ModuleList()
        for i in range(0, len(self.linear_dims) - 1):
            linear_layer = nn.Linear(self.linear_dims[i], self.linear_dims[i + 1])
            self.init_weights(linear_layer)
            self.linears.append(linear_layer)
            if i == len(self.linear_dims) - 1:
                break  # no activation after output layer!!!
            self.linears.append(nn.ReLU())

        X = final_hidden_state

        # Push through linear layers
        for l in self.linears:
            X = l(X)

        # tag_space = self.classifier(hidden[:,0,:] + hidden[:,-1,:])  # !  # torch.flip(lstm_out[:,-1,:], [0, 1]) - 1 tensor
        # logits = F.log_softmax(final_hidden_state, dim=1)
        logits = F.cross_entropy(final_hidden_state, labels[0].view(-1))

        loss = None
        if labels:
            # print("len(logits.view(-1, self.num_labels[0]))", len(logits.view(-1, self.num_labels[0])))
            print("len(self.num_labels)", len(self.num_labels))
            print("self.num_labels[0]", self.num_labels[0])
            print("len(labels[0].view(-1))", len(labels[0].view(-1)))
            loss = F.cross_entropy(logits.view(-1, self.num_labels[0]), labels[0].view(-1))
        return loss, logits


class LSTMTaggerModel(pl.LightningModule):
    def __init__(
        self,
        num_classes,
        class_map,
        from_checkpoint=False,
        model_name='last.ckpt',
        learning_rate=3e-6,
        **kwargs,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.learning_rate = learning_rate
        self.model = LSTMClassifier(num_classes=num_classes)
        # self.model.load_state_dict(torch.load(model_name), strict=False)  # !
        self.class_map = class_map
        self.num_classes = num_classes
        self.valid_acc = torchmetrics.Accuracy()
        self.valid_f1 = torchmetrics.F1()

    def forward(self, *input, **kwargs):
        return self.model(*input, **kwargs)

    def training_step(self, batch, batch_idx):
        x, y_true = batch
        loss, _ = self(x, labels=y_true)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y_true = batch
        _, y_pred = self(x, labels=y_true)
        preds = torch.argmax(y_pred, axis=1)
        self.valid_acc(preds, y_true[0])
        self.log('val_acc', self.valid_acc, prog_bar=True)
        self.valid_f1(preds, y_true[0])
        self.log('f1', self.valid_f1, prog_bar=True)

    def configure_optimizers(self):
        'Prepare optimizer and schedule (linear warmup and decay)'
        opt = torch.optim.Adam(params=self.parameters(), lr=self.learning_rate)
        sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=10)
        return [opt], [sch]

    def training_epoch_end(self, training_step_outputs):
        avg_loss = torch.tensor([x['loss']
                                 for x in training_step_outputs]).mean()
        self.log('train_loss', avg_loss)
        print(f'###score: train_loss### {avg_loss}')

    def validation_epoch_end(self, val_step_outputs):
        acc = self.valid_acc.compute()
        f1 = self.valid_f1.compute()
        self.log('val_score', acc)
        self.log('f1', f1)
        print(f'###score: val_score### {acc}')

    def add_model_specific_args(parent_parser):
        parser = parent_parser.add_argument_group("OntologyTaggerModel")
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--learning_rate", default=2e-3, type=float)
        return parent_parser
Traceback:
Global seed set to 42
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
| Name | Type | Params
---------------------------------------------
0 | model | LSTMClassifier | 77.4 K
1 | valid_acc | Accuracy | 0
2 | valid_f1 | F1 | 0
---------------------------------------------
77.4 K Trainable params
0 Non-trainable params
77.4 K Total params
0.310 Total estimated model params size (MB)
Validation sanity check: 0it [00:00, ?it/s]
len(final_hidden_state) 10
len(labels) 1
final_hidden_state.shape torch.Size([10, 100])
labels [tensor([ 2, 31, 26, 37, 22, 5, 31, 36, 5, 10])]
len(self.num_labels) 1
self.num_labels[0] 38
len(labels[0].view(-1)) 10
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-16-3f817f701f20> in <module>
11 """.split()
12
---> 13 run_training(args)
<ipython-input-5-bb0d8b014e32> in run_training(input)
66 shutil.copyfile(labels_file_orig, labels_file_cp)
67 trainer = pl.Trainer.from_argparse_args(args, callbacks=[checkpoint_callback], logger=loggers)
---> 68 trainer.fit(model, dm)
69 model_file = os.path.join(args.modeldir, 'last.ckpt')
70 trainer.save_checkpoint(model_file, weights_only=True)
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
497
498 # dispath `start_training` or `start_testing` or `start_predicting`
--> 499 self.dispatch()
500
501 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in dispatch(self)
544
545 else:
--> 546 self.accelerator.start_training(self)
547
548 def train_or_test_or_predict(self):
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in start_training(self, trainer)
71
72 def start_training(self, trainer):
---> 73 self.training_type_plugin.start_training(trainer)
74
75 def start_testing(self, trainer):
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
112 def start_training(self, trainer: 'Trainer') -> None:
113 # double dispatch to initiate the training loop
--> 114 self._results = trainer.run_train()
115
116 def start_testing(self, trainer: 'Trainer') -> None:
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in run_train(self)
605 self.progress_bar_callback.disable()
606
--> 607 self.run_sanity_check(self.lightning_module)
608
609 # set stage for logging
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
858
859 # run eval step
--> 860 _, eval_results = self.run_evaluation(max_batches=self.num_sanity_val_batches)
861
862 self.on_sanity_check_end()
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, max_batches, on_epoch)
723 # lightning module methods
724 with self.profiler.profile("evaluation_step_and_end"):
--> 725 output = self.evaluation_loop.evaluation_step(batch, batch_idx, dataloader_idx)
726 output = self.evaluation_loop.evaluation_step_end(output)
727
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, batch, batch_idx, dataloader_idx)
164 model_ref._current_fx_name = "validation_step"
165 with self.trainer.profiler.profile("validation_step"):
--> 166 output = self.trainer.accelerator.validation_step(args)
167
168 # capture any logged information
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self, args)
175
176 with self.precision_plugin.val_step_context(), self.training_type_plugin.val_step_context():
--> 177 return self.training_type_plugin.validation_step(*args)
178
179 def test_step(self, args):
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self, *args, **kwargs)
129
130 def validation_step(self, *args, **kwargs):
--> 131 return self.lightning_module.validation_step(*args, **kwargs)
132
133 def test_step(self, *args, **kwargs):
<ipython-input-15-6ef4e0993417> in validation_step(self, batch, batch_idx)
130 def validation_step(self, batch, batch_idx):
131 x, y_true = batch
--> 132 _, y_pred = self(x, labels=y_true)
133 preds = torch.argmax(y_pred, axis=1)
134 self.valid_acc(preds, y_true[0])
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
<ipython-input-15-6ef4e0993417> in forward(self, *input, **kwargs)
120
121 def forward(self, *input, **kwargs):
--> 122 return self.model(*input, **kwargs)
123
124 def training_step(self, batch, batch_idx):
~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
<ipython-input-15-6ef4e0993417> in forward(self, sentence, labels)
93 print("self.num_labels[0]", self.num_labels[0])
94 print("len(labels[0].view(-1))", len(labels[0].view(-1)))
---> 95 loss = F.cross_entropy(logits.view(-1, self.num_labels[0]), labels[0].view(-1))
96 return loss, logits
97
RuntimeError: shape '[-1, 38]' is invalid for input of size 1
My problem was two things.
First, I had to run classifier() before calculating cross_entropy(); as written, F.cross_entropy already reduces to a scalar loss, so calling .view(-1, 38) on its result is a view over a size-1 tensor, which is exactly the error above.
Second, I had to push X, the final hidden state, through the linear layers before the classifier:
X = final_hidden_state

# Push through linear layers
for l in self.linears:
    X = l(X)

logits = self.classifier(X)
This achieves a working model. However, the first epoch's validation score is 0%.
This will require further work.
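Putting both fixes together, the tail of forward() would look roughly like this (a sketch, assuming the shapes printed above, i.e. final_hidden_state of shape [batch_size, 2 * hidden_dim] and self.classifier mapping that to self.num_labels[0] classes):
X = final_hidden_state

# Push through the (optional) extra fully connected layers
for l in self.linears:
    X = l(X)

logits = self.classifier(X)          # [batch_size, num_labels[0]], raw scores

loss = None
if labels is not None:               # truthiness of a list/tensor is ambiguous
    # F.cross_entropy expects raw logits plus class indices and returns a scalar
    loss = F.cross_entropy(logits.view(-1, self.num_labels[0]), labels[0].view(-1))
return loss, logits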

KeyError while fine-tuning T5 for summarization with HuggingFace

I am trying to fine-tune the T5 transformer for summarization, but I am receiving a KeyError:
KeyError: 'Indexing with integers (to access backend Encoding for a given batch index) is not available when using Python based tokenizers'
The code I am using is basically this:
model_name = '...'
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
model.to(device)
(...)
df_dataset = df_dataset[['summary','document']]
df_dataset.document = 'summarize: ' + df_dataset.document
X = list(df_dataset['document'])
y = list(df_dataset['summary'])
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
X_train_tokenized = tokenizer(X_train, padding=True, truncation=True, max_length=512)
y_train_tokenized = tokenizer(y_train, padding=True, truncation=True, max_length=512)
X_val_tokenized = tokenizer(X_val, padding=True, truncation=True, max_length=512)
y_val_tokenized = tokenizer(y_val, padding=True, truncation=True, max_length=512)
# Create torch dataset
class Dataset(torch.utils.data.Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)
training_set = Dataset(X_train_tokenized, y_train_tokenized)
validation_set = Dataset(X_val_tokenized, y_val_tokenized)
# Define Trainer
args = TrainingArguments(
    output_dir="output",
    evaluation_strategy="steps",
    eval_steps=500,
    per_device_train_batch_size=TRAIN_BATCH_SIZE,
    per_device_eval_batch_size=VALID_BATCH_SIZE,
    num_train_epochs=TRAIN_EPOCHS,
    save_steps=3000,
    load_best_model_at_end=True,
)
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=training_set,
    eval_dataset=validation_set,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
)
trainer.train()
And the full error:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-29-f31e4c5cde21> in <module>
1 # Train pre-trained model
----> 2 trainer.train()
c:\programdata\anaconda3\envs\summa\lib\site-packages\transformers\trainer.py in train(self, resume_from_checkpoint, trial, **kwargs)
1099 self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
1100
-> 1101 for step, inputs in enumerate(epoch_iterator):
1102
1103 # Skip past any already trained steps if resuming training
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
515 if self._sampler_iter is None:
516 self._reset()
--> 517 data = self._next_data()
518 self._num_yielded += 1
519 if self._dataset_kind == _DatasetKind.Iterable and \
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\dataloader.py in _next_data(self)
555 def _next_data(self):
556 index = self._next_index() # may raise StopIteration
--> 557 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
558 if self._pin_memory:
559 data = _utils.pin_memory.pin_memory(data)
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\_utils\fetch.py in fetch(self, possibly_batched_index)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\_utils\fetch.py in <listcomp>(.0)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
<ipython-input-24-67979e648b75> in __getitem__(self, idx)
7 def __getitem__(self, idx):
8 item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
----> 9 item['labels'] = torch.tensor(self.labels[idx])
10 return item
11
c:\programdata\anaconda3\envs\summa\lib\site-packages\transformers\tokenization_utils_base.py in __getitem__(self, item)
232 return self._encodings[item]
233 else:
--> 234 raise KeyError(
235 "Indexing with integers (to access backend Encoding for a given batch index) "
236 "is not available when using Python based tokenizers"
KeyError: 'Indexing with integers (to access backend Encoding for a given batch index) is not available when using Python based tokenizers'
And if I change the line:
tokenizer = T5Tokenizer.from_pretrained(model_name)
To:
tokenizer = T5TokenizerFast.from_pretrained(model_name)
the error changes to:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-28-f31e4c5cde21> in <module>
1 # Train pre-trained model
----> 2 trainer.train()
c:\programdata\anaconda3\envs\summa\lib\site-packages\transformers\trainer.py in train(self, resume_from_checkpoint, trial, **kwargs)
1099 self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
1100
-> 1101 for step, inputs in enumerate(epoch_iterator):
1102
1103 # Skip past any already trained steps if resuming training
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
515 if self._sampler_iter is None:
516 self._reset()
--> 517 data = self._next_data()
518 self._num_yielded += 1
519 if self._dataset_kind == _DatasetKind.Iterable and \
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\dataloader.py in _next_data(self)
555 def _next_data(self):
556 index = self._next_index() # may raise StopIteration
--> 557 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
558 if self._pin_memory:
559 data = _utils.pin_memory.pin_memory(data)
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\_utils\fetch.py in fetch(self, possibly_batched_index)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
c:\programdata\anaconda3\envs\summa\lib\site-packages\torch\utils\data\_utils\fetch.py in <listcomp>(.0)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
<ipython-input-23-67979e648b75> in __getitem__(self, idx)
7 def __getitem__(self, idx):
8 item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
----> 9 item['labels'] = torch.tensor(self.labels[idx])
10 return item
11
RuntimeError: Could not infer dtype of tokenizers.Encoding
Any idea of what is wrong?
This is because this tokenizer returns a BatchEncoding object rather than a plain dict, so its values cannot be indexed with an integer batch index.
You have to amend the __getitem__ method of your dataset class along the lines of:
class ForT5Dataset(torch.utils.data.Dataset):
    def __init__(self, inputs, targets):
        self.inputs = inputs
        self.targets = targets

    def __len__(self):
        return len(self.targets)

    def __getitem__(self, index):
        input_ids = torch.tensor(self.inputs["input_ids"][index]).squeeze()
        target_ids = torch.tensor(self.targets["input_ids"][index]).squeeze()
        return {"input_ids": input_ids, "labels": target_ids}
and pass the .data property of the tokenized encodings when initializing, like:
train_ds = ForT5Dataset(train_in.data, train_out.data).
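As a quick sanity check (a sketch reusing the tokenizer from the question), the tokenizer output is a BatchEncoding whose .data attribute is the plain dict of lists that ForT5Dataset indexes:
enc = tokenizer(["summarize: a short document"], padding=True,
                truncation=True, max_length=512)
print(type(enc).__name__)      # BatchEncoding
print(list(enc.data.keys()))   # typically ['input_ids', 'attention_mask']
print(enc.data["input_ids"])   # list of token-id lists, one per input string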

ValueError: tile cannot extend outside image pytorch

I am loading NumPy arrays as images in PyTorch while training a classifier model, and it is giving me this error. I tried everything but could not figure it out; please help.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-12-b4d3f7be01c1> in <module>
1 # training
----> 2 trained_model = train(n_epochs, np.Inf, loaders, model, optimizer, criterion)
<ipython-input-10-b4d180a2c041> in train(n_epochs, valid_loss_min_input, loaders, model, optimizer, criterion, device, checkpoint_path, best_model_path)
29 ###################
30 model.train()
---> 31 for batch_idx, (data, target) in enumerate(loaders['train']):
32 # move to gpu
33 data, target = data.to(device), target.to(device)
G:\anaconda3\envs\data_env\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
343
344 def __next__(self):
--> 345 data = self._next_data()
346 self._num_yielded += 1
347 if self._dataset_kind == _DatasetKind.Iterable and \
G:\anaconda3\envs\data_env\lib\site-packages\torch\utils\data\dataloader.py in _next_data(self)
383 def _next_data(self):
384 index = self._next_index() # may raise StopIteration
--> 385 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
386 if self._pin_memory:
387 data = _utils.pin_memory.pin_memory(data)
G:\anaconda3\envs\data_env\lib\site-packages\torch\utils\data\_utils\fetch.py in fetch(self, possibly_batched_index)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
G:\anaconda3\envs\data_env\lib\site-packages\torch\utils\data\_utils\fetch.py in <listcomp>(.0)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
G:\anaconda3\envs\data_env\lib\site-packages\torchvision\datasets\folder.py in __getitem__(self, index)
135 sample = self.loader(path)
136 if self.transform is not None:
--> 137 sample = self.transform(sample)
138 if self.target_transform is not None:
139 target = self.target_transform(target)
G:\anaconda3\envs\data_env\lib\site-packages\torchvision\transforms\transforms.py in __call__(self, img)
59 def __call__(self, img):
60 for t in self.transforms:
---> 61 img = t(img)
62 return img
63
<ipython-input-3-88cdee8f0d6c> in __call__(self, img)
7 im = np.asarray(img)
8 im = detect(im)
----> 9 img = Image.fromarray(im)
10 img = img.resize(size=(128, 128))
11 return img
G:\anaconda3\envs\data_env\lib\site-packages\PIL\Image.py in fromarray(obj, mode)
2768 obj = obj.tostring()
2769
-> 2770 return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
2771
2772
G:\anaconda3\envs\data_env\lib\site-packages\PIL\Image.py in frombuffer(mode, size, data, decoder_name, *args)
2708 return im
2709
-> 2710 return frombytes(mode, size, data, decoder_name, args)
2711
2712
G:\anaconda3\envs\data_env\lib\site-packages\PIL\Image.py in frombytes(mode, size, data, decoder_name, *args)
2648
2649 im = new(mode, size)
-> 2650 im.frombytes(data, decoder_name, args)
2651 return im
2652
G:\anaconda3\envs\data_env\lib\site-packages\PIL\Image.py in frombytes(self, data, decoder_name, *args)
795 # unpack data
796 d = _getdecoder(self.mode, decoder_name, args)
--> 797 d.setimage(self.im)
798 s = d.decode(data)
799
ValueError: tile cannot extend outside the image
In the custom transform, I am trying to preprocess the image before converting it back to PIL.
It works fine when I test the code by loading a small batch for display.
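For reference, Image.fromarray typically fails this way when the array it receives is not what PIL expects, e.g. an unexpected dtype, an empty dimension, or non-contiguous memory after processing. A defensive sketch of the custom transform (DetectTransform is a hypothetical name; detect() is the preprocessing function from the traceback):
import numpy as np
from PIL import Image

class DetectTransform(object):  # hypothetical name for the custom transform above
    def __call__(self, img):
        im = np.asarray(img)
        im = detect(im)  # preprocessing step from the question
        # Guard against outputs PIL cannot handle
        assert im.size > 0, "detect() returned an empty array"
        im = np.ascontiguousarray(im)
        if im.dtype != np.uint8:
            im = np.clip(im, 0, 255).astype(np.uint8)
        img = Image.fromarray(im)
        img = img.resize(size=(128, 128))
        return img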

Pytorch transforms.RandomRotation() does not work on Google Colab

Normally I was working on letter & digit recognition on my computer, and I wanted to move my project to Colab, but unfortunately there was an error (you can see the error below).
After some debugging I found which line is giving me the error:
transforms.RandomRotation(degrees=(90, -90))
Below I wrote simple abstract code to reproduce this error. This code does not work on Colab, but it works fine in my own computer environment. The problem might be the different versions of the PyTorch library: I have version 1.3.1 on my computer, and Colab uses version 1.4.0.
import torch
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt

transformOpt = transforms.Compose([
    transforms.RandomRotation(degrees=(90, -90)),
    transforms.ToTensor()
])

train_set = datasets.MNIST(
    root='', train=True, transform=transformOpt, download=True)
test_set = datasets.MNIST(
    root='', train=False, transform=transformOpt, download=True)

train_loader = torch.utils.data.DataLoader(
    dataset=train_set,
    batch_size=100,
    shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_set,
    batch_size=100,
    shuffle=False)

images, labels = next(iter(train_loader))
plt.imshow(images[0].view(28, 28), cmap="gray")
plt.show()
The full error I got when I executed this sample code on Google Colab:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-1-8409db422154> in <module>()
24 shuffle=False)
25
---> 26 images, labels = next(iter(train_loader))
27 plt.imshow(images[0].view(28, 28), cmap="gray")
28 plt.show()
10 frames
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
343
344 def __next__(self):
--> 345 data = self._next_data()
346 self._num_yielded += 1
347 if self._dataset_kind == _DatasetKind.Iterable and \
/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _next_data(self)
383 def _next_data(self):
384 index = self._next_index() # may raise StopIteration
--> 385 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
386 if self._pin_memory:
387 data = _utils.pin_memory.pin_memory(data)
/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in <listcomp>(.0)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
/usr/local/lib/python3.6/dist-packages/torchvision/datasets/mnist.py in __getitem__(self, index)
95
96 if self.transform is not None:
---> 97 img = self.transform(img)
98
99 if self.target_transform is not None:
/usr/local/lib/python3.6/dist-packages/torchvision/transforms/transforms.py in __call__(self, img)
68 def __call__(self, img):
69 for t in self.transforms:
---> 70 img = t(img)
71 return img
72
/usr/local/lib/python3.6/dist-packages/torchvision/transforms/transforms.py in __call__(self, img)
   1001         angle = self.get_params(self.degrees)
   1002
--> 1003         return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)
   1004
   1005     def __repr__(self):
/usr/local/lib/python3.6/dist-packages/torchvision/transforms/functional.py in rotate(img, angle, resample, expand, center, fill)
727 fill = tuple([fill] * 3)
728
--> 729 return img.rotate(angle, resample, expand, center, fillcolor=fill)
730
731
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in rotate(self, angle, resample, expand, center, translate, fillcolor)
   2003             w, h = nw, nh
   2004
--> 2005         return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor)
   2006
   2007     def save(self, fp, format=None, **params):
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in transform(self, size, method, data, resample, fill, fillcolor)
   2297             raise ValueError("missing method data")
   2298
--> 2299         im = new(self.mode, size, fillcolor)
   2300         if method == MESH:
   2301             # list of quads
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in new(mode, size, color)
   2503         im.palette = ImagePalette.ImagePalette()
   2504         color = im.palette.getcolor(color)
--> 2505     return im._new(core.fill(mode, size, color))
   2506
   2507
TypeError: function takes exactly 1 argument (3 given)
You're absolutely correct: torchvision 0.5 has a bug in RandomRotation() in the fill argument, probably due to an incompatible Pillow version. This issue has since been fixed (PR #1760) and will be resolved in the next release.
As a temporary workaround, add fill=(0,) to the RandomRotation transform:
transforms.RandomRotation(degrees=(90, -90), fill=(0,))
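Applied to the pipeline from the question, only the fill argument changes:
transformOpt = transforms.Compose([
    transforms.RandomRotation(degrees=(90, -90), fill=(0,)),  # fill as a 1-tuple for grayscale MNIST
    transforms.ToTensor()
])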

Pytorch custom Dataset class giving wrong output

I am trying to use this class I built for a dataset, but it says the input should be a PIL Image or ndarray. I'm not quite sure what's wrong with it. Here is the class that I am using:
class RotateDataset(Dataset):
    def __init__(self, image_list, size, transform=None):
        self.image_list = image_list
        self.size = size
        self.transform = transform

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        img = cv2.imread(self.image_list[idx])
        image_height, image_width = img.shape[:2]
        print("ID: ", idx)
        if idx % 2 == 0:
            label = 0  # Set label
            # choose negative or positive rotation
            rotation_degree = random.randrange(35, 50, 1)
            posnegrot = np.random.randint(2)
            if posnegrot == 0:
                # positive rotation
                # rotation_matrix = cv2.getRotationMatrix2D((num_cols/2, num_rows/2), rotation_degree, 1)
                # img = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows))
                img = rotate_image(img, rotation_degree)
                img = crop_around_center(img, *largest_rotated_rect(image_width,
                                                                    image_height,
                                                                    math.radians(rotation_degree)))
            else:
                # negative rotation
                rotation_degree = -rotation_degree
                img = crop_around_center(img, *largest_rotated_rect(image_width,
                                                                    image_height,
                                                                    math.radians(rotation_degree)))
        else:
            label = 1
        img = cv2.resize(img, self.size, cv2.INTER_AREA)
        return self.transform(img), self.transform(label)
The error that it is giving me is:
TypeError: pic should be PIL Image or ndarray. Got <class 'int'>
It should give me an img (tensor) and a label (tensor), but I don't think it is doing it correctly.
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-f47943b2600c> in <module>
2 train_loss = 0.0
3 net.train()
----> 4 for image, label in enumerate(train_loader):
5 if train_on_gpu:
6 image, label = image.cuda(), label.cuda()
~\Anaconda3\envs\TF2\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
343
344 def __next__(self):
--> 345 data = self._next_data()
346 self._num_yielded += 1
347 if self._dataset_kind == _DatasetKind.Iterable and \
~\Anaconda3\envs\TF2\lib\site-packages\torch\utils\data\dataloader.py in _next_data(self)
383 def _next_data(self):
384 index = self._next_index() # may raise StopIteration
--> 385 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
386 if self._pin_memory:
387 data = _utils.pin_memory.pin_memory(data)
~\Anaconda3\envs\TF2\lib\site-packages\torch\utils\data\_utils\fetch.py in fetch(self, possibly_batched_index)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
~\Anaconda3\envs\TF2\lib\site-packages\torch\utils\data\_utils\fetch.py in <listcomp>(.0)
42 def fetch(self, possibly_batched_index):
43 if self.auto_collation:
---> 44 data = [self.dataset[idx] for idx in possibly_batched_index]
45 else:
46 data = self.dataset[possibly_batched_index]
<ipython-input-28-6c77357ff619> in __getitem__(self, idx)
35 label = 1
36 img = cv2.resize(img, self.size, cv2.INTER_AREA)
---> 37 return self.transform(img), self.transform(label)
~\Anaconda3\envs\TF2\lib\site-packages\torchvision\transforms\transforms.py in __call__(self, pic)
99 Tensor: Converted image.
100 """
--> 101 return F.to_tensor(pic)
102
103 def __repr__(self):
~\Anaconda3\envs\TF2\lib\site-packages\torchvision\transforms\functional.py in to_tensor(pic)
53 """
54 if not(_is_pil_image(pic) or _is_numpy(pic)):
---> 55 raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
56
57 if _is_numpy(pic) and not _is_numpy_image(pic):
TypeError: pic should be PIL Image or ndarray. Got <class 'int'>
As discussed in the comments, the problem was applying the transform to the label as well. The label should instead simply be wrapped as a tensor:
return self.transform(img), torch.tensor(label)
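In context, the end of RotateDataset.__getitem__ becomes (a sketch; the transform is still applied only to the image):
img = cv2.resize(img, self.size, cv2.INTER_AREA)
# ToTensor() accepts a PIL Image or ndarray; the integer label is wrapped directly
return self.transform(img), torch.tensor(label)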
