I am running AlexNet on the CIFAR10 dataset using PyTorch Lightning. Here is my model:
class SelfSupervisedModel(pl.LightningModule):
def __init__(self, hparams=None, num_classes=10, batch_size=128):
super(SelfSupervisedModel, self).__init__()
self.batch_size = batch_size
self.loss_fn = nn.CrossEntropyLoss()
self.hparams["lr"] = ModelHelper.Hyperparam.Learning_rate
self.model = torchvision.models.alexnet(pretrained=False)
def forward(self, x):
return self.model(x)
def training_step(self, train_batch, batch_idx):
inputs, targets = train_batch
predictions = self(inputs)
loss = self.loss_fn(predictions, targets)
return {'loss': loss}
def validation_step(self, test_batch, batch_idx):
inputs, targets = test_batch
predictions = self(inputs)
val_loss = self.loss_fn(predictions, targets)
_, preds = torch.max(predictions, 1)
acc = torch.sum(preds == targets.data) / (targets.shape[0] * 1.0)
return {'val_loss': val_loss, 'val_acc': acc, 'target': targets, 'preds': predictions}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
avg_acc = torch.stack([x['val_acc'].float() for x in outputs]).mean()
logs = {'val_loss': avg_loss, 'val_acc': avg_acc}
print(f'validation_epoch_end logs => {logs}')
OutputMatrix.predictions = torch.cat([tmp['preds'] for tmp in outputs])
OutputMatrix.targets = torch.cat([tmp['target'] for tmp in outputs])
return {'progress_bar': logs}
def configure_optimizers(self):
return torch.optim.SGD(self.parameters(), lr=self.hparams["lr"], momentum=0.9)
I am storing the predicted and true values in OutputMatrix.predictions and OutputMatrix.targets, which are then used to generate the confusion matrix shown below:
I'm pretty sure that this should not be the output, though, but I cannot find where the mistake is. Any help would be appreciated.
I would suggest using TorchMetrics and the internal log method, so the code could look like:
class MyModule(LightningModule):
def __init__(self):
...
self.train_acc = torchmetrics.Accuracy()
self.valid_acc = torchmetrics.Accuracy()
def training_step(self, batch, batch_idx):
x, y = batch
preds = self(x)
...
self.train_acc(preds, y)
self.log('train_acc', self.train_acc, on_step=True, on_epoch=False)
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
...
self.valid_acc(logits, y)
self.log('valid_acc', self.valid_acc, on_step=True, on_epoch=True)
as you can also find in the TorchMetrics docs on the PyTorch Lightning integration.
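Since the original goal was a confusion matrix, it is worth noting that TorchMetrics also provides a ConfusionMatrix metric that accumulates across batches in the same way; below is a minimal sketch extending the MyModule above (illustrative, not from the original answer; newer TorchMetrics versions require the task argument):

import torch
import torchmetrics
from pytorch_lightning import LightningModule

class MyModule(LightningModule):
    def __init__(self, num_classes=10):
        super().__init__()
        ...
        # accumulates a num_classes x num_classes matrix across validation batches
        self.val_confmat = torchmetrics.ConfusionMatrix(task="multiclass", num_classes=num_classes)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        preds = torch.argmax(logits, dim=1)  # class indices, not raw logits
        self.val_confmat.update(preds, y)

    def validation_epoch_end(self, outputs):
        print(self.val_confmat.compute())  # confusion matrix for the whole epoch
        self.val_confmat.reset()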
I am trying to fight overfitting, which is why I decided to look through the documentation (https://pytorch-lightning.readthedocs.io/en/stable/common/evaluation_basic.html#train-with-the-validation-loop), where I found that you can pass both a training and a validation dataloader to Trainer.fit. The question is: should I use this method, or can I simply pass my DataModule class to Trainer.fit to prevent overfitting?
Code DataLoader:
class ClassifierDataModule(pl.LightningDataModule):
def __init__(self, train_dataset:pd.DataFrame, val_dataset:pd.DataFrame, batch_size:int):
super().__init__()
self.prepare_data_per_node = False
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.batch_size=batch_size
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=os.cpu_count())
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=True, num_workers=os.cpu_count())
data_module_classifier = ClassifierDataModule(train_dataset, val_dataset, BATCH_SIZE)
And here is my Trainer.fit():
model = MulticlassClassificationLIGHT(class_weights)
#trainer.fit(model, data_module_classifier) # SHOULD I USE THIS METHOD TO PREVENT OVERFITTING
trainer.fit(model, data_module_classifier.train_dataloader(), data_module_classifier.val_dataloader()) # OR THIS ONE ?
My LightningModule just in case:
class MulticlassClassificationLIGHT(pl.LightningModule):
def __init__(self,class_weights):
super(MulticlassClassificationLIGHT, self).__init__()
self.num_feature=35
self.num_class=36
self.layer_1 = nn.Linear(self.num_feature, 512)
self.layer_2 = nn.Linear(512, 128)
self.layer_3 = nn.Linear(128, 64)
self.layer_out = nn.Linear(64, self.num_class)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.2)
self.batchnorm1 = nn.BatchNorm1d(512)
self.batchnorm2 = nn.BatchNorm1d(128)
self.batchnorm3 = nn.BatchNorm1d(64)
self.loss = nn.CrossEntropyLoss(weight=class_weights.to(device))
def forward(self, x):
x = self.layer_1(x)
x = self.batchnorm1(x)
x = self.relu(x)
x = self.layer_2(x)
x = self.batchnorm2(x)
x = self.relu(x)
x = self.dropout(x)
x = self.layer_3(x)
x = self.batchnorm3(x)
x = self.relu(x)
x = self.dropout(x)
x = self.layer_out(x)
return x
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = self.loss(logits, y)
self.log("train_loss", loss, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = self.loss(logits, y)
self.log("val_loss", loss, prog_bar=True, logger=True) # I ask Trainer to "ModelCheckpoint" this loss
return loss
Passing a validation dataloader during training does not fix overfitting by itself. It lets you measure how much the model is overfitting or underfitting: for a well-fit model, we want the performance on the validation data to be close to the performance on the training data.
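What actually helps against overfitting are things like regularization, more data or augmentation, or stopping training once the validation loss stops improving; for instance, Lightning's EarlyStopping callback can monitor the val_loss you are already logging (a sketch, not part of the original answer):

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping

# stop training once the logged "val_loss" has not improved for 5 epochs
early_stopping = EarlyStopping(monitor="val_loss", mode="min", patience=5)
trainer = Trainer(max_epochs=100, callbacks=[early_stopping])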
Regarding the syntax, this should work:
trainer.fit(model=model, train_dataloaders=data_module_classifier.train_dataloader(), val_dataloaders=data_module_classifier.val_dataloader())
The documentation for fit is here: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-class-api
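Passing the LightningDataModule itself is equivalent: the Trainer will call its train_dataloader() and val_dataloader() hooks for you, so either form runs the validation loop (illustrative, using the classes defined above):

# these two calls are equivalent ways to get a train + validation loop
trainer.fit(model, datamodule=data_module_classifier)
# trainer.fit(model,
#             train_dataloaders=data_module_classifier.train_dataloader(),
#             val_dataloaders=data_module_classifier.val_dataloader())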
I'm using an example for training a model on the MNIST dataset from PyTorch Lightning's documentation (see here), to which I tried to add a prediction step. However, when running trainer.predict(model) I get an error:
AttributeError: 'list' object has no attribute 'flatten'
I followed the instructions and examples I found online (adding the predict_step and predict_dataloader functions and adding a "predict" stage under the setup function), and it looks pretty simple; however, it doesn't work.
Here's the code I'm running:
import os
import torch
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchmetrics import Accuracy
from torchvision import transforms
from torchvision.datasets import MNIST
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
BATCH_SIZE = 256 if torch.cuda.is_available() else 64
class LitMNIST(LightningModule):
def __init__(self, data_dir=PATH_DATASETS, hidden_size=64, learning_rate=2e-4):
super().__init__()
# Set our init args as class attributes
self.data_dir = data_dir
self.hidden_size = hidden_size
self.learning_rate = learning_rate
# Hardcode some dataset specific attributes
self.num_classes = 10
self.dims = (1, 28, 28)
channels, width, height = self.dims
self.transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
)
# Define PyTorch model
self.model = nn.Sequential(
nn.Flatten(),
nn.Linear(channels * width * height, hidden_size),
nn.ReLU(),
nn.Dropout(p=0.9),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(hidden_size, self.num_classes),
)
self.val_accuracy = Accuracy(task='multiclass', num_classes=10) # I fixed this since the code from the tutorial didn't work
self.test_accuracy = Accuracy(task='multiclass', num_classes=10) # I fixed this since the code from the tutorial didn't work
def forward(self, x):
x = self.model(x)
return F.log_softmax(x, dim=1)
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
self.val_accuracy.update(preds, y)
# Calling self.log will surface up scalars for you in TensorBoard
self.log("val_loss", loss, prog_bar=True)
self.log("val_acc", self.val_accuracy, prog_bar=True)
def test_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
self.test_accuracy.update(preds, y)
# Calling self.log will surface up scalars for you in TensorBoard
self.log("test_loss", loss, prog_bar=True)
self.log("test_acc", self.test_accuracy, prog_bar=True)
def predict_step(self, batch, batch_idx, dataloader_idx=0):
return self(batch)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
####################
# DATA RELATED HOOKS
####################
def prepare_data(self):
# download
MNIST(self.data_dir, train=True, download=True)
MNIST(self.data_dir, train=False, download=True)
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)
if stage == "predict":
self.mnist_predict = MNIST(self.data_dir, train=False, transform=self.transform)
def train_dataloader(self):
return DataLoader(self.mnist_train, batch_size=BATCH_SIZE)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=BATCH_SIZE)
def test_dataloader(self):
return DataLoader(self.mnist_test, batch_size=BATCH_SIZE)
def predict_dataloader(self):
return DataLoader(self.mnist_predict, batch_size=BATCH_SIZE)
model = LitMNIST(hidden_size=2)
trainer = Trainer(
accelerator="auto",
devices=1 if torch.cuda.is_available() else None, # limit to one device for iPython runs
max_epochs=3,
callbacks=[TQDMProgressBar(refresh_rate=20)],
logger=CSVLogger(save_dir="logs/"),
)
trainer.fit(model)
predict = trainer.predict(model)
What is the problem?
You forgot to separate the input and the label in the batch:
def predict_step(self, batch, batch_idx, dataloader_idx=0):
x, y = batch
return self(x)
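With that change, trainer.predict(model) returns a list containing one tensor of log-probabilities per batch; a small sketch of turning that into class labels (variable names are illustrative):

predictions = trainer.predict(model)           # list of per-batch tensors
all_log_probs = torch.cat(predictions, dim=0)  # shape: (num_samples, num_classes)
pred_labels = all_log_probs.argmax(dim=1)      # predicted class index per sample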
I want to customize a TensorFlow model. I need a custom training algorithm, but I don't want my model to be defined inside the custom model class, just the training algorithm, like this:
class CustomModel(keras.Model):
def __init__(self,inputs, outputs, echo=False):
super().__init__()
self.echo = echo
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
print(loss)
if self.echo:
print('*')
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.compiled_metrics.update_state(y, y_pred)
return {m.name: m.result() for m in self.metrics}
inputs = keras.Input(shape=(224,224,3))
x = keras.layers.Conv2D(32,(3,3))(inputs)
x = keras.layers.Conv2D(64,3)(x)
x = keras.layers.Conv2D(64,3)(x)
x = keras.layers.AveragePooling2D()(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(64, activation='relu')(x)
x = keras.layers.Dense(3, activation='softmax')(x)
model = CustomModel(inputs, x, echo=True)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
opt = Adam(learning_rate=0.0001)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 5
history = model.fit_generator(train_generator,
validation_data=valid_generator, verbose=1, epochs=epochs)
error:
NotImplementedError: When subclassing the `Model` class, you should implement a `call` method.
You don't need to pass the (inputs, outputs) arguments to the __init__ of your subclassed model. You need to implement a call method in your subclassed model, for example as follows:
class CustomModel(keras.Model):
...
...
# A call method needs to be implemented to define the forward pass
def call(self, inputs, *args, **kwargs):
... # run `inputs` through the model's layers and return the output
Update
Based on the comments, here's a possible workaround: build the model from the provided inputs/outputs within __init__ and delegate to it in call.
class CustomModel(keras.Model):
def __init__(self, inputs, x, echo=False, **kwargs):
super().__init__(**kwargs)
self.model = keras.Model(inputs, x)
self.echo = echo
def call(self, inputs, *args, **kwargs):
return self.model(inputs)
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self.model(x, training=True) # Forward pass
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.model.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.compiled_metrics.update_state(y, y_pred)
return {m.name: m.result() for m in self.metrics}
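With this wrapper the functional graph is built inside the subclass, so the usage from the question should work essentially unchanged (a sketch under that assumption; train_generator and valid_generator come from the original post, and fit accepts generators directly in recent TF versions):

model = CustomModel(inputs, x, echo=True)
model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_generator, validation_data=valid_generator, verbose=1, epochs=5)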
class Seq2Seq(keras.Model):
def __init__(self, enc_v_dim, dec_v_dim, emb_dim, units, attention_layer_size, max_pred_len, start_token,
end_token):
super().__init__()
self.units = units
....
def encode(self, x):
o = self.enc_embeddings(x)
init_s = [tf.zeros((x.shape[0], self.units)), tf.zeros((x.shape[0], self.units))]
...
return s
def inference(self, x, return_align=False):
...
return pred_id
def train_logits(self, x, y, seq_len):
...
return logits
def step(self, x, y, seq_len):
with tf.GradientTape() as tape:
logits = self.train_logits(x, y, seq_len)
dec_out = y[:, 1:] # ignore <GO>
loss = self.cross_entropy(dec_out, logits)
grads = tape.gradient(loss, self.trainable_variables)
self.opt.apply_gradients(zip(grads, self.trainable_variables))
return loss
I found a natural language processing model that works and trains well, but I don't know how to save it. It is different from the typical model structure I've seen, and I can't call functions like build or compile on it. I would like to know how I can save such a model.
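A subclassed tf.keras.Model like this is usually persisted through its weights or a checkpoint rather than a full serialized graph; a minimal sketch of both options (illustrative, assuming the Seq2Seq class above has already been trained so its variables exist, and that self.opt is its optimizer as in step above):

import tensorflow as tf

# Option 1: save only the weights, then rebuild the object and load them back
model.save_weights("seq2seq_weights")
restored = Seq2Seq(enc_v_dim, dec_v_dim, emb_dim, units, attention_layer_size,
                   max_pred_len, start_token, end_token)
restored.load_weights("seq2seq_weights")  # restore is applied lazily once variables are built

# Option 2: a tf.train.Checkpoint can also capture the optimizer state
ckpt = tf.train.Checkpoint(model=model, opt=model.opt)
ckpt.save("ckpt/seq2seq")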
I am trying to modify a non-trainable model variable from a callback at the beginning of each epoch. Essentially, I would like to have a mechanism similar to the learning rate scheduler (which has built-in infrastructure in TF) but applicable to an arbitrary model variable. The code below is a minimal example to show the concept. I am trying to modify the decay variable, but it does not work. Apparently the initial value of the variable (1.0) is treated as a constant and folded into the graph, and it is never looked at again as training progresses, even though the variable seems to be properly modified (to 0.5) by the callback.
dense1 = tf.keras.layers.Dense(10)
decay = tf.Variable(1.0, trainable=False)
dense2 = tf.keras.layers.Dense(10)
def epoch_callback(epoch):
nonlocal decay
tf.keras.backend.set_value(decay, 0.5)
#decay.assign(0.5)
print(tf.keras.backend.get_value(decay))
input = tf.keras.layers.Input((MAX_LENGTH,))
x = dense1(input)
with tf.control_dependencies([decay]):
x = x * decay
prediction = dense2(x)
model = tf.keras.Model(inputs=[input], outputs=[prediction])
model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
callbacks = [tf.keras.callbacks.LambdaCallback(on_epoch_begin = lambda epoch, logs: epoch_callback(epoch))]
model.fit(train_ds, epochs=EPOCHS, verbose=1, callbacks=callbacks, validation_data=eval_ds)
@nbro: Here you go. The code below is what worked for me. I use a teacher-forcing protocol, and the per-epoch decay variable is used to "lower the teacher's voice" as training progresses.
class Teacher(tf.keras.layers.Layer):
def __init__(self, embedding, name='teacher', **kwargs):
super().__init__(name=name, **kwargs)
...
def build(self, input_shape):
...
def call(self, inputs, training=None):
x, y, decay = inputs
...
if training:
y = tf.multiply(y, decay)
else:
y = tf.multiply(y, tf.constant(0.0))
...
return x
def get_config(self):
return {}
class MyNet(tf.keras.Model):
def __init__(self, name='mynet', **kwargs):
super().__init__(name=name, **kwargs)
def build(self, input_shape):
...
self.teacher = Teacher()
self.decay = tf.Variable(1.0, trainable=False)
...
def set_decay(self, decay):
self.decay.assign(decay)
@tf.function
def call(self, example, training=None):
x, y = example
...
x = self.teacher((x, y, self.decay))
...
return x
def get_config(self):
return {}
def main():
train_ds = ...
eval_ds = ...
train_ds = train_ds.map(lambda data, label: ((data, label), label), num_parallel_calls=tf.data.experimental.AUTOTUNE)
eval_ds = eval_ds.map(lambda data, label: ((data, label), label), num_parallel_calls=tf.data.experimental.AUTOTUNE)
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
the_net = MyNet()
inputs = tf.keras.layers.Input((MAX_LENGTH,), dtype='int64', name='inputs')
targets = tf.keras.layers.Input((MAX_LENGTH,), dtype='int64', name='targets')
prediction = the_net((inputs, targets))
model = tf.keras.Model(inputs=[inputs, targets], outputs=[prediction])
model.compile(optimizer=tf.keras.optimizers.Adam(), loss=CosineSimilarity(name='val_loss'))
def _callback_fun(epoch, start = 0, steps = 8):
the_net.set_decay(tf.clip_by_value((start+steps-epoch)/steps, clip_value_min=tf.constant(0.0), clip_value_max=tf.constant(1.0)))
callbacks = [tf.keras.callbacks.LambdaCallback(on_epoch_begin=lambda epoch, logs: _callback_fun(epoch))]
model.fit(train_ds, epochs=EPOCHS, verbose=2, callbacks=callbacks, validation_data=eval_ds)
if __name__ == '__main__':
main()