Restoring TF Eager model without using training code - python

I am training (and saving) a very simple model in eager mode as follows:
import os

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

NUM_EXAMPLES = 2000
training_inputs = tf.random_normal([NUM_EXAMPLES])
noise = tf.random_normal([NUM_EXAMPLES])
outputs = training_inputs * 3 + 2 + noise


class Model(tf.keras.Model):
    def __init__(self):
        super(Model, self).__init__()
        self.W = tfe.Variable(5., name="weight")
        self.b = tfe.Variable(0., name="bias")

    def predict(self, input):
        return self.W * input + self.b


def loss(model, inputs, outputs):
    error = model.predict(inputs) - outputs
    return tf.reduce_mean(tf.square(error))


def grad(model, inputs, outputs):
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, outputs)
    return tape.gradient(loss_value, [model.W, model.b])


if __name__ == "__main__":
    model = Model()
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

    for i in range(300):
        gradients = grad(model, training_inputs, outputs)
        optimizer.apply_gradients(zip(gradients, [model.W, model.b]),
                                  global_step=tf.train.get_or_create_global_step())

    checkpoint_dir = './checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    root = tfe.Checkpoint(optimizer=optimizer,
                          model=model,
                          optimizer_step=tf.train.get_or_create_global_step())
    root.save(file_prefix=checkpoint_prefix)
The only ways I found to save/restore (with Checkpoint or Saver) require having access to the Model class in order to load it elsewhere, for instance:
model = Model()
checkpointer = tfe.Checkpoint(model=model)
checkpointer.restore(tf.train.latest_checkpoint('checkpoints/'))
print(model.predict(7))
The save method from tf.keras.Model doesn't seem to be implemented yet for Eager mode:
model.save("keras_model")
>>> NotImplementedError
Is there another way to save and load the model without having to instantiate a new Model object?
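One possible workaround (a hedged sketch, not part of the original question): checkpoint the variables themselves instead of the Model object, so the restoring side only needs to recreate plain variables with matching keys rather than rerun the training code.
# Saving side (in addition to, or instead of, the Checkpoint above):
var_checkpoint = tfe.Checkpoint(W=model.W, b=model.b)
save_path = var_checkpoint.save(os.path.join(checkpoint_dir, "vars"))

# Restoring side: no Model class required, only variables with the same keys.
W = tfe.Variable(0., name="weight")
b = tfe.Variable(0., name="bias")
tfe.Checkpoint(W=W, b=b).restore(save_path)
print(W * 7 + b)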

Related

PyTorch-Lightning error on trainer.predict()

I'm using an example for training a model on the MNIST dataset from pytorch-lightning's documentation (see here), to which I tried to add a prediction step. However, when calling trainer.predict(model) I get an error:
AttributeError: 'list' object has no attribute 'flatten'
I followed the instructions and examples I found online (adding the predict_step and predict_dataloader functions, and adding a "predict" stage under the setup function), and it looks pretty simple; however, it doesn't work.
Here's the code I'm running:
import os

import torch
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchmetrics import Accuracy
from torchvision import transforms
from torchvision.datasets import MNIST

PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
BATCH_SIZE = 256 if torch.cuda.is_available() else 64


class LitMNIST(LightningModule):
    def __init__(self, data_dir=PATH_DATASETS, hidden_size=64, learning_rate=2e-4):
        super().__init__()

        # Set our init args as class attributes
        self.data_dir = data_dir
        self.hidden_size = hidden_size
        self.learning_rate = learning_rate

        # Hardcode some dataset specific attributes
        self.num_classes = 10
        self.dims = (1, 28, 28)
        channels, width, height = self.dims
        self.transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,)),
            ]
        )

        # Define PyTorch model
        self.model = nn.Sequential(
            nn.Flatten(),
            nn.Linear(channels * width * height, hidden_size),
            nn.ReLU(),
            nn.Dropout(p=0.9),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_size, self.num_classes),
        )

        self.val_accuracy = Accuracy(task='multiclass', num_classes=10)  # I fixed this since the code from the tutorial didn't work
        self.test_accuracy = Accuracy(task='multiclass', num_classes=10)  # I fixed this since the code from the tutorial didn't work

    def forward(self, x):
        x = self.model(x)
        return F.log_softmax(x, dim=1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        self.val_accuracy.update(preds, y)

        # Calling self.log will surface up scalars for you in TensorBoard
        self.log("val_loss", loss, prog_bar=True)
        self.log("val_acc", self.val_accuracy, prog_bar=True)

    def test_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        self.test_accuracy.update(preds, y)

        # Calling self.log will surface up scalars for you in TensorBoard
        self.log("test_loss", loss, prog_bar=True)
        self.log("test_acc", self.test_accuracy, prog_bar=True)

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        return self(batch)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
        return optimizer

    ####################
    # DATA RELATED HOOKS
    ####################

    def prepare_data(self):
        # download
        MNIST(self.data_dir, train=True, download=True)
        MNIST(self.data_dir, train=False, download=True)

    def setup(self, stage=None):
        # Assign train/val datasets for use in dataloaders
        if stage == "fit" or stage is None:
            mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
            self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])

        # Assign test dataset for use in dataloader(s)
        if stage == "test" or stage is None:
            self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)

        if stage == "predict":
            self.mnist_predict = MNIST(self.data_dir, train=False, transform=self.transform)

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=BATCH_SIZE)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=BATCH_SIZE)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=BATCH_SIZE)

    def predict_dataloader(self):
        return DataLoader(self.mnist_predict, batch_size=BATCH_SIZE)


model = LitMNIST(hidden_size=2)
trainer = Trainer(
    accelerator="auto",
    devices=1 if torch.cuda.is_available() else None,  # limiting got iPython runs
    max_epochs=3,
    callbacks=[TQDMProgressBar(refresh_rate=20)],
    logger=CSVLogger(save_dir="logs/"),
)
trainer.fit(model)
predict = trainer.predict(model)
What is the problem?
You forgot to separate the input and label in the batch: the predict dataloader yields (image, label) pairs, so batch arrives as a list of two tensors and cannot be fed to the model directly.
def predict_step(self, batch, batch_idx, dataloader_idx=0):
    x, y = batch
    return self(x)
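For completeness, a small usage sketch (assuming the corrected predict_step above): trainer.predict returns one tensor of log-probabilities per batch, which can be stacked into a single tensor of class predictions.
batch_outputs = trainer.predict(model)        # list of [batch_size, 10] tensors
log_probs = torch.cat(batch_outputs, dim=0)   # [num_samples, 10]
predicted_classes = torch.argmax(log_probs, dim=1)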

pytorch_lightning.utilities.exceptions.MisconfigurationException when training in pytorch lightning

I am training a sample model with dummy data and I got this error: No `configure_optimizers()` method defined. Lightning `Trainer` expects as minimum a `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined. I have set everything up properly, but I still get this error when I start training. Is the problem the way I feed the dummy data into the network, or is there another reason?
import torch
from torch import nn, optim
import pytorch_lightning as pl
from torch.utils.data import DataLoader


class ImageClassifier(pl.LightningModule):
    def __init__(self, learning_rate=0.001):
        super().__init__()
        self.learning_rate = learning_rate
        self.conv_layer1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        output = self.conv_layer1(x)
        print(output.shape)
        return output

    def training_step(self, batch, batch_idx):
        inputs, targets = batch
        output = self(inputs)
        accuracy = self.binary_accuracy(output, targets)
        loss = self.loss(output, targets)
        self.log('train_accuracy', accuracy, prog_bar=True)
        self.log('train_loss', loss)
        return {'loss': loss, "training_accuracy": accuracy}

    def test_step(self, batch, batch_idx):
        inputs, targets = batch
        outputs = self.inputs(inputs)
        accuracy = self.binary_accuracy(outputs, targets)
        loss = self.loss(outputs, targets)
        self.log('test_accuracy', accuracy)
        return {"test_loss": loss, "test_accuracy": accuracy}

    def configure_optimizer(self):
        params = self.parameters()
        optimizer = optim.Adam(params=params, lr=self.learning_rate)
        return optimizer

    def binary_accuracy(self, outputs, inputs):
        _, outputs = torch.max(outputs, 1)
        correct_results_sum = (outputs == targets).sum().float()
        acc = correct_results_sum / targets.shape[0]
        return acc


model = ImageClassifier()
Input = DataLoader(torch.randn(1, 3, 28, 28))
trainer = pl.Trainer(max_epochs=10, progress_bar_refresh_rate=1)
trainer.fit(model, train_dataloader=Input)
In your code, the method is named configure_optimizer(), so there is indeed no configure_optimizers() method defined. It looks like a typo in the name of the function.
I had the same problem, and then I realized that a wrong method name can lead to this error. Just make sure you spell the method names correctly and import and use the packages appropriately.
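For illustration, a minimal sketch of the rename described above (same body as in the question, only the hook name changes):
def configure_optimizers(self):  # note the trailing "s" that Lightning looks for
    params = self.parameters()
    optimizer = optim.Adam(params=params, lr=self.learning_rate)
    return optimizer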

tensorflow autodiff slower than pytorch's counterpart

I am using TensorFlow 2.0 and trying to evaluate gradients for backpropagation through a simple feedforward neural network. Here's what my model looks like:
def __init__(self, input_size, output_size):
    inputs = tf.keras.Input(shape=(input_size,))
    hidden_layer1 = tf.keras.layers.Dense(30, activation='relu')(inputs)
    outputs = tf.keras.layers.Dense(output_size)(hidden_layer1)
    self.model = tf.keras.Model(inputs=inputs, outputs=outputs)
    self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    self.loss_function = tf.keras.losses.Huber()
The forward pass through this network is fine, but when I use a gradient tape to train the model, it is at least 10x slower than PyTorch.
Training function:
def learn_modified_x(self, inputs, targets, actions):
    with tf.GradientTape() as tape:
        predictions = self.model(inputs)
        predictions_for_action = gather_single_along_axis(predictions, actions)
        loss = self.loss_function(targets, predictions_for_action)
    grads = tape.gradient(loss, self.model.trainable_weights)
    self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
I tried commenting out lines to find what was actually causing the problem, and discovered that tape.gradient is a significant contributor to the slowdown.
Any idea?
PyTorch implementation
def __init__(self, input_size, nb_action):
    super(Network, self).__init__()
    self.input_size = input_size
    self.nb_action = nb_action
    self.fc1 = nn.Linear(input_size, 30)
    self.fc2 = nn.Linear(30, nb_action)

def forward(self, state):
    x = F.relu(self.fc1(state))
    q_values = self.fc2(x)
    return q_values

def learn(self, batch_state, batch_next_state, batch_reward, batch_action):
    outputs = self.model(batch_state).gather(1, batch_action.unsqueeze(1)).squeeze(1)
    next_outputs = self.model(batch_next_state).detach().max(1)[0]
    target = self.gamma * next_outputs + batch_reward
    td_loss = F.smooth_l1_loss(outputs, target)
    self.optimizer.zero_grad()
    td_loss.backward(retain_variables=True)
    self.optimizer.step()
def __init__(self, ...):
    ...
    self.model.call = tf.function(self.model.call)
    ...
You need to use tf.function to wrap your model's call function.
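Going one step further, a hedged sketch (reusing the asker's method and helpers, including gather_single_along_axis, unchanged): decorating the whole training step with tf.function so the forward pass, tape.gradient and the optimizer update are all compiled into a single graph, which usually helps more than wrapping only model.call.
@tf.function  # trace once, then reuse the compiled graph on every call
def learn_modified_x(self, inputs, targets, actions):
    with tf.GradientTape() as tape:
        predictions = self.model(inputs)
        predictions_for_action = gather_single_along_axis(predictions, actions)
        loss = self.loss_function(targets, predictions_for_action)
    grads = tape.gradient(loss, self.model.trainable_weights)
    self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))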

pytorch, Using nn.DataParallel in LSTM

/pytorch/aten/src/ATen/native/cudnn/RNN.cpp:1266: UserWarning: RNN module weights are not part of single contiguous chunk of memory.
This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters().
Hello. I am using PyTorch and trying to use DataParallel, but the model is an LSTM. The warning tells me to call flatten_parameters(), but I don't know when and where to do that. Can you let me know?
This is my model
import torch.nn as nn
from torchvision import models


class ConvLstm(nn.Module):
    def __init__(self, latent_dim, model, hidden_size, lstm_layers, bidirectional, n_class):
        super(ConvLstm, self).__init__()
        self.conv_model = Pretrained_conv(latent_dim, model)
        self.Lstm = Lstm(latent_dim, hidden_size, lstm_layers, bidirectional)
        self.output_layer = nn.Sequential(
            nn.Linear(2 * hidden_size if bidirectional == True else hidden_size, n_class),
            nn.Softmax(dim=-1)
        )

    def forward(self, x):
        batch_size, timesteps, channel_x, h_x, w_x = x.shape
        conv_input = x.view(batch_size * timesteps, channel_x, h_x, w_x)
        conv_output = self.conv_model(conv_input)
        lstm_input = conv_output.view(batch_size, timesteps, -1)
        lstm_output = self.Lstm(lstm_input)
        lstm_output = lstm_output[:, -1, :]
        output = self.output_layer(lstm_output)
        return output


class Pretrained_conv(nn.Module):
    def __init__(self, latent_dim, model):
        if model == 'resnet152':
            super(Pretrained_conv, self).__init__()
            self.conv_model = models.resnet152(pretrained=True)
            # ====== freezing all of the layers ======
            for param in self.conv_model.parameters():
                param.requires_grad = False
            # ====== changing the last FC layer to an output with the size we need. this layer is un freezed ======
            self.conv_model.fc = nn.Linear(self.conv_model.fc.in_features, latent_dim)

    def forward(self, x):
        return self.conv_model(x)


class Lstm(nn.Module):
    def __init__(self, latent_dim, hidden_size, lstm_layers, bidirectional):
        super(Lstm, self).__init__()
        self.Lstm = nn.LSTM(latent_dim, hidden_size=hidden_size,
                            num_layers=lstm_layers, batch_first=True, bidirectional=bidirectional)
        self.hidden_state = None

    def reset_hidden_state(self):
        self.hidden_state = None

    def forward(self, x):
        output, self.hidden_state = self.Lstm(x, self.hidden_state)
        return output
The model (including the LSTM) is run by executing the following code:
def foward_step(model, images, labels, criterion, mode=''):
    model.module.Lstm.reset_hidden_state()
    if mode == 'test':
        with torch.no_grad():
            output = model(images)
    else:
        output = model(images)
    loss = criterion(output, labels)
    # Accuracy calculation
    predicted_labels = output.detach().argmax(dim=1)
    acc = (predicted_labels == labels).cpu().numpy().sum()
    return loss, acc, predicted_labels.cpu()
This is the main code:
model = nn.DataParallel(model, device_ids=[0,1,2,3]).cuda()
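One common fix, shown here as a hedged sketch (not from the original post) that reuses the Lstm wrapper from the question: call flatten_parameters() at the start of the wrapper's forward, so each DataParallel replica re-compacts its cuDNN weights right before the LSTM call.
class Lstm(nn.Module):
    def __init__(self, latent_dim, hidden_size, lstm_layers, bidirectional):
        super(Lstm, self).__init__()
        self.Lstm = nn.LSTM(latent_dim, hidden_size=hidden_size,
                            num_layers=lstm_layers, batch_first=True, bidirectional=bidirectional)
        self.hidden_state = None

    def reset_hidden_state(self):
        self.hidden_state = None

    def forward(self, x):
        # Re-compact the weights on this replica so cuDNN sees one contiguous chunk.
        self.Lstm.flatten_parameters()
        output, self.hidden_state = self.Lstm(x, self.hidden_state)
        return output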

Saving/Loading PyTorch Models Wrapped in sklearn-compatible Estimators

I have written an autoencoder using PyTorch and rolled it into a custom sklearn BaseEstimator. I normally train the estimator on a machine with a GPU and save it for later evaluation using pickle. If I try to load an estimator that was saved while the model was on the GPU onto a machine without one, I get the following error:
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location='cpu' to map your storages to the CPU.
Is there a way to force the PyTorch model to be moved to the CPU before pickling the estimator without an explicit call?
Is there a way to unpickle an estimator that was saved while the model was on the GPU?
The following is an example of my PyTorch model and sklearn-compatible estimator, along with an example of how I am trying to save and load my models.
PyTorch Model
import torch.nn as nn


class _AutoEncoder(nn.Module):
    def __init__(self, input_dim, output_dim, encoder_dim=4):
        super(_AutoEncoder, self).__init__()
        hidden_dim = int((input_dim + encoder_dim) / 2)

        layers = []
        layers.append(nn.Linear(input_dim, hidden_dim))
        layers.append(nn.Linear(hidden_dim, encoder_dim))
        self.encoder = nn.Sequential(*layers)

        layers = []
        layers.append(nn.Linear(encoder_dim, hidden_dim))
        layers.append(nn.Linear(hidden_dim, output_dim))
        self.decoder = nn.Sequential(*layers)

    def forward(self, X):
        return self.decoder(self.encoder(X))
sklearn Compatible Estimator
import warnings
import inspect

import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn import utils as sk_utils
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data


class AutoEncoder(BaseEstimator, TransformerMixin):
    def __init__(
        self,
        encoder_dim=4,
        n_epochs=200,
        batch_size=None,
        shuffle=True,
        use_cuda=False
    ):
        super(AutoEncoder, self).__init__()
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")
        for arg, val in values.items():
            setattr(self, arg, val)

        if use_cuda:
            if torch.cuda.is_available():
                self.device = torch.device("cuda")
            else:
                self.device = torch.device("cpu")
                warnings.warn("cuda not available", UserWarning)
        else:
            self.device = torch.device("cpu")

    def fit(self, X, y=None):
        # X, y = sk_utils.check_X_y(X, y, ensure_2d=False, allow_nd=True)
        self._model = self._train_classifier(X, y)
        return self

    def transform(self, X):
        sk_utils.validation.check_is_fitted(self, ['_model'])
        X = sk_utils.check_array(X)
        X = torch.from_numpy(X.astype(np.float32)).to(self.device)
        with torch.no_grad():
            self._model.eval()
            output = self._model.forward(X)
        return output.cpu().numpy()

    def encode(self, X):
        sk_utils.validation.check_is_fitted(self, ['_model'])
        X = sk_utils.check_array(X)
        X = torch.from_numpy(X.astype(np.float32)).to(self.device)
        with torch.no_grad():
            self._model.eval()
            output = self._model.encoder(X)
        return output.cpu().numpy()

    def decode(self, X):
        sk_utils.validation.check_is_fitted(self, ['_model'])
        X = sk_utils.check_array(X)
        X = torch.from_numpy(X.astype(np.float32)).to(self.device)
        with torch.no_grad():
            self._model.eval()
            output = self._model.decoder(X)
        return output.cpu().numpy()

    def _train_classifier(self, x_train, y_train):
        x_train = torch.from_numpy(x_train.astype(np.float32)).to(self.device)
        y_train = torch.from_numpy(y_train.astype(np.float32)).to(self.device)

        input_dim = x_train.shape[-1]
        output_dim = y_train.shape[-1]

        model = _AutoEncoder(input_dim, output_dim, encoder_dim=self.encoder_dim).to(self.device)
        loss_function = nn.MSELoss()
        optimizer = optim.Adam(model.parameters())
        print(model)

        if self.batch_size is None:
            return self._batch_train_simple_classifier(x_train, y_train, model, loss_function, optimizer)
        else:
            return self._minibatch_train_simple_classifier(x_train, y_train, model, loss_function, optimizer)

    def _batch_train_simple_classifier(self, x_train, y_train, model, loss_function, optimizer):
        for epoch in range(1, self.n_epochs + 1):
            model.train()
            optimizer.zero_grad()
            outputs = model.forward(x_train)
            loss = loss_function(outputs, y_train)
            loss.backward()
            optimizer.step()

            if epoch % 10 == 0 or epoch == self.n_epochs:
                message = "Train Epoch: {:5d}, Loss: {:15.6f}".format(
                    epoch,
                    loss.item()
                )
                print(message)
        return model

    def _minibatch_train_simple_classifier(self, x_train, y_train, model, loss_function, optimizer):
        train_data = torch.utils.data.TensorDataset(x_train, y_train)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=self.batch_size, shuffle=self.shuffle)

        for epoch in range(1, self.n_epochs + 1):
            for data, target in train_loader:
                model.train()
                optimizer.zero_grad()
                outputs = model.forward(data)
                loss = loss_function(outputs, target)
                loss.backward()
                optimizer.step()

            if epoch % 10 == 0 or epoch == self.n_epochs:
                model.eval()
                outputs = model.forward(x_train)
                loss = loss_function(outputs, y_train)
                message = "Train Epoch: {:5d}, Loss: {:15.6f}".format(
                    epoch,
                    loss.item()
                )
                print(message)
        return model
Training
This is normally done on a machine with a GPU.
from sklearn import datasets as sk_datasets
digits = sk_datasets.load_digits(n_class=10, return_X_y=False)
data = digits.data
ae = AutoEncoder(
    encoder_dim=2,
    n_epochs=100,
    batch_size=128,
    shuffle=True,
    use_cuda=True
)
data_fitted = ae.fit_transform(data, data)
Saving the Estimator
I'd like to find a way to have the PyTorch model moved to the CPU before it is saved, without an explicit call. Maybe a function as part of the AutoEncoder class that gets called as it is pickled?
with open("autoencoder.pkl", "wb") as fp:
# ae._model needs to be moved to the CPU here.
# I don't want to have to call ae._model.cpu() explicitly
pickle.dump(ae, fp)
Loading
I cannot figure out how to load the estimator on a machine without a GPU if it was saved while the PyTorch model was still on the GPU.
# This gives an error if the model was saved while on the GPU,
# and a GPU is not available when loading.
with open("autoencoder.pkl", "rb") as fp:
    model = pickle.load(fp)

# This also gives a similar error. I also would not expect this to
# work, since the pickle file contains an sklearn estimator
# wrapping a PyTorch model.
with open("autoencoder.pkl", "rb") as fp:
    torch.load(fp, map_location="cpu")
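One possible approach (a sketch, not from the original post): give the AutoEncoder estimator the standard pickle hooks __getstate__/__setstate__, serializing the inner network through torch.save and restoring it with map_location, so the outer pickle never carries raw CUDA storages and CPU-only machines can unpickle it. Everything else reuses the attributes defined above.
import io
import torch

class AutoEncoder(BaseEstimator, TransformerMixin):
    # ... all of the methods shown above ...

    def __getstate__(self):
        state = self.__dict__.copy()
        model = state.pop("_model", None)
        if model is not None:
            buffer = io.BytesIO()
            # Serialize the network separately; torch.load can remap its
            # storages later, even if they were on the GPU when saved.
            torch.save(model, buffer)
            state["_model_bytes"] = buffer.getvalue()
        state.pop("device", None)  # recomputed on load
        return state

    def __setstate__(self, state):
        model_bytes = state.pop("_model_bytes", None)
        self.__dict__.update(state)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if model_bytes is not None:
            # map_location moves the tensors to whatever device is available.
            self._model = torch.load(io.BytesIO(model_bytes), map_location=self.device)
With these hooks in place, the original pickle.dump(ae, fp) and pickle.load(fp) calls should work unchanged on CPU-only machines.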
