How to make predictions from a trained PyTorch and TorchText model? - python

Generally speaking: after successfully training a text RNN model with PyTorch, using TorchText to handle data loading from the original source, I would like to test it with other data sets (a sort of blink test) that come from different sources but have the same text format.
First I defined a class to handle the data loading.
class Dataset(object):
    def __init__(self, config):
        # init what I need
        ...

    def load_data(self, df: pd.DataFrame, *args):
        # implementation below
        # Data format like `(LABEL, TEXT)`
        ...

    def load_data_but_error(self, df: pd.DataFrame):
        # implementation below
        # Data format like `(TEXT)`
        ...
Here is the detail of load_data, which loads the data that I trained on successfully:
TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
LABEL = data.Field(sequential=False, use_vocab=False)
datafields = [(label_col, LABEL), (data_col, TEXT)]
# split my data to train/test
train_df, test_df = train_test_split(df, test_size=0.33, random_state=random_state)
train_examples = [data.Example.fromlist(i, datafields) for i in train_df.values.tolist()]
train_data = data.Dataset(train_examples, datafields)
# split train to train/val
train_data, val_data = train_data.split(split_ratio=0.8)
# build vocab
TEXT.build_vocab(train_data, vectors=Vectors(w2v_file))
self.word_embeddings = TEXT.vocab.vectors
self.vocab = TEXT.vocab
test_examples = [data.Example.fromlist(i, datafields) for i in test_df.values.tolist()]
test_data = data.Dataset(test_examples, datafields)
self.train_iterator = data.BucketIterator(
    train_data,
    batch_size=self.config.batch_size,
    sort_key=lambda x: len(x.title),
    repeat=False,
    shuffle=True)

self.val_iterator, self.test_iterator = data.BucketIterator.splits(
    (val_data, test_data),
    batch_size=self.config.batch_size,
    sort_key=lambda x: len(x.title),
    repeat=False,
    shuffle=False)
Next is my code (load_data_but_error) for loading data from other sources, which causes an error:
TEXT = data.Field(sequential=True, tokenize=tokenizer, lower=True, fix_length=self.config.max_sen_len)
datafields = [('title', TEXT)]
examples = [data.Example.fromlist(i, datafields) for i in df.values.tolist()]
blink_test = data.Dataset(examples, datafields)
self.blink_test = data.BucketIterator(
    blink_test,
    batch_size=self.config.batch_size,
    sort_key=lambda x: len(x.title),
    repeat=False,
    shuffle=True)
When executing this code, I got the error AttributeError: 'Field' object has no attribute 'vocab'. There is a question about this error here, but it isn't like my situation: in my case I already built a vocab in load_data and I want to reuse it for the blink tests.
My question is: what is the correct way to load and feed new data to a trained PyTorch model in order to test the current model?

What I needed was:
to keep TEXT from load_data and reuse it in load_data_but_error by assigning it to a class variable
to add train=True to the data.BucketIterator in the load_data_but_error function
Both changes are sketched below.
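In code, the idea looks roughly like this (a sketch; storing the field as self.TEXT is my own naming):

# in load_data, after TEXT.build_vocab(...), keep the Field on the instance
self.TEXT = TEXT

# in load_data_but_error, reuse that Field instead of defining a new one
datafields = [('title', self.TEXT)]
examples = [data.Example.fromlist(i, datafields) for i in df.values.tolist()]
blink_test = data.Dataset(examples, datafields)
self.blink_test = data.BucketIterator(
    blink_test,
    batch_size=self.config.batch_size,
    sort_key=lambda x: len(x.title),
    repeat=False,
    shuffle=True,
    train=True)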

Not really sure, but considering you have re-defined TEXT, you will have to explicitly create the vocab for your Field TEXT again. This can be done as follows:
TEXT.build_vocab(examples, min_freq = 2)
This particular statement adds a word from your dataset examples to the vocab only if it occurs at least two times; you can change this threshold as per your requirement.
You can read about build_vocab method at https://torchtext.readthedocs.io/en/latest/data.html#torchtext.data.Field.build_vocab.

Related

How to use DistilBERT Huggingface NLP model to perform sentiment analysis on new data?

I am using DistilBERT to do sentiment analysis on my dataset. The dataset contains text and a label for each row which identifies whether the text is a positive or negative movie review (eg: 1 = positive and 0 = negative). Here is the code from the huggingface documentation (https://huggingface.co/transformers/custom_datasets.html?highlight=imdb)
#This dataset can be explored in the Hugging Face model hub (IMDb), and can be alternatively downloaded with the 🤗 Datasets library with load_dataset("imdb").
wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
tar -xf aclImdb_v1.tar.gz
#This data is organized into pos and neg folders with one text file per example. Let’s write a function that can read this in.
from pathlib import Path
def read_imdb_split(split_dir):
    split_dir = Path(split_dir)
    texts = []
    labels = []
    for label_dir in ["pos", "neg"]:
        for text_file in (split_dir/label_dir).iterdir():
            texts.append(text_file.read_text())
            labels.append(0 if label_dir == "neg" else 1)  # use == for string comparison
    return texts, labels
train_texts, train_labels = read_imdb_split('aclImdb/train')
test_texts, test_labels = read_imdb_split('aclImdb/test')
from sklearn.model_selection import train_test_split
train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2)
from transformers import DistilBertTokenizerFast
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
val_encodings = tokenizer(val_texts, truncation=True, padding=True)
test_encodings = tokenizer(test_texts, truncation=True, padding=True)
import torch
class IMDbDataset(torch.utils.data.Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)

train_dataset = IMDbDataset(train_encodings, train_labels)
val_dataset = IMDbDataset(val_encodings, val_labels)
test_dataset = IMDbDataset(test_encodings, test_labels)
# Now that our datasets are ready, we can fine-tune a model either with the 🤗 Trainer/TFTrainer
# or with native PyTorch/TensorFlow. See the training docs.
# Fine-tuning with Trainer
# The steps above prepared the datasets in the way that the Trainer expects. Now all we need to do
# is create a model to fine-tune, define the TrainingArguments/TFTrainingArguments and instantiate
# a Trainer/TFTrainer.
from transformers import DistilBertForSequenceClassification, Trainer, TrainingArguments
training_args = TrainingArguments(
    output_dir='./results',          # output directory
    num_train_epochs=3,              # total number of training epochs
    per_device_train_batch_size=16,  # batch size per device during training
    per_device_eval_batch_size=64,   # batch size for evaluation
    warmup_steps=500,                # number of warmup steps for learning rate scheduler
    weight_decay=0.01,               # strength of weight decay
    logging_dir='./logs',            # directory for storing logs
    logging_steps=10,
)
model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
trainer = Trainer(
    model=model,                  # the instantiated 🤗 Transformers model to be trained
    args=training_args,           # training arguments, defined above
    train_dataset=train_dataset,  # training dataset
    eval_dataset=val_dataset      # evaluation dataset
)
trainer.train()
#We can also train with Pytorch/Tensorflow
from torch.utils.data import DataLoader
from transformers import DistilBertForSequenceClassification, AdamW
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
model.to(device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
optim = AdamW(model.parameters(), lr=5e-5)
for epoch in range(3):
    for batch in train_loader:
        optim.zero_grad()
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs[0]
        loss.backward()
        optim.step()

model.eval()
I want to know how to test this model on a new piece of data. So I have a dataframe that contains a piece of text/review in each row, and I want to predict its label. Does anyone know how I would go about doing that? I apologize, I am very new to this and would greatly appreciate any help! I tried taking in the text, cleaning it, and then doing
prediction = model.predict(text)
and I got an error saying DistilBERT has no attribute .predict.
If you just want to use the model, you can use the corresponding pipeline:
from transformers import pipeline
classifier = pipeline('sentiment-analysis')
Then you can use it:
classifier("I hate this book")
The code that you've shared from the documentation essentially covers the training and evaluation loop. Note that it contains two ways of fine-tuning: once with the Trainer, which also includes evaluation, and once with native PyTorch/TF, which contains just the training portion and not the evaluation portion.
Here is how the native method can be tweaked to generate predictions on the test set:
# Put model in evaluation mode
model.eval()
# Wrap the test set in a DataLoader so that we iterate over batches (batch size is arbitrary)
test_loader = DataLoader(test_dataset, batch_size=64)
# Tracking variables for storing ground truth and predictions
predictions, true_labels = [], []
# Prediction loop
for batch in test_loader:
    # Unpack the inputs from our dataloader and move to GPU/accelerator
    input_ids = batch['input_ids'].to(device)
    attention_mask = batch['attention_mask'].to(device)
    labels = batch['labels'].to(device)
    # Telling the model not to compute or store gradients, saving memory and
    # speeding up prediction
    with torch.no_grad():
        # Forward pass, calculate logit predictions
        # (no labels passed here, so outputs[0] is the logits rather than the loss)
        outputs = model(input_ids, attention_mask=attention_mask)
        logits = outputs[0]
    # Move logits and labels to CPU
    logits = logits.detach().cpu().numpy()
    label_ids = labels.to('cpu').numpy()
    # Store predictions and true labels
    predictions.append(logits)
    true_labels.append(label_ids)
After this loop has executed, predictions will contain the logits, i.e. the raw, unnormalized scores the model assigns to each class (apply a softmax if you need probabilities).
You can use the following to pick the label with the maximum score from the logits, and produce a classification report
import numpy as np
from sklearn.metrics import classification_report, accuracy_score

# Combine the results across all batches.
flat_predictions = np.concatenate(predictions, axis=0)
# For each sample, pick the label (0 or 1) with the higher score.
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
# Combine the correct labels for each batch into a single list.
flat_true_labels = np.concatenate(true_labels, axis=0)
# Accuracy
print(accuracy_score(flat_true_labels, flat_predictions))
# Classification report
report = classification_report(flat_true_labels, flat_predictions)
print(report)
For a more elegant way of performing predictions, you can create a BERTModel Class that would contain different methods and variables for handling the tokenization, creation of dataloader, running the predictions, etc.
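As an illustration (my own sketch, not from the answers above, untested), such a wrapper might look roughly like this, reusing the model, tokenizer and device from the training code:

import numpy as np
import torch

class SentimentPredictor:
    def __init__(self, model, tokenizer, device, batch_size=32):
        self.model = model.to(device).eval()
        self.tokenizer = tokenizer
        self.device = device
        self.batch_size = batch_size

    def predict(self, texts):
        """Return a predicted label (0 or 1) for each text in `texts`."""
        preds = []
        for start in range(0, len(texts), self.batch_size):
            batch = texts[start:start + self.batch_size]
            enc = self.tokenizer(batch, truncation=True, padding=True, return_tensors='pt')
            enc = {k: v.to(self.device) for k, v in enc.items()}
            with torch.no_grad():
                outputs = self.model(**enc)
            logits = outputs[0]
            preds.append(logits.argmax(dim=-1).cpu().numpy())
        return np.concatenate(preds)

# hypothetical usage on a dataframe with a 'review' column:
# predictions = SentimentPredictor(model, tokenizer, device).predict(df['review'].tolist())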
You can try code like this example: Link-BERT
You arrange the dataset according to the BERT model; in the D section of that link you can just change the model name and plug in your own dataset.

Multi task classification CNN

I have a df which is constructed like this: it has 5 columns, and I have to perform a multi-task (multi-output) classification.
Daytime  Environment  Filename     Weather
day      wet          2018-10.png  light_fog    [example of one row]
My problem is that once I have set up the flow from the dataframe, I don't know how to use tf.data.Dataset to build the dataset. Someone suggested I use TFRecords, but I have never used them. How can I do it without them?
train_data_gen = ImageDataGenerator(rescale=1. / 255)
train_gen = train_data_gen.flow_from_dataframe(train_df,
                                               directory=dataset_dir,
                                               x_col="Filename",
                                               y_col=["Daytime", "Weather", "Environment"],
                                               class_mode="multi_output",
                                               target_size=(img_size, img_size),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               seed=SEED)
According to the tutorial (https://www.tensorflow.org/tutorials/load_data/pandas_dataframe#with_tfdata), you should be able to do something similar:
feature_names = ["Daytime", "Weather", "Environment"]

def prepare_image(filepath, features):
    # You can do whatever transformations in here.
    file_content = tf.io.read_file(filepath)
    image = tf.io.decode_png(file_content)
    return image, features

filenames = df.pop('Filename')
features = df[feature_names]
shuffle_buffer_size = len(df.index)  # or whatever shuffle size you want to use

ds = tf.data.Dataset.from_tensor_slices((filenames, features))
ds = ds.shuffle(buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True)
# Applies the function to each pair of a filename and its corresponding features
ds = ds.map(prepare_image)
dataset = ds.batch(batch_size)
# Prefetch is just for optimization
dataset = dataset.prefetch(buffer_size=1)
Note that this should just give you the idea. I did not test it, so it may throw an error or two.
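Building on that, here is a hedged sketch (my own additions, untested) of one way to finish the pipeline, starting again from the original dataframe: encode each string label column to integer ids and emit one label per task, so that a Keras model with three outputs can consume the dataset directly. img_size and batch_size are assumed from the question.

import pandas as pd
import tensorflow as tf

# encode each string label column to integer ids (keep the mappings if you need them later)
for col in ["Daytime", "Weather", "Environment"]:
    df[col], _ = pd.factorize(df[col])

filenames = df["Filename"].values   # prepend dataset_dir here if these are relative paths
labels = (df["Daytime"].values, df["Weather"].values, df["Environment"].values)

def load_image(filepath, labels):
    image = tf.io.decode_png(tf.io.read_file(filepath))
    image = tf.image.resize(image, (img_size, img_size)) / 255.0
    return image, labels

ds = (tf.data.Dataset.from_tensor_slices((filenames, labels))
      .map(load_image)
      .batch(batch_size)
      .prefetch(1))

# model.fit(ds, epochs=...)   # assuming `model` has three outputs in the same order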

Converting two Numpy data sets into a particular PyTorch data set

I want to play around with a neural network that recognizes handwritten numbers. I found some of these on the web which use PyTorch, however they seem to download the data from the MNIST website in a particular format. My data is, however, available as follows:
with np.load('prediction-challenge-01-data.npz') as fh:
    data_x = fh['data_x']
    data_y = fh['data_y']
Where data_x is the training data and data_y are the labels of the pictures. I want these data sets to be in the same format as trainloader as shown below:
trainset = datasets.MNIST('/data/mnist', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
Where trainloader already has the training set data_x and labels data_y together in one set.
Is there any way to do this?
Edit: Shapes of data_x and data_y:
In [1]: data_x.shape
Out[2]: (20000, 1, 28, 28)
In [5]: data_y.shape
Out[7]: (20000,)
You can easily create your own dataset. Just inherit from torch.utils.data.Dataset and implement __getitem__ and __len__ at the very least. Here is a quick and dirty example to get you going:
import numpy as np
import torch

class YourOwnDataset(torch.utils.data.Dataset):
    def __init__(self, input_file_path, transformations):
        super().__init__()
        self.path = input_file_path
        self.transforms = transformations
        with np.load(self.path) as fh:
            # data_x has shape (20000, 1, 28, 28), data_y has shape (20000,)
            self.data = fh['data_x']
            self.labels = fh['data_y']

    # in __getitem__, we retrieve one item based on the input index
    def __getitem__(self, index):
        # based on the loss you chose and what you have in mind,
        # you can transform your label; here I assume they are
        # integer numbers (like 1, 3, etc. as labels used for classification)
        label = self.labels[index]
        # reshape the (1, 28, 28) array to H x W x C so the transforms (ToTensor) accept it
        img = np.transpose(self.data[index], (1, 2, 0))
        img = self.transforms(img)
        return img, label

    def __len__(self):
        return len(self.data)
and you can create your dataset like :
from torchvision import transforms
# add any number of transformations you like, I just added ToTensor()
transformations = transforms.Compose([transforms.ToTensor()])
trainset = YourOwnDataset('prediction-challenge-01-data.npz', transformations )
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
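As a side note (not part of the original answer): if you don't need per-sample transformations, torch.utils.data.TensorDataset is an even shorter way to pair the two arrays:

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

with np.load('prediction-challenge-01-data.npz') as fh:
    data_x = torch.from_numpy(fh['data_x']).float()  # (20000, 1, 28, 28)
    data_y = torch.from_numpy(fh['data_y']).long()   # (20000,)

trainset = TensorDataset(data_x, data_y)
trainloader = DataLoader(trainset, batch_size=64, shuffle=True)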

Preprocess huge data with a custom data generator function for keras

Actually I'm building a keras model and I have a dataset in the msg format with over 10 million instances and 40 features, which are all categorical. For the moment I'm using just a sample of it, since reading the whole dataset and encoding it cannot fit into memory. Here is part of the code I'm using:
import pandas as pd
from category_encoders import BinaryEncoder as be
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

def model():
    model = Sequential()
    model.add(Dense(120, input_dim=233, kernel_initializer='uniform', activation='selu'))
    model.add(Dense(12, kernel_initializer='uniform', activation='sigmoid'))
    model.compile(SGD(lr=0.008), loss='mean_squared_error', metrics=['accuracy'])
    return model

def addrDataLoading():
    data = pd.read_msgpack('datum.msg')
    data = data.dropna(subset=['s_address', 'd_address'])
    data = data.sample(300000)  # taking a sample of the whole dataset to make the encoding possible
    y = data[['s_address', 'd_address']]
    x = data.drop(['s_address', 'd_address'], 1)
    encX = be().fit(x, y)
    numeric_X = encX.transform(x)
    encY = be().fit(y, y)
    numeric_Y = encY.transform(y)
    scaler = StandardScaler()
    X_all = scaler.fit_transform(numeric_X)
    x_train = X_all[0:250000, :]
    y_train = numeric_Y.iloc[0:250000, :]
    x_val = X_all[250000:, :]
    y_val = numeric_Y.iloc[250000:, :]
    return x_train, y_train, x_val, y_val

x_train, y_train, x_val, y_val = addrDataLoading()
model().fit(x_train, y_train, validation_data=(x_val, y_val), epochs=20, batch_size=200)
So my question is how to use a custom data generator function to read and process all the data I have and not just a sample, and then use fit_generator() function to train my model?
EDIT
This is a sample of the data:
netData
I think that taking different samples from the data results in different encoding dimensions.
For this sample there are 16 different categories: 4 addresses (3 bits), 4 hostnames (3 bits), 1 subnet mask (1 bit), 5 infrastructures (3 bits), 1 access zone (1 bit), so the binary encoding gives us 11 bits, and the new dimension of the data is 11, previously 5. Now say that for another sample the address column has 8 different categories; that gives 4 bits in binary, and if we keep the same number of categories in the other columns, the overall encoding results in 12 dimensions. I believe that's what's causing the problem.
Slightly slow solution (repeating the same actions)
Edit: fit the BinaryEncoder before creating the generators.
Drop NAs first and work with the clean data from then on, to avoid reassigning the data frame:
data = pd.read_msgpack('datum.msg')
data.dropna(subset=['s_address','d_address']).to_msgpack('datum_clean.msg')
In this solution the data_generator can process the same data multiple times. If that's not critical, you can use it.
Define a function which reads the data and splits the index into train and test. It won't consume a lot of memory.
import pandas as pd
from category_encoders import BinaryEncoder as be
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import numpy as np

def model():
    # some code defining the model
    ...

def train_test_index_split():
    # if there's enough memory to add one more column
    data = pd.read_msgpack('datum_clean.msg')
    train_idx, test_idx = train_test_split(data.index)
    return data, train_idx, test_idx

data, train_idx, test_idx = train_test_index_split()
Define and initialize data generator, both for train and validation
def data_generator(data, encX, encY, batch_size, n_steps, index):
    # EDIT: as the data was cleaned, you don't need dropna
    # data = data.dropna(subset=['s_address', 'd_address'])
    for i in range(n_steps):
        batch_idx = np.random.choice(index, batch_size)
        sample = data.loc[batch_idx]
        y = sample[['s_address', 'd_address']]
        x = sample.drop(['s_address', 'd_address'], 1)
        numeric_X = encX.transform(x)
        numeric_Y = encY.transform(y)
        scaler = StandardScaler()
        X_all = scaler.fit_transform(numeric_X)
        yield X_all, numeric_Y
The edited part now trains the binary encoders. You should sub-sample your data to create a representative training set for the encoders. I guess the error with the shape of the data (Error when checking input: expected dense_9_input to have shape (233,) but got array with shape (234,)) was caused by an incorrectly trained BinaryEncoder:
def get_minimal_unique_frame(df):
    return (pd.Series([df[column].unique() for column in df], index=df.columns)
            .apply(pd.Series)         # transform the list of unique values to pd.Series
            .T                        # transpose the frame: columns are columns again
            .fillna(method='ffill'))  # fill NaNs with the last value

x = get_minimal_unique_frame(data.drop(['s_address', 'd_address'], 1))
y = get_minimal_unique_frame(data[['s_address', 'd_address']])
NB: I never used category_encoders and have an incompatible system configuration, so I can't install and check it; the code above may therefore cause problems. In that case, I guess, you should compare the lengths of the x and y data frames, make them the same, and probably change the index of the data frames.
encX = be().fit(x, y)
encY = be().fit(y, y)

batch_size = 200
train_steps = 100000
val_steps = 5000

train_gen = data_generator(data, encX, encY, batch_size, train_steps, train_idx)
test_gen = data_generator(data, encX, encY, batch_size, val_steps, test_idx)
Edit: please provide an example of x_sample; run train_gen, save the output, and post x_samples and y_samples:
x_samples = []
y_samples = []
for i in range(10):
    x_sample, y_sample = next(train_gen)
    x_samples.append(x_sample)
    y_samples.append(y_sample)
Note: the data generator won't stop by itself, but it will be stopped after train_steps by the fit_generator method.
Fit model with generators:
model.fit_generator(generator=train_gen, steps_per_epoch=train_steps, epochs=1,
                    validation_data=test_gen, validation_steps=val_steps)
As far as I know, Python does not copy pandas data frames unless you do it explicitly with copy() or similar. Because of that, both generators use the same object. But if you use a Jupyter Notebook, data leaks and uncollected garbage may occur, and memory troubles come with them.
More efficient solution (sketch)
Clean your data
data = pd.read_msgpack('datum.msg')
data.dropna(subset=['s_address','d_address']).to_msgpack('datum_clean.msg')
Create the train/test split, preprocess it, and store it as numpy arrays if you have enough disk space:
data, train_idx, test_idx = train_test_index_split()

def data_preprocessor(data, path, index):
    # data = data.dropna(subset=['s_address', 'd_address'])
    sample = data.loc[index]
    y = sample[['s_address', 'd_address']]
    x = sample.drop(['s_address', 'd_address'], 1)
    encX = be().fit(x, y)
    numeric_X = encX.transform(x)
    encY = be().fit(y, y)
    numeric_Y = encY.transform(y)
    scaler = StandardScaler()
    X_all = scaler.fit_transform(numeric_X)
    np.save(path + '_X', X_all)
    np.save(path + '_y', numeric_Y.values)

data_preprocessor(data, 'train', train_idx)
data_preprocessor(data, 'test', test_idx)
Delete unnecessary data:
del data
Load your files and use the following generator:
train_X = np.load('train_X.npy')
train_y = np.load('train_y.npy')
test_X = np.load('test_X.npy')
test_y = np.load('test_y.npy')

def data_generator(X, y, batch_size, n_steps):
    idxs = np.arange(len(X))
    np.random.shuffle(idxs)
    ptr = 0
    for _ in range(n_steps):
        batch_idx = idxs[ptr:ptr + batch_size]
        x_sample = X[batch_idx]
        y_sample = y[batch_idx]
        ptr += batch_size
        if ptr > len(X):
            ptr = 0
        yield x_sample, y_sample
Prepare generators:
train_gen = data_generator(train_X, train_y, batch_size, train_steps)
test_gen = data_generator(test_X, test_y, batch_size, test_steps)
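And finally fit the model with these generators, the same way as in the first solution:

model.fit_generator(generator=train_gen, steps_per_epoch=train_steps, epochs=1,
                    validation_data=test_gen, validation_steps=val_steps)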
Hope one of these solutions will help, at least as long as Python passes arrays and data frames by reference rather than by value; see this Stack Overflow answer about it.

How to use the iterator 'make_initializable_iterator' of tensorflow within an 'input_fn'?

I want to train my model with a tf.estimator.Estimator and load my data with the Dataset API. Because my data, for example 'mnist', is an array (tensor), I try to load it with 'tf.data.Dataset.from_tensor_slices'. But I don't know how to initialize a 'make_initializable_iterator' within an 'input_fn'.
I can train successfully with 'make_one_shot_iterator', but it loads slowly before training. "Higher-Level APIs in TensorFlow" is a good example of using 'make_initializable_iterator' within an 'input_fn', but it needs to return an 'iterator_initializer_hook' from the 'input_fn' to another function. I want to know whether there is any better or more elegant way.
def input_fn():
    mnist_data = input_data.read_data_sets('mnist_data', one_hot=False)
    images = mnist_data.train.images.reshape([-1, 28, 28, 1])
    labels = np.asarray(mnist_data.train.labels, dtype=np.int64)

    # Build dataset iterator
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    dataset = dataset.repeat(None)  # Infinite iterations
    dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.batch(100)
    iterator = dataset.make_one_shot_iterator()
    next_example = iterator.get_next()
    # Set runhook to initialize iterator
    return next_example
In TensorFlow version 1.5 and later, the tf.estimator.Estimator will automatically create and initialize an initializable iterator when you return a tf.data.Dataset from your input_fn. This enables you to write the following code, without having to worry about initialization or hooks:
def input_fn():
    mnist_data = input_data.read_data_sets('mnist_data', one_hot=False)
    images = mnist_data.train.images.reshape([-1, 28, 28, 1])
    labels = np.asarray(mnist_data.train.labels, dtype=np.int64)

    # Build dataset.
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    dataset = dataset.repeat(None)  # Infinite iterations
    dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.batch(100)
    return dataset
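For completeness, here is a rough sketch (my own, not from the answer) of how such an input_fn is typically wired into an Estimator; my_model_fn and the model_dir path are placeholders:

import tensorflow as tf

estimator = tf.estimator.Estimator(model_fn=my_model_fn, model_dir='/tmp/mnist_model')
estimator.train(input_fn=input_fn, steps=1000)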
Inside your code, add this:
self.hooks.append(utils_hooks.DatasetHook(iter))
In the run_loop.py, before the call into your fn, add this
for hook in dataset_hooks:
    sess.run(hook.iterator().initializer)
Then, it should be fine.
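For reference, a DatasetHook like the one referenced above is not part of TensorFlow itself; a rough sketch of what such a hook might look like (my own naming, TF 1.x SessionRunHook API):

import tensorflow as tf

class DatasetHook(tf.train.SessionRunHook):
    def __init__(self, iterator):
        self._iterator = iterator

    def iterator(self):
        return self._iterator

    def after_create_session(self, session, coord):
        # initialize the iterator as soon as the session is created
        session.run(self._iterator.initializer)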
