I'm using PyTorch Lightning and I'm getting this error, but I don't really understand what the problem is. I created a deep learning pipeline to run with hyperparameter search, and I think the problem is in the model class below.
I omitted some parts of the code because I think they are irrelevant to this issue (and due to Stack Overflow's length restrictions). Thanks for the help!
class ProtBertBFDClassifier(pl.LightningModule):
def __init__(self,hparams) -> None:
super(ProtBertBFDClassifier, self).__init__()
self.hparams = hparams
self.batch_size = self.hparams.batch_size
self.model_name = pretrained_model_name
self.dataset = Loc_dataset()
self.metric_acc = Accuracy()
# build model
self.__build_model()
# Loss criterion initialization.
self.__build_loss()
if self.hparams.nr_frozen_epochs > 0:
self.freeze_encoder()
else:
self._frozen = False
self.nr_frozen_epochs = self.hparams.nr_frozen_epochs
def __build_model(self) -> None:
""" Init BERT model + tokenizer + classification head."""
self.ProtBertBFD = BertModel.from_pretrained(self.model_name,gradient_checkpointing=self.hparams.gradient_checkpointing)
self.encoder_features = 1024
# Tokenizer
self.tokenizer = BertTokenizer.from_pretrained(self.model_name, do_lower_case=False)
# Label Encoder
self.label_encoder = LabelEncoder(
self.hparams.label_set.split(","), reserved_labels=[]
)
self.label_encoder.unknown_index = None
# Classification head
self.classification_head = nn.Sequential(
nn.Linear(self.encoder_features*4, self.label_encoder.vocab_size),
nn.Tanh(),
)
.....
def predict(self, sample: dict) -> dict:
""" Predict function.
:param sample: dictionary with the text we want to classify.
Returns:
Dictionary with the input text and the predicted label.
"""
......
def pool_strategy(self, features,
pool_cls=True, pool_max=True, pool_mean=True,
pool_mean_sqrt=True):
token_embeddings = features['token_embeddings']
cls_token = features['cls_token_embeddings']
attention_mask = features['attention_mask']
## Pooling strategy
output_vectors = []
if pool_cls:
output_vectors.append(cls_token)
if pool_max:
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
token_embeddings[input_mask_expanded == 0] = -1e9 # Set padding tokens to large negative value
max_over_time = torch.max(token_embeddings, 1)[0]
output_vectors.append(max_over_time)
if pool_mean or pool_mean_sqrt:
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
#If tokens are weighted (by WordWeights layer), feature 'token_weights_sum' will be present
if 'token_weights_sum' in features:
sum_mask = features['token_weights_sum'].unsqueeze(-1).expand(sum_embeddings.size())
else:
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
if pool_mean:
output_vectors.append(sum_embeddings / sum_mask)
if pool_mean_sqrt:
output_vectors.append(sum_embeddings / torch.sqrt(sum_mask))
output_vector = torch.cat(output_vectors, 1)
return output_vector
........
inputs = self.tokenizer.batch_encode_plus(sample["seq"],
add_special_tokens=True,
padding=True,
truncation=True,
max_length=self.hparams.max_length)
if not prepare_target:
return inputs, {}
# Prepare target:
try:
targets = {"labels": self.label_encoder.batch_encode(sample["label"])}
return inputs, targets
except RuntimeError:
print(sample["label"])
raise Exception("Label encoder found an unknown label.")
......
def validation_step(self, batch: tuple, batch_nb: int, *args, **kwargs) -> dict:
""" Similar to the training step but with the model in eval mode.
Returns:
- dictionary passed to the validation_end function.
"""
inputs, targets = batch
model_out = self.forward(**inputs)
loss_val = self.loss(model_out, targets)
y = targets["labels"]
y_hat = model_out["logits"]
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = self.metric_acc(labels_hat, y)
output = OrderedDict({"val_loss": loss_val, "val_acc": val_acc,})
return output
def validation_epoch_end(self, outputs: list) -> dict:
""" Function that takes as input a list of dictionaries returned by the validation_step
function and measures the model performance across the entire validation set.
Returns:
- Dictionary with metrics to be added to the lightning logger.
"""
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
val_acc_mean = torch.stack([x['val_acc'] for x in outputs]).mean()
tqdm_dict = {"val_loss": val_loss_mean, "val_acc": val_acc_mean}
result = {
"progress_bar": tqdm_dict,
"log": tqdm_dict,
"val_loss": val_loss_mean,
}
return result
.......
def test_epoch_end(self, outputs: list) -> dict:
""" Function that takes as input a list of dictionaries returned by the validation_step
function and measures the model performance accross the entire validation set.
Returns:
- Dictionary with metrics to be added to the lightning logger.
"""
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
test_acc_mean = torch.stack([x['test_acc'] for x in outputs]).mean()
tqdm_dict = {"test_loss": test_loss_mean, "test_acc": test_acc_mean}
result = {
"progress_bar": tqdm_dict,
"log": tqdm_dict,
"test_loss": test_loss_mean,
}
return result
def configure_optimizers(self):
""" Sets different Learning rates for different parameter groups. """
parameters = [
{"params": self.classification_head.parameters()},
{
"params": self.ProtBertBFD.parameters(),
"lr": self.hparams.encoder_learning_rate,
},
]
optimizer = optim.Adam(parameters, lr=self.hparams.learning_rate)
return [optimizer], []
def __retrieve_dataset(self, train=True, val=True, test=True):
""" Retrieves task specific dataset """
if train:
return self.dataset.load_dataset(hparams.train_csv)
elif val:
return self.dataset.load_dataset(hparams.dev_csv)
elif test:
return self.dataset.load_dataset(hparams.test_csv)
else:
print('Incorrect dataset split')
def train_dataloader(self) -> DataLoader:
""" Function that loads the train set. """
self._train_dataset = self.__retrieve_dataset(val=False, test=False)
return DataLoader(
dataset=self._train_dataset,
sampler=RandomSampler(self._train_dataset),
batch_size=self.hparams.batch_size,
collate_fn=self.prepare_sample,
num_workers=self.hparams.loader_workers,
)
....
@classmethod
def add_model_specific_args(
cls, parser: HyperOptArgumentParser
) -> HyperOptArgumentParser:
""" Parser for Estimator specific arguments/hyperparameters.
:param parser: HyperOptArgumentParser obj
Returns:
- updated parser
"""
parser.opt_list(
"--max_length",
default=1536,
type=int,
help="Maximum sequence length.",
)
parser.add_argument(
"--encoder_learning_rate",
default=5e-06,
type=float,
help="Encoder specific learning rate.",
)
return parser
# these are project-wide arguments
parser = HyperOptArgumentParser(
strategy="random_search",
description="Minimalist ProtBERT Classifier",
add_help=True,
)
# Early Stopping
parser.add_argument(
"--monitor", default="val_acc", type=str, help="Quantity to monitor."
)
parser.add_argument(
"--metric_mode",
default="max",
type=str,
help="If we want to min/max the monitored quantity.",
choices=["auto", "min", "max"],
)
parser.add_argument(
"--patience",
default=5,
type=int,
help=(
"Number of epochs with no improvement "
"after which training will be stopped."
),
)
parser.add_argument(
"--accumulate_grad_batches",
default=32,
type=int,
help=(
"Accumulated gradients runs K small batches of size N before "
"doing a backwards pass."
),
)
# gpu/tpu args
parser.add_argument("--gpus", type=int, default=1, help="How many gpus")
parser.add_argument("--tpu_cores", type=int, default=None, help="How many tpus")
parser.add_argument(
"--val_percent_check",
default=1.0,
type=float,
help=(
"If you don't want to use the entire dev set (for debugging or "
"if it's huge), set how much of the dev set you want to use with this flag."
),
)
# each LightningModule defines arguments relevant to it
parser = ProtBertBFDClassifier.add_model_specific_args(parser)
hparams = parser.parse_known_args()[0]
"""
Main training routine specific for this project
:param hparams:
"""
seed_everything(hparams.seed)
# ------------------------
# 1 INIT LIGHTNING MODEL
# ------------------------
model = ProtBertBFDClassifier(hparams)
This is the error:
1 frames
<ipython-input-26-561494d91469> in __init__(self)
10 def __init__(self) -> None:
11 super(ProtBertBFDClassifier, self).__init__()
---> 12 self.hparams = parser.parse_known_args()[0]
13 self.batch_size = self.hparams.batch_size
14
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in __setattr__(self, name, value)
1223 buffers[name] = value
1224 else:
-> 1225 object.__setattr__(self, name, value)
1226
1227 def __delattr__(self, name):
AttributeError: can't set attribute
In recent versions of PyTorch Lightning, hparams is a read-only property of LightningModule, so the assignment self.hparams = hparams fails with AttributeError: can't set attribute (that is exactly what the module's __setattr__ in the traceback is rejecting). The quickest fix is to pin a version where the assignment still works:
pip install pytorch-lightning==1.2.10
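Alternatively, if you want to stay on a current Lightning release, the supported replacement is save_hyperparameters(); a minimal sketch of the change, assuming hparams is the argparse Namespace produced by your parser:
class ProtBertBFDClassifier(pl.LightningModule):
    def __init__(self, hparams) -> None:
        super().__init__()
        # `self.hparams = hparams` is read-only in Lightning >= 1.3;
        # this registers the arguments instead, and they remain
        # accessible as self.hparams.<name>.
        self.save_hyperparameters(hparams)
        self.batch_size = self.hparams.batch_size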
I am fine-tuning a HuggingFace transformer model (PyTorch version) using the HF Seq2SeqTrainingArguments & Seq2SeqTrainer, and I want to display the train and validation losses in TensorBoard (in the same chart).
As far as I understand, in order to plot the two losses together I need to use the SummaryWriter. The HF Callbacks documentation describes a TensorBoardCallback function that can receive a tb_writer argument:
https://huggingface.co/docs/transformers/v4.21.1/en/main_classes/callback#transformers.integrations.TensorBoardCallback
However, I cannot figure out what is the right way to use it, if it is even supposed to be used with the Trainer API.
My code looks something like this:
args = Seq2SeqTrainingArguments(
output_dir=output_dir,
evaluation_strategy='epoch',
learning_rate= 1e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
weight_decay=0.01,
save_total_limit=3,
num_train_epochs=num_train_epochs,
predict_with_generate=True,
logging_steps=logging_steps,
report_to='tensorboard',
push_to_hub=False,
)
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=tokenized_train_data,
eval_dataset=tokenized_val_data,
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
I would assume I should include the callback to TensorBoard in the trainer, e.g.,
callbacks = [TensorBoardCallback(tb_writer=tb_writer)]
but I cannot find a comprehensive example of how to use/what to import to use it.
I also found this feature request on GitHub,
https://github.com/huggingface/transformers/pull/4020
but no example of use, so I am confused...
Any insight will be appreciated
The only way I know of to plot two values on the same TensorBoard graph is to use two separate SummaryWriters with the same root directory. For example, the logging directories might be: log_dir/train and log_dir/eval.
This approach is used in this answer but for TensorFlow instead of pytorch.
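The core of the idea, stripped of the Trainer, is just two writers logging the same tag into sibling subdirectories; a minimal sketch:
from torch.utils.tensorboard import SummaryWriter

train_writer = SummaryWriter(log_dir="log_dir/train")
eval_writer = SummaryWriter(log_dir="log_dir/eval")

# Logging the same tag from both writers makes TensorBoard overlay the
# two curves on one chart, one line per run directory.
for step, (train_loss, eval_loss) in enumerate([(1.2, 1.4), (0.9, 1.1)]):
    train_writer.add_scalar("combined/loss", train_loss, step)
    eval_writer.add_scalar("combined/loss", eval_loss, step)

train_writer.close()
eval_writer.close()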
In order to do this with the 🤗 Trainer API a custom callback is needed that takes two SummaryWriters. Here is the code for my custom callback CombinedTensorBoardCallback, that I made by modifying the code for TensorBoardCallback:
import logging
import os
from transformers.integrations import TrainerCallback, is_tensorboard_available

logger = logging.getLogger(__name__)  # used by on_log below
def custom_rewrite_logs(d, mode):
new_d = {}
eval_prefix = "eval_"
eval_prefix_len = len(eval_prefix)
test_prefix = "test_"
test_prefix_len = len(test_prefix)
for k, v in d.items():
if mode == 'eval' and k.startswith(eval_prefix):
if k[eval_prefix_len:] == 'loss':
new_d["combined/" + k[eval_prefix_len:]] = v
elif mode == 'test' and k.startswith(test_prefix):
if k[test_prefix_len:] == 'loss':
new_d["combined/" + k[test_prefix_len:]] = v
elif mode == 'train':
if k == 'loss':
new_d["combined/" + k] = v
return new_d
class CombinedTensorBoardCallback(TrainerCallback):
"""
A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard).
Args:
tb_writer (`SummaryWriter`, *optional*):
The writer to use. Will instantiate one if not set.
"""
def __init__(self, tb_writers=None):
has_tensorboard = is_tensorboard_available()
if not has_tensorboard:
raise RuntimeError(
"TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or"
" install tensorboardX."
)
if has_tensorboard:
try:
from torch.utils.tensorboard import SummaryWriter # noqa: F401
self._SummaryWriter = SummaryWriter
except ImportError:
try:
from tensorboardX import SummaryWriter
self._SummaryWriter = SummaryWriter
except ImportError:
self._SummaryWriter = None
else:
self._SummaryWriter = None
self.tb_writers = tb_writers
def _init_summary_writer(self, args, log_dir=None):
log_dir = log_dir or args.logging_dir
if self._SummaryWriter is not None:
self.tb_writers = dict(train=self._SummaryWriter(log_dir=os.path.join(log_dir, 'train')),
eval=self._SummaryWriter(log_dir=os.path.join(log_dir, 'eval')))
def on_train_begin(self, args, state, control, **kwargs):
if not state.is_world_process_zero:
return
log_dir = None
if state.is_hyper_param_search:
trial_name = state.trial_name
if trial_name is not None:
log_dir = os.path.join(args.logging_dir, trial_name)
if self.tb_writers is None:
self._init_summary_writer(args, log_dir)
for k, tbw in self.tb_writers.items():
tbw.add_text("args", args.to_json_string())
if "model" in kwargs:
model = kwargs["model"]
if hasattr(model, "config") and model.config is not None:
model_config_json = model.config.to_json_string()
tbw.add_text("model_config", model_config_json)
# Version of TensorBoard coming from tensorboardX does not have this method.
if hasattr(tbw, "add_hparams"):
tbw.add_hparams(args.to_sanitized_dict(), metric_dict={})
def on_log(self, args, state, control, logs=None, **kwargs):
if not state.is_world_process_zero:
return
if self.tb_writers is None:
self._init_summary_writer(args)
for tbk, tbw in self.tb_writers.items():
logs_new = custom_rewrite_logs(logs, mode=tbk)
for k, v in logs_new.items():
if isinstance(v, (int, float)):
tbw.add_scalar(k, v, state.global_step)
else:
logger.warning(
"Trainer is attempting to log a value of "
f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
"This invocation of Tensorboard's writer.add_scalar() "
"is incorrect so we dropped this attribute."
)
tbw.flush()
def on_train_end(self, args, state, control, **kwargs):
for tbw in self.tb_writers.values():
tbw.close()
self.tb_writers = None
If you want to combine train and eval for other metrics besides the loss then custom_rewrite_logs should be modified accordingly.
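For example, a hypothetical variant that combines every logged metric, not just the loss, could look like this (a sketch following the same tag-prefix convention as custom_rewrite_logs above):
def custom_rewrite_logs_all(d, mode):
    new_d = {}
    for k, v in d.items():
        if mode == 'eval' and k.startswith('eval_'):
            new_d['combined/' + k[len('eval_'):]] = v
        elif mode == 'test' and k.startswith('test_'):
            new_d['combined/' + k[len('test_'):]] = v
        elif mode == 'train' and not k.startswith(('eval_', 'test_')):
            new_d['combined/' + k] = v
    return new_d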
As usual, the callback goes in the Trainer constructor. In my test example it was:
trainer = Trainer(
model=rnn,
args=train_args,
train_dataset=train_dataset,
eval_dataset=validation_dataset,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
callbacks=[CombinedTensorBoardCallback]
)
Also, you might want to remove the default TensorBoardCallback; otherwise, in addition to the combined loss graph, the training loss and validation loss will each appear separately, as they do by default.
trainer.remove_callback(TensorBoardCallback)
Here is the resulting TensorBoard view: the train and eval losses overlaid on a single combined/loss chart (screenshot not reproduced here).
It's pretty simple: you specify it in the Seq2SeqTrainingArguments. There is no need to define it explicitly in the Seq2SeqTrainer.
model_arguments = Seq2SeqTrainingArguments(output_dir= "./best_model/",
num_train_epochs = EPOCHS,
overwrite_output_dir= True,
do_train= True,
do_eval= True,
do_predict= True,
auto_find_batch_size= True,
evaluation_strategy = 'epoch',
warmup_steps = 10000,
logging_dir = "./log_files/",
disable_tqdm = False,
load_best_model_at_end = True,
save_strategy= 'epoch',
save_total_limit = 1,
per_device_eval_batch_size= BATCH_SIZE,
per_device_train_batch_size= BATCH_SIZE,
predict_with_generate=True,
report_to='wandb',
run_name="rober_based_encoder_decoder_text_summarisation"
)
meanwhile you can have other callbacks:
early_stopping = EarlyStoppingCallback(early_stopping_patience= 5,
early_stopping_threshold= 0.001)
Then you pass the arguments and callbacks as the list through the trainer arguments:
trainer = Seq2SeqTrainer(model = model,
compute_metrics= compute_metrics,
args= model_arguments,
train_dataset= Train,
eval_dataset= Val,
tokenizer=tokenizer,
callbacks= [early_stopping, ]
)
Train the model. Make sure you are logged into wandb before training:
trainer.train()
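For example, from Python (equivalent to running wandb login in a shell; the API key comes from your W&B account settings):
import wandb

# Authenticate once per environment; with report_to='wandb' set above,
# trainer.train() will then stream metrics to your W&B project.
wandb.login()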
The following training curves (plot not reproduced here) were generated using the same TensorFlow + Keras script written in Python:
RED line uses five features.
GREEN line uses seven features.
BLUE line uses nine features.
Can anyone tell me the probable cause of the oscillation of the GREEN line so that I can troubleshoot my script?
Source code:
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Use both gpus for training.
import sys, random
import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint
import numpy as np
from lxml import etree, objectify
# <editor-fold desc="GPU">
# resolve GPU related issues.
try:
physical_devices = tf.config.list_physical_devices('GPU')
for gpu_instance in physical_devices:
tf.config.experimental.set_memory_growth(gpu_instance, True)
except Exception as e:
pass
# END of try
# </editor-fold>
# <editor-fold desc="Lxml helper">
class LxmlHelper:
@classmethod
def objectify_xml(cls, input_path_dir):
file_dom = etree.parse(input_path_dir) # parse xml and convert it into DOM
file_xml_bin = etree.tostring(file_dom, pretty_print=False, encoding="ascii") # encode DOM into ASCII object
file_xml_text = file_xml_bin.decode() # convert binary ASCII object into ASCII text
objectified_xml = objectify.fromstring(file_xml_text) # convert text into an objectified lxml element
return objectified_xml
# </editor-fold>
# <editor-fold desc="def encode(letter)">
def encode(letter: str):
if letter == 'H':
return [1.0, 0.0, 0.0]
elif letter == 'E':
return [0.0, 1.0, 0.0]
elif letter == 'C':
return [0.0, 0.0, 1.0]
elif letter == '-':
return [0.0, 0.0, 0.0]
# END of function
def encode_string_1(pattern_str: str):
# Iterate over the string
one_hot_binary_str = []
for ch in pattern_str:
try:
one_hot_binary_str = one_hot_binary_str + encode(ch)
except Exception as e:
print(pattern_str, one_hot_binary_str, ch)
# END of for loop
return one_hot_binary_str
# END of function
def encode_string_2(pattern_str: str):
# Iterate over the string
one_hot_binary_str = []
for ch in pattern_str:
temp_encoded_vect = [encode(ch)]
one_hot_binary_str = one_hot_binary_str + temp_encoded_vect
# END of for loop
return one_hot_binary_str
# END of function
# </editor-fold>
# <editor-fold desc="def load_data()">
def load_data_k(fname: str, class_index: int, feature_start_index: int, **selection):
"""Loads data for training and validation
:param fname: (``string``) - name of the file with the data
:param selection: (``kwargs``) - see below
:return: four tensorflow tensors: training input, training output, validation input and validation output
:Keyword Arguments:
* *top_n_lines* (``number``) --
take top N lines of the input and disregard the rest
* *random_n_lines* (``number``) --
take random N lines of the input and disregard the rest
* *validation_part* (``float``) --
separate N_lines * given_fraction of the input lines from the training set and use
them for validation. When the given_fraction = 1.0, then the same input set of
N_lines is used both for training and validation (this is the default)
"""
i = 0
file = open(fname)
if "top_n_lines" in selection:
lines = [next(file) for _ in range(int(selection["top_n_lines"]))]
elif "random_n_lines" in selection:
tmp_lines = file.readlines()
lines = random.sample(tmp_lines, int(selection["random_n_lines"]))
else:
lines = file.readlines()
data_x, data_y, data_z = [], [], []
for l in lines:
row = l.strip().split() # return a list of words from the line.
x = [float(ix) for ix in row[feature_start_index:]] # convert the words from feature_start_index onward into a vector of floats.
y = encode(row[class_index]) # one-hot encode the class word at class_index.
z = encode_string_1(row[class_index+1])
data_x.append(x) # append the vector into 'data_x'
data_y.append(y) # append the vector into 'data_y'
data_z.append(z) # append the vector into 'data_z'
# END for l in lines
num_rows = len(data_x)
given_fraction = selection.get("validation_part", 1.0)
if given_fraction > 0.9999:
valid_x, valid_y, valid_z = data_x, data_y, data_z
else:
n = int(num_rows * given_fraction)
# Take the validation rows before truncating the training set;
# splitting after truncation would make the two sets overlap.
valid_x, valid_y, valid_z = data_x[:n], data_y[:n], data_z[:n]
data_x, data_y, data_z = data_x[n:], data_y[n:], data_z[n:]
# END of if-else block
tx = tf.convert_to_tensor(data_x, np.float32)
ty = tf.convert_to_tensor(data_y, np.float32)
tz = tf.convert_to_tensor(data_z, np.float32)
vx = tf.convert_to_tensor(valid_x, np.float32)
vy = tf.convert_to_tensor(valid_y, np.float32)
vz = tf.convert_to_tensor(valid_z, np.float32)
return tx, ty, tz, vx, vy, vz
# END of the function
# </editor-fold>
# <editor-fold desc="def create_model()">
def create_model(n_hidden_1, n_hidden_2, num_classes, num_features):
# create the model
model = Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(num_features,)))
model.add(tf.keras.layers.Dense(n_hidden_1, activation='sigmoid'))
model.add(tf.keras.layers.Dense(n_hidden_2, activation='sigmoid'))
###model.add(tf.keras.layers.Dense(n_hidden_3, activation='sigmoid'))
model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
# instantiate the optimizer
opt = keras.optimizers.SGD(learning_rate=LEARNING_RATE)
# compile the model
model.compile(
optimizer=opt,
loss="categorical_crossentropy",
metrics="categorical_accuracy"
)
# return model
return model
# </editor-fold>
if __name__ == "__main__":
# <editor-fold desc="(input/output parameters)">
my_project_routine = LxmlHelper.objectify_xml("my_project_evaluate.xml")
# input data
INPUT_DATA_FILE = str(my_project_routine.input.input_data_file)
INPUT_PATH = str(my_project_routine.input.input_path)
CLASS_INDEX = int(my_project_routine.input.class_index)
FEATURE_INDEX = int(my_project_routine.input.feature_index)
# output data
OUTPUT_PATH = str(my_project_routine.output.output_path)
MODEL_FILE = str(my_project_routine.output.model_file)
TRAINING_PROGRESS_FILE = str(my_project_routine.output.training_progress_file)
# Learning parameters
LEARNING_RATE = float(my_project_routine.training_params.learning_rate)
EPOCH_SIZE = int(my_project_routine.training_params.epoch_size)
BATCH_SIZE = int(my_project_routine.training_params.batch_size)
INPUT_LINES_COUNT = int(my_project_routine.input.input_lines_count)
VALIDATION_PART = float(my_project_routine.training_params.validation_part)
SAVE_PERIOD = str(my_project_routine.output.save_period)
# NN parameters
HIDDEN_LAYER_1_NEURON_COUNT = int(my_project_routine.hidden_layers.one)
HIDDEN_LAYER_2_NEURON_COUNT = int(my_project_routine.hidden_layers.two)
###HIDDEN_LAYER_3_NEURON_COUNT = int(my_project_routine.hidden_layers.three)
CLASS_COUNT = int(my_project_routine.class_count)
FEATURES_COUNT = int(my_project_routine.features_count)
input_file_path_str = os.path.join(INPUT_PATH, INPUT_DATA_FILE)
training_progress_file_path_str = os.path.join(OUTPUT_PATH, TRAINING_PROGRESS_FILE)
model_file_path = os.path.join(OUTPUT_PATH, MODEL_FILE)
# command-line arg processing
input_file_name_str = None
if len(sys.argv) > 1:
input_file_name_str = sys.argv[1]
else:
input_file_name_str = input_file_path_str
# END of if-else
# </editor-fold>
# <editor-fold desc="(load data from file)">
# load training data from the disk
train_x, train_y, _, validate_x, validate_y, _ = \
load_data_k(
fname=input_file_name_str,
class_index=CLASS_INDEX,
feature_start_index=FEATURE_INDEX,
random_n_lines=INPUT_LINES_COUNT,
validation_part=VALIDATION_PART
)
print("training data size : ", len(train_x))
print("validation data size : ", len(validate_x))
# </editor-fold>
### STEPS_PER_EPOCH = len(train_x) // BATCH_SIZE
### VALIDATION_STEPS = len(validate_x) // BATCH_SIZE
# <editor-fold desc="(model creation)">
# load previously saved NN model
model = None
try:
model = keras.models.load_model(model_file_path)
print("Loading NN model from file.")
model.summary()
except Exception as ex:
print("No NN model found for loading.")
# END of try-except
# </editor-fold>
# <editor-fold desc="(model run)">
# # if there is no model loaded, create a new model
if model is None:
csv_logger = keras.callbacks.CSVLogger(training_progress_file_path_str)
checkpoint = ModelCheckpoint(
model_file_path,
monitor='loss',
verbose=1,
save_best_only=True,
mode='auto',
save_freq='epoch'
)
callbacks_vector = [
csv_logger,
checkpoint
]
# Set mirror strategy
#strategy = tf.distribute.MirroredStrategy(devices=["/device:GPU:0","/device:GPU:1"])
#with strategy.scope():
print("New NN model created.")
# create sequential NN model
model = create_model(
n_hidden_1=HIDDEN_LAYER_1_NEURON_COUNT,
n_hidden_2=HIDDEN_LAYER_2_NEURON_COUNT,
##n_hidden_3=HIDDEN_LAYER_3_NEURON_COUNT,
num_classes=CLASS_COUNT,
num_features=FEATURES_COUNT
)
# Train the model with the new callback
history = model.fit(
train_x, train_y,
validation_data=(validate_x, validate_y),
batch_size=BATCH_SIZE,
epochs=EPOCH_SIZE,
callbacks=callbacks_vector, # callbacks_vector is already a list
shuffle=True,
verbose=2
)
print(history.history.keys())
# END of ... with
# END of ... if
# </editor-fold>
Plotting Script
import os
from argparse import ArgumentParser
import random
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import math
import sys
import datetime
class Quad:
def __init__(self, x_vector, y_vector, color_char, label_str):
self.__x_vector = x_vector
self.__y_vector = y_vector
self.__color_char = color_char
self.__label_str = label_str
def get_x_vector(self):
return self.__x_vector
def get_y_vector(self):
return self.__y_vector
def get_color_char(self):
return self.__color_char
def get_label_str(self):
return self.__label_str
class HecaPlotClass:
def __init__(self):
self.__x_label_str: str = None
self.__y_label_str: str = None
self.__title_str: str = None
self.__trio_vector: List[Quad] = []
self.__plotter = plt
@property
def x_label_str(self):
return self.__x_label_str
@x_label_str.setter
def x_label_str(self, t):
self.__x_label_str = t
@property
def y_label_str(self):
return self.__y_label_str
@y_label_str.setter
def y_label_str(self, t):
self.__y_label_str = t
@property
def title_str(self):
return self.__title_str
@title_str.setter
def title_str(self, t):
self.__title_str = t
def add_y_axes(self, trio_obj: Quad):
self.__trio_vector.append(trio_obj)
def generate_plot(self):
for obj in self.__trio_vector:
x_vector = obj.get_x_vector()
y_vector = obj.get_y_vector()
label_str = obj.get_label_str()
# print(label_str)
# print(len(x_vector))
# print(len(y_vector))
self.__plotter.plot(
x_vector,
y_vector,
color=obj.get_color_char(),
label=label_str
)
# END of ... for loop
# Naming the x-axis, y_1_vector-axis and the whole graph
self.__plotter.xlabel(self.__x_label_str)
self.__plotter.ylabel(self.__y_label_str)
self.__plotter.title(self.__title_str)
# Adding a legend, which helps us recognize each curve by its color
self.__plotter.legend()
# To load the display window
#self.__plotter.show()
def save_png(self, output_directory_str):
output_file_str = os.path.join(output_directory_str, self.__title_str + '.png')
self.__plotter.savefig(output_file_str)
def save_pdf(self, output_directory_str):
output_file_str = os.path.join(output_directory_str, self.__title_str + '.pdf')
self.__plotter.savefig(output_file_str)
class MainClass(object):
__colors_vector = ['red', 'green', 'blue', 'cyan', 'magenta', 'yellow', 'orange', 'lightgreen', 'crimson']
__working_dir = r"."
__file_names_vector = ["training_progress-32.txt", "training_progress-64.txt", "training_progress-128.txt"]
__input_files_vector = []
__output_directory = None
__column_no_int = 0
__split_percentage_at_tail_int = 100
__is_pdf_output = False
__is_png_output = False
# <editor-fold desc="def load_data()">
@classmethod
def __load_data(cls, fname: str, percentage_int: int, column_no_int: int):
np_array = np.loadtxt(
fname,
# usecols=range(1,11),
dtype=np.float32,
skiprows=1,
delimiter=","
)
size_vector = np_array.shape
array_len_int = size_vector[0]
rows_count_int = int(percentage_int * array_len_int / 100)
np_array = np_array[-rows_count_int:]
x = np_array[:, 0]
y = np_array[:, column_no_int]
return x, y
# END of the function
# </editor-fold>
# <editor-fold desc="(__parse_args())">
@classmethod
def __parse_args(cls):
# initialize argument parser
my_parser = ArgumentParser()
my_parser.add_argument("-c", help="column no.", type=int)
my_parser.add_argument('-i', nargs='+', help='a list of input files', required=True)
my_parser.add_argument("-o", help="output directory", type=str)
my_parser.add_argument("-n", help="percentage of data to split from tail", type=float)
my_parser.add_argument("--pdf", help="PDF output", action='store_true')
my_parser.add_argument("--png", help="PNG output", action='store_true')
# parse the argument
args = my_parser.parse_args()
cls.__input_files_vector = args.i
cls.__output_directory = args.o
cls.__split_percentage_at_tail_int = args.n
cls.__column_no_int = args.c
cls.__is_pdf_output = args.pdf
cls.__is_png_output = args.png
# </editor-fold>
@classmethod
def main(cls):
cls.__parse_args()
if cls.__input_files_vector is None:
cls.__input_files_vector = cls.__file_names_vector
if cls.__output_directory is None:
cls.__output_directory = cls.__working_dir
if cls.__split_percentage_at_tail_int is None:
cls.__split_percentage_at_tail_int = 100
if cls.__column_no_int is None:
cls.__column_no_int = 1
my_project_plot_obj = HecaPlotClass()
i = 0
for file_path_str in cls.__input_files_vector:
print(file_path_str)
x_vector, y_vector = cls.__load_data(os.path.join(cls.__working_dir, file_path_str), cls.__split_percentage_at_tail_int, cls.__column_no_int)
my_project_plot_obj.x_label_str = "Epoch"
my_project_plot_obj.y_label_str = "Accuracy"
my_project_plot_obj.title_str = "training_plot-{date:%Y-%m-%d_%H:%M:%S}".format(date=datetime.datetime.now())
my_project_plot_obj.x_axis_vector = x_vector
if i == 0:
random_int = 0
else:
random_int = i % (len(cls.__colors_vector)-1)
# END of ... if
print("random_int : ", random_int)
my_project_plot_obj.add_y_axes(Quad(x_vector, y_vector, cls.__colors_vector[random_int], file_path_str))
i = i + 1
# END of ... for loop
my_project_plot_obj.generate_plot()
my_project_plot_obj.save_png(cls.__output_directory)
my_project_plot_obj.save_pdf(cls.__output_directory)
if __name__ == "__main__":
MainClass.main()
The primary reason could be an improper (non-random, i.e. ordered) distribution of the data.
If you look at the accuracy beyond epoch 180, there is an orderly switching of the accuracy between roughly 0.43 and 0.33, and occasionally about 0.23. The more important thing to notice is that the accuracy is decreasing (there is no improvement in validation accuracy) as the epochs increase.
The accuracy can improve in such cases if you (1) reduce the batch size, or (2) use a better optimizer such as Adam. Also check the learning rate.
These changes can help with the shift and the oscillation as well.
Additionally, a running average of the accuracy can be plotted to smooth out the oscillation. This is again a mitigation scheme rather than a correction scheme, but what it does is remove the ordering (the partitioning of the data) by mixing nearby data points.
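For example, a running (moving) average over a window of epochs can be computed from the CSVLogger output; a minimal sketch, where the file and column names (training_progress.txt, categorical_accuracy) are assumptions to adjust to your logger's actual output:
import numpy as np
import matplotlib.pyplot as plt

# Hypothetical file/column names; adjust to your CSVLogger output.
data = np.genfromtxt("training_progress.txt", delimiter=",", names=True)
acc = data["categorical_accuracy"]

window = 10  # average over 10 epochs
running_avg = np.convolve(acc, np.ones(window) / window, mode="valid")

plt.plot(acc, label="raw accuracy")
plt.plot(np.arange(window - 1, len(acc)), running_avg, label="running average")
plt.legend()
plt.show()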
Lastly, I would also reshuffle the data and normalize after each layer. See if that helps.
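A sketch of what the optimizer and normalization suggestions could look like in the create_model function above; this is an illustration under those assumptions, not a drop-in replacement for your exact setup:
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

def create_model_v2(n_hidden_1, n_hidden_2, num_classes, num_features, learning_rate=1e-3):
    model = Sequential([
        layers.InputLayer(input_shape=(num_features,)),
        layers.Dense(n_hidden_1, activation='sigmoid'),
        layers.BatchNormalization(),  # normalize activations after each hidden layer
        layers.Dense(n_hidden_2, activation='sigmoid'),
        layers.BatchNormalization(),
        layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=learning_rate),  # Adam instead of SGD
        loss="categorical_crossentropy",
        metrics=["categorical_accuracy"],
    )
    return model
As for reshuffling: shuffling the lines before the train/validation split in load_data_k (e.g. random.shuffle(lines)) is what removes the ordering; shuffle=True in model.fit only shuffles within the training set each epoch.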
Generally, sharp jumps and flat lines in the accuracy usually mean that a group of examples is classified as a given class at the same time. If your dataset contains, say, 50 examples with the same combination of 7 features, then they will all move into the same class at the same time. This is probably what causes the sharp jumps: identical or similar examples clustered together.
So for example, if you have 50 men aged 64, and a decision boundary to classify them as more prone to an illness shifts from >65 to >63, then accuracy changes rapidly as all of them change classification at the same time.
Regarding the oscillation of the curve: because of the fact above, the oscillation is amplified by small changes in learning. Your network learns by minimizing cross entropy, i.e. the difference between the targets and your predictions. This means it operates on the difference between a probability and a target (say, 0.3 vs class 0), whereas accuracy operates on the difference between a class and a target (so, 0 vs 0) for the same example. Cross entropy is much smoother because it is not affected by the issue outlined above.
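A toy numeric illustration of this difference (made-up numbers): a tiny shift in the predicted probability of a cluster of identical examples flips their predicted class, so accuracy jumps from 0 to 1, while cross entropy barely moves:
import numpy as np

# 50 identical examples, all with true class 1; the predicted probability
# for class 1 moves from just below to just above the 0.5 threshold.
y_true = np.ones(50)

for p in (0.49, 0.51):
    preds = np.full(50, p)
    accuracy = np.mean((preds > 0.5) == y_true)
    cross_entropy = -np.mean(np.log(preds))  # binary cross entropy for class 1
    print(f"p={p}: accuracy={accuracy:.2f}, cross entropy={cross_entropy:.3f}")

# accuracy: 0.00 -> 1.00; cross entropy: ~0.713 -> ~0.673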
GLUE_TASKS = ["cola", "mnli", "mnli-mm", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]
# useful in preprocessing: this maps each task to its input column names.
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mnli-mm": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
def set_module_grad_status(module, flag=False):
if isinstance(module, list):
for m in module:
set_module_grad_status(m, flag)
else:
for p in module.parameters():
p.requires_grad = flag
def compute_metrics(eval_pred):
predictions, labels = eval_pred
if task != "stsb":
predictions = np.argmax(predictions, axis=1)
else:
predictions = predictions[:, 0]
return metric.compute(predictions=predictions, references=labels)
#setting some model parameters
task = "qnli"
sentence1_key, sentence2_key = task_to_keys[task]
# model_checkpoint = "test-glue/basemodel"
num_labels = 3 if task.startswith("mnli") else 1 if task=="stsb" else 2
metric_name = "pearson" if task == "stsb" else "matthews_correlation" if task == "cola" else "accuracy"
validation_key = "validation_mismatched" if task == "mnli-mm" else "validation_matched" if task == "mnli" else "validation"
model_checkpoint = "google/mobilebert-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)
model = model.to(device)
args = TrainingArguments(
"test-glue",
evaluation_strategy = "epoch",
learning_rate=2e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
num_train_epochs=epochs,
weight_decay=0.01,
save_steps=0
# load_best_model_at_end=True,
# metric_for_best_model=metric_name,
)
#load dataset
actual_task = "mnli" if task == "mnli-mm" else task #some error checking..
dataset = load_dataset("glue", actual_task)
metric = load_metric('glue', actual_task)
# tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
def preprocess_function(examples):
if sentence2_key is None:
return tokenizer(examples[sentence1_key], truncation=True)
return tokenizer(examples[sentence1_key], examples[sentence2_key], truncation=True)
encoded_dataset = dataset.map(preprocess_function, batched=True)
trainer = Trainer(
model,
args,
train_dataset=encoded_dataset["train"],
eval_dataset=encoded_dataset[validation_key],
tokenizer=tokenizer,
compute_metrics=compute_metrics
)
trainer.train()
I'm not quite sure why I am getting the error "RuntimeError: The size of tensor a (549) must match the size of tensor b (512) at non-singleton dimension 1" at the line trainer.train(). Isn't each sample supposed to be consistently of size 512? This only happens with the MobileBERT model and not with the DistilBERT model. The code I am using is mostly based on the huggingface colab tutorial https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb#scrollTo=7k8ge1L1IrJk
I am experimenting with the gpt-2 model's conditional text generation to tweak it into a good chatbot. I am using nsheppard's code for retraining it on my custom dataset.
I trained my model on a custom dataset of conversations that I pulled from my Facebook data. Since the samples are dialogues, I changed the sample length to 20 during interactive conditional generation.
The dataset looks something like this:
How are you
Hi Great and you
Am also good
So you re a graphic designer
Yeah
How can you contribute to making the game In d graphics aspect
Can you show me some of your work if u don t mind
Am planning to learn making it a motion type
U can go through my photos
K
Can you make animations for it
Flash animations to be specific
No please only stable ones
Ok
But after the training, when I try to chat with it, it completes my sentences instead of replying to them.
User >>> bye
======================================== SAMPLE 1 ========================================
and
hi
are there any positions in khrzh being appointed right now
I understand that interactive_conditional_samples.py was built to complete a sentence based on the prompt, but I thought changing the dataset would be enough; clearly it isn't.
train.py
#!/usr/bin/env python3
# Usage:
# PYTHONPATH=src ./train --dataset <file|directory|glob>
import argparse
import json
import os
import numpy as np
import tensorflow as tf
import time
import tqdm
from tensorflow.core.protobuf import rewriter_config_pb2
import model, sample, encoder
from load_dataset import load_dataset, Sampler
from accumulate import AccumulatingOptimizer
import memory_saving_gradients
CHECKPOINT_DIR = 'checkpoint'
SAMPLE_DIR = 'samples'
parser = argparse.ArgumentParser(
description='Fine-tune GPT-2 on your custom dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', metavar='PATH', type=str, required=True, help='Input file, directory, or glob pattern (utf-8 text, or preencoded .npz files).')
parser.add_argument('--model_name', metavar='MODEL', type=str, default='117M', help='Pretrained model name')
parser.add_argument('--combine', metavar='CHARS', type=int, default=50000, help='Concatenate input files with <|endoftext|> separator into chunks of this minimum size')
parser.add_argument('--batch_size', metavar='SIZE', type=int, default=1, help='Batch size')
parser.add_argument('--learning_rate', metavar='LR', type=float, default=0.00002, help='Learning rate for Adam')
parser.add_argument('--accumulate_gradients', metavar='N', type=int, default=1, help='Accumulate gradients across N minibatches.')
parser.add_argument('--memory_saving_gradients', default=False, action='store_true', help='Use gradient checkpointing to reduce vram usage.')
parser.add_argument('--only_train_transformer_layers', default=False, action='store_true', help='Restrict training to the transformer blocks.')
parser.add_argument('--optimizer', type=str, default='adam', help='Optimizer. <adam|sgd>.')
parser.add_argument('--noise', type=float, default=0.0, help='Add noise to input training data to regularize against typos.')
parser.add_argument('--top_k', type=int, default=40, help='K for top-k sampling.')
parser.add_argument('--top_p', type=float, default=0.0, help='P for top-p sampling. Overrides top_k if set > 0.')
parser.add_argument('--restore_from', type=str, default='latest', help='Either "latest", "fresh", or a path to a checkpoint file')
parser.add_argument('--run_name', type=str, default='run1', help='Run id. Name of subdirectory in checkpoint/ and samples/')
parser.add_argument('--sample_every', metavar='N', type=int, default=100, help='Generate samples every N steps')
parser.add_argument('--sample_length', metavar='TOKENS', type=int, default=1023, help='Sample this many tokens')
parser.add_argument('--sample_num', metavar='N', type=int, default=1, help='Generate this many samples')
parser.add_argument('--save_every', metavar='N', type=int, default=1000, help='Write a checkpoint every N steps')
parser.add_argument('--val_dataset', metavar='PATH', type=str, default=None, help='Dataset for validation loss, defaults to --dataset.')
parser.add_argument('--val_batch_size', metavar='SIZE', type=int, default=2, help='Batch size for validation.')
parser.add_argument('--val_batch_count', metavar='N', type=int, default=40, help='Number of batches for validation.')
parser.add_argument('--val_every', metavar='STEPS', type=int, default=0, help='Calculate validation loss every STEPS steps.')
def maketree(path):
try:
os.makedirs(path)
except:
pass
def randomize(context, hparams, p):
if p > 0:
mask = tf.random.uniform(shape=tf.shape(context)) < p
noise = tf.random.uniform(shape=tf.shape(context), minval=0, maxval=hparams.n_vocab, dtype=tf.int32)
return tf.where(mask, noise, context)
else:
return context
def main():
args = parser.parse_args()
enc = encoder.get_encoder(args.model_name)
hparams = model.default_hparams()
with open(os.path.join('models', args.model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if args.sample_length > hparams.n_ctx:
raise ValueError(
"Can't get samples longer than window size: %s" % hparams.n_ctx)
if args.model_name == '345M':
args.memory_saving_gradients = True
if args.optimizer == 'adam':
args.only_train_transformer_layers = True
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.graph_options.rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.OFF
with tf.Session(config=config) as sess:
context = tf.placeholder(tf.int32, [args.batch_size, None])
context_in = randomize(context, hparams, args.noise)
output = model.model(hparams=hparams, X=context_in)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=context[:, 1:], logits=output['logits'][:, :-1]))
if args.val_every > 0:
val_context = tf.placeholder(tf.int32, [args.val_batch_size, None])
val_output = model.model(hparams=hparams, X=val_context)
val_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=val_context[:, 1:], logits=val_output['logits'][:, :-1]))
val_loss_summary = tf.summary.scalar('val_loss', val_loss)
tf_sample = sample.sample_sequence(
hparams=hparams,
length=args.sample_length,
context=context,
batch_size=args.batch_size,
temperature=1.0,
top_k=args.top_k,
top_p=args.top_p)
all_vars = [v for v in tf.trainable_variables() if 'model' in v.name]
train_vars = [v for v in all_vars if '/h' in v.name] if args.only_train_transformer_layers else all_vars
if args.optimizer == 'adam':
opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
elif args.optimizer == 'sgd':
opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
else:
exit('Bad optimizer:', args.optimizer)
if args.accumulate_gradients > 1:
if args.memory_saving_gradients:
exit("Memory saving gradients are not implemented for gradient accumulation yet.")
opt = AccumulatingOptimizer(
opt=opt,
var_list=train_vars)
opt_reset = opt.reset()
opt_compute = opt.compute_gradients(loss)
opt_apply = opt.apply_gradients()
summary_loss = tf.summary.scalar('loss', opt_apply)
else:
if args.memory_saving_gradients:
opt_grads = memory_saving_gradients.gradients(loss, train_vars)
else:
opt_grads = tf.gradients(loss, train_vars)
opt_grads = list(zip(opt_grads, train_vars))
opt_apply = opt.apply_gradients(opt_grads)
summary_loss = tf.summary.scalar('loss', loss)
summary_lr = tf.summary.scalar('learning_rate', args.learning_rate)
summaries = tf.summary.merge([summary_lr, summary_loss])
summary_log = tf.summary.FileWriter(
os.path.join(CHECKPOINT_DIR, args.run_name))
saver = tf.train.Saver(
var_list=all_vars,
max_to_keep=5,
keep_checkpoint_every_n_hours=2)
sess.run(tf.global_variables_initializer())
if args.restore_from == 'latest':
ckpt = tf.train.latest_checkpoint(
os.path.join(CHECKPOINT_DIR, args.run_name))
if ckpt is None:
# Get fresh GPT weights if new run.
ckpt = tf.train.latest_checkpoint(
os.path.join('models', args.model_name))
elif args.restore_from == 'fresh':
ckpt = tf.train.latest_checkpoint(
os.path.join('models', args.model_name))
else:
ckpt = tf.train.latest_checkpoint(args.restore_from)
print('Loading checkpoint', ckpt)
saver.restore(sess, ckpt)
print('Loading dataset...')
chunks = load_dataset(enc, args.dataset, args.combine)
data_sampler = Sampler(chunks)
if args.val_every > 0:
val_chunks = load_dataset(enc, args.val_dataset, args.combine) if args.val_dataset else chunks
print('dataset has', data_sampler.total_size, 'tokens')
print('Training...')
if args.val_every > 0:
# Sample from validation set once with fixed seed to make
# it deterministic during training as well as across runs.
val_data_sampler = Sampler(val_chunks, seed=1)
val_batches = [[val_data_sampler.sample(1024) for _ in range(args.val_batch_size)]
for _ in range(args.val_batch_count)]
counter = 1
counter_path = os.path.join(CHECKPOINT_DIR, args.run_name, 'counter')
if os.path.exists(counter_path):
# Load the step number if we're resuming a run
# Add 1 so we don't immediately try to save again
with open(counter_path, 'r') as fp:
counter = int(fp.read()) + 1
def save():
maketree(os.path.join(CHECKPOINT_DIR, args.run_name))
print(
'Saving',
os.path.join(CHECKPOINT_DIR, args.run_name,
'model-{}').format(counter))
saver.save(
sess,
os.path.join(CHECKPOINT_DIR, args.run_name, 'model'),
global_step=counter)
with open(counter_path, 'w') as fp:
fp.write(str(counter) + '\n')
def generate_samples():
print('Generating samples...')
context_tokens = data_sampler.sample(1)
all_text = []
index = 0
while index < args.sample_num:
out = sess.run(
tf_sample,
feed_dict={context: args.batch_size * [context_tokens]})
for i in range(min(args.sample_num - index, args.batch_size)):
text = enc.decode(out[i])
text = '======== SAMPLE {} ========\n{}\n'.format(
index + 1, text)
all_text.append(text)
index += 1
print(text)
maketree(os.path.join(SAMPLE_DIR, args.run_name))
with open(
os.path.join(SAMPLE_DIR, args.run_name,
'samples-{}').format(counter), 'w') as fp:
fp.write('\n'.join(all_text))
def validation():
print('Calculating validation loss...')
losses = []
for batch in tqdm.tqdm(val_batches):
losses.append(sess.run(val_loss, feed_dict={val_context: batch}))
v_val_loss = np.mean(losses)
v_summary = sess.run(val_loss_summary, feed_dict={val_loss: v_val_loss})
summary_log.add_summary(v_summary, counter)
summary_log.flush()
print(
'[{counter} | {time:2.2f}] validation loss = {loss:2.2f}'
.format(
counter=counter,
time=time.time() - start_time,
loss=v_val_loss))
def sample_batch():
return [data_sampler.sample(1024) for _ in range(args.batch_size)]
avg_loss = (0.0, 0.0)
start_time = time.time()
try:
while True:
if counter % args.save_every == 0:
save()
if counter % args.sample_every == 0:
generate_samples()
if args.val_every > 0 and (counter % args.val_every == 0 or counter == 1):
validation()
if args.accumulate_gradients > 1:
sess.run(opt_reset)
for _ in range(args.accumulate_gradients):
sess.run(
opt_compute, feed_dict={context: sample_batch()})
(v_loss, v_summary) = sess.run((opt_apply, summaries))
else:
(_, v_loss, v_summary) = sess.run(
(opt_apply, loss, summaries),
feed_dict={context: sample_batch()})
summary_log.add_summary(v_summary, counter)
avg_loss = (avg_loss[0] * 0.99 + v_loss,
avg_loss[1] * 0.99 + 1.0)
print(
'[{counter} | {time:2.2f}] loss={loss:2.2f} avg={avg:2.2f}'
.format(
counter=counter,
time=time.time() - start_time,
loss=v_loss,
avg=avg_loss[0] / avg_loss[1]))
counter += 1
except KeyboardInterrupt:
print('interrupted')
save()
if __name__ == '__main__':
main()
sample.py
import tensorflow as tf
import model
def top_k_logits(logits, k):
if k == 0:
# no truncation
return logits
def _top_k():
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1, tf.newaxis]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
return tf.cond(
tf.equal(k, 0),
lambda: logits,
lambda: _top_k(),
)
def top_p_logits(logits, p):
with tf.variable_scope('top_p_logits'):
logits_sort = tf.sort(logits, direction='DESCENDING')
probs_sort = tf.nn.softmax(logits_sort)
probs_sums = tf.cumsum(probs_sort, axis=1, exclusive=True)
logits_masked = tf.where(probs_sums < p, logits_sort, tf.ones_like(logits_sort)*1000) # [batchsize, vocab]
min_logits = tf.reduce_min(logits_masked, axis=1, keepdims=True) # [batchsize, 1]
return tf.where(
logits < min_logits,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, top_p=0.0):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = tf.fill([batch_size, 1], start_token)
def step(hparams, tokens, past=None):
lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)
logits = lm_output['logits'][:, :, :hparams.n_vocab]
presents = lm_output['present']
presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
return {
'logits': logits,
'presents': presents,
}
with tf.name_scope('sample_sequence'):
# Don't feed the last context token -- leave that to the loop below
# TODO: Would be slightly faster if we called step on the entire context,
# rather than leaving the last token transformer calculation to the while loop.
context_output = step(hparams, context[:, :-1])
def body(past, prev, output):
next_outputs = step(hparams, prev[:, tf.newaxis], past=past)
logits = next_outputs['logits'][:, -1, :] / tf.to_float(temperature)
if top_p > 0.0:
logits = top_p_logits(logits, p=top_p)
else:
logits = top_k_logits(logits, k=top_k)
samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
return [
tf.concat([past, next_outputs['presents']], axis=-2),
tf.squeeze(samples, axis=[1]),
tf.concat([output, samples], axis=1),
]
def cond(*args):
return True
_, _, tokens = tf.while_loop(
cond=cond, body=body,
maximum_iterations=length,
loop_vars=[
context_output['presents'],
context[:, -1],
context,
],
shape_invariants=[
tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
tf.TensorShape([batch_size]),
tf.TensorShape([batch_size, None]),
],
back_prop=False,
)
return tokens
interactive_conditional_samples.py
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
model_name='chatbot',
seed=None,
nsamples=1,
batch_size=1,
length=20,
temperature=1,
top_k=0,
top_p=0.0
):
"""
Interactively run the model
:model_name=chatbot : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:nsamples=1 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
"""
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)
while True:
raw_text = input("User >>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input("User >>> ")
context_tokens = enc.encode(raw_text)
generated = 0
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(text)
print("=" * 80)
if __name__ == '__main__':
fire.Fire(interact_model)
How can I tweak the code to get it working like a chatbot? I am guessing it has something to do with the context part in sample.py, though I am unsure how this is going to work.
I know this is an old question now, but I have successfully tuned many Q&A style datasets on GPT-2 and have a suggestion that will work for future people who find this question.
GPT-2 reads unstructured text data, but it is very good at inferring and obeying structure in that data. Your issue is basically that you are not terminating your input lines with an identifier that GPT-2 understands, so it continues the sentence.
A simple way to fix this would be to annotate your dataset. Really anything with stop/start tokens will work, but you should also annotate the speaker identities. I would just do something like this:
A: How are you <EOL>
B: Hi Great and you <EOL>
A: Am also good <EOL>
B: So you re a graphic designer <EOL>
B: Another line from B <EOL>
The other benefit of this approach is that GPT-2 will learn multi-line input/output, and the different identities of the two conversants.
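For instance, here is a minimal preprocessing sketch along those lines; it assumes (purely for illustration) that the raw dialogue is one utterance per line and that the two speakers simply alternate:
def annotate_dialogue(lines, speakers=("A", "B"), eol_token="<EOL>"):
    """Prefix alternating speaker tags and append a stop token to each utterance."""
    annotated = []
    for i, line in enumerate(lines):
        speaker = speakers[i % len(speakers)]
        annotated.append(f"{speaker}: {line.strip()} {eol_token}")
    return "\n".join(annotated)

# Hypothetical file names.
with open("dialogues.txt") as f:
    raw_lines = [line for line in f if line.strip()]

with open("dialogues_annotated.txt", "w") as f:
    f.write(annotate_dialogue(raw_lines))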
The problem is that all the model does is look at the series of text you gave it and try to predict the next most likely token. It's not an encoder-decoder architecture. What you need is to fine-tune this architecture for a chatbot setup. The only implementation I found of that is here, but it's done in PyTorch, so I am afraid it won't be what you are wanting:
https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
I am trying to build a simple feed forward neural network using TensorFlow and its tfr formatting. I have been using TensorFlow's tutorials and examples as a reference:
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/how_tos/reading_data
Given "food" float values, I want to predict the "happiness" float value that it produces.
food_test.json is a JSON file that contains the "food" values and their associated "happiness" values. This is the format the data is stored in.
food_to_record.py is based off of tensorflow's convert_to_records.py. It reads in the food_test.json and converts it to a food_record.tfr file.
food_reader.py is based off of tensorflow's fully_connected_reader.py. It reads in the food_record.tfr file and runs the data through a neural network.
I run the program in this order:
1. food_to_record.py
2. food_reader.py
When food_reader.py is run, it starts a TensorFlow session, but the session never terminates. Does anyone know the reason for this?
food_test.json:
[
{
"food": 1.0,
"happiness": 2.0
},
{
"food": 1.4,
"happiness": 5.4
}
]
food_to_record.py:
#based off of tensorflow's convert_to_records.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import json
import tensorflow as tf
FLAGS = None
#feature for integers
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
#feature for floats
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
#feature for strings and others
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def main(unused_argv):
print("food_to_record:main")
script_dir = os.path.dirname(__file__)
file_path = os.path.join(script_dir, 'food_test.json')
with open(file_path) as data_file:
data = json.load(data_file)
print(data)
num_examples = 2
name = 'food_record'
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
example = tf.train.Example(features=tf.train.Features(feature={
'food': _float_feature(data[index]['food']),
'happiness': _float_feature(data[index]['happiness'])
}))
writer.write(example.SerializeToString())
writer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--directory',
type=str,
default='.',
help='Directory to download data files and write the converted result'
)
parser.add_argument(
'--validation_size',
type=int,
default=5000,
help="""\
Number of examples to separate from the training data for the validation
set.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
food_reader.py:
#based off of tensorflow's fully_connected_reader
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
import tensorflow as tf
# Basic model parameters as external flags.
FLAGS = None
# Constants used for dealing with the files
TRAIN_FILE = 'food_record.tfrecords'
# For simple testing purposes, use training file for validation
VALIDATION_FILE = 'food_record.tfrecords'
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'food': tf.FixedLenFeature([], tf.float32),
'happiness': tf.FixedLenFeature([], tf.float32)
})
food = tf.cast(features['food'], tf.float32)
happiness = tf.cast(features['happiness'], tf.float32)
food = tf.expand_dims(food, -1)
print("food shape: ", tf.shape(food))
print("happiness shape: ", tf.shape(happiness))
return food, happiness
def inputs(train, batch_size, num_epochs):
"""Reads input data num_epochs times.
Args:
train: Selects between the training (True) and validation (False) data.
batch_size: Number of examples per returned batch.
num_epochs: Number of times to read the input data, or 0/None to
train forever.
Returns:
A tuple (images, labels), where:
* images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
in the range [-0.5, 0.5].
* labels is an int32 tensor with shape [batch_size] with the true label,
a number in the range [0, mnist.NUM_CLASSES).
Note that a tf.train.QueueRunner is added to the graph, which
must be run using e.g. tf.train.start_queue_runners().
"""
if not num_epochs: num_epochs = None
filename = os.path.join(FLAGS.train_dir,
TRAIN_FILE if train else VALIDATION_FILE)
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(
[filename], num_epochs=num_epochs)
# Even when reading in multiple threads, share the filename
# queue.
food, happiness = read_and_decode(filename_queue)
# Shuffle the examples and collect them into batch_size batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
foods, happinesses= tf.train.shuffle_batch(
[food, happiness], batch_size=batch_size, num_threads=2,
capacity=1000 + 3 * batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=1000)
return foods, happinesses
def main(_):
with tf.Graph().as_default():
# Input images and labels.
foods, happinesses = inputs(train=True, batch_size=FLAGS.batch_size,
num_epochs=FLAGS.num_epochs)
HIDDEN_UNITS = 4
INPUTS = 1
OUTPUTS = 1
weights_1 = tf.Variable(tf.truncated_normal([INPUTS, HIDDEN_UNITS]))
biases_1 = tf.Variable(tf.zeros([HIDDEN_UNITS]))
layer_1_outputs = tf.nn.sigmoid(tf.matmul(foods, weights_1) + biases_1)
weights_2 = tf.Variable(tf.truncated_normal([HIDDEN_UNITS, OUTPUTS]))
biases_2 = tf.Variable(tf.zeros([OUTPUTS]))
logits = tf.nn.sigmoid(tf.matmul(layer_1_outputs, weights_2) + biases_2)
#loss = tf.reduce_mean(logits)
labels = tf.to_int64(happinesses)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='xentropy')
#loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
loss = tf.reduce_sum(tf.sub(logits, happinesses))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess = tf.Session()
sess.run(init_op)
print('starting iteration', 0)
_, loss = sess.run([train_op, loss])
print(loss)
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--num_epochs',
type=int,
default=2,
help='Number of epochs to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size.'
)
parser.add_argument(
'--train_dir',
type=str,
default='.',
help='Directory with the training data.'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
You must call tf.train.start_queue_runners to populate the queue before you call run or eval to execute the file reading. Otherwise, reading will block while it waits for filenames from the queue. Please check the run_training method from the original example, or TensorFlow's documentation on how_tos/reading_data.
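A minimal sketch of how main() in food_reader.py above could be adapted (a Coordinator plus start_queue_runners wrapped around the training run; init_op, train_op and loss are the names from the question's code):
# Inside main(), after building the graph:
sess = tf.Session()
sess.run(init_op)

# Start the input-pipeline threads that feed the filename/example queues.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    print('starting iteration', 0)
    _, loss_value = sess.run([train_op, loss])
    print(loss_value)
finally:
    coord.request_stop()  # ask the queue-runner threads to stop...
    coord.join(threads)   # ...and wait for them to shut down
sess.close()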