__slots__ does not reduce memory - python

I have the following class:
import os
from typing import List, Optional, Tuple

import keras

class DataGenerator(keras.utils.Sequence):

    __slots__ = ('path', 'batch_size', 'dim', 'mode', 'split_proportion',
                 'indices', 'processes')

    def __init__(self, path: str,
                 batch_size: int,
                 dim: Tuple[int] = (12, 86, 98),
                 mode: str = 'train',
                 split_proportion: float = None,
                 indices: List[List[int]] = None,
                 processes: Optional[int] = None):

        self._path = path
        self._mode = mode
        self._split_proportion = split_proportion
        self._k_indices = indices
        self._dim = dim
        self._batch_size = batch_size
        self._processes = processes

        # Change mode to retrieve from folders
        if mode == 'validation':
            mode = 'train'

        self._im_path = os.path.join(self._path, mode, 'image')
        self._msk_path = os.path.join(self._path, mode, 'mask')
If I instantiate it, it should not have a __dict__ attribute, since the class defines __slots__. However:
training_data = DataGenerator(path='/path', batch_size=1,
                              mode='train', split_proportion=0.1)

training_data.__dict__
{'_path': '/path',
 '_mode': 'train',
 '_split_proportion': 0.1,
 '_k_indices': None,
 '_dim': (12, 86, 98),
 '_batch_size': 1,
 '_processes': None,
 '_im_path': '/path/train/image',
 '_msk_path': '/path/train/mask'}
Additionally, when I check memory usage, it seems to be higher than without __slots__:
# with __slots__
sys.getsizeof(training_data)
112
sys.getsizeof(training_data.__dict__)
152
# without __slots__
sys.getsizeof(training_data)
56
sys.getsizeof(training_data.__dict__)
152
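
For reference, __slots__ only suppresses the per-instance __dict__ when every class in the inheritance chain (apart from object) also declares __slots__; keras.utils.Sequence does not, so instances of the subclass keep a __dict__ regardless. The declared slot names ('path', ...) also differ from the names actually assigned ('_path', ...), so every assignment lands in that dict. A minimal, self-contained illustration of both rules (class names invented for the example):

class AllSlots:
    __slots__ = ('_path',)      # slot name matches what is assigned

class PlainBase:
    pass

class MixedSlots(PlainBase):    # base class without __slots__
    __slots__ = ('_path',)

a = AllSlots()
a._path = '/path'
# a.__dict__ raises AttributeError: the instance has no dict at all

m = MixedSlots()
m._path = '/path'               # goes into the slot ...
print(m.__dict__)               # ... but {} still exists, because of PlainBase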

Related

Detectron model giving different results for different machines (constant seed)

My training script for the model:
import os
import random

import numpy as np
import torch

seed = 42

def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True  # note: benchmark mode autotunes kernels and is non-deterministic

seed_everything(seed)

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data.catalog import Metadata
from detectron2.engine import DefaultTrainer

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("experiment",)
cfg.DATASETS.TEST = ("test",)
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
cfg.MODEL.DEVICE = "cuda"
cfg.SOLVER.IMS_PER_BATCH = 2
num_gpu = 1
bs = (num_gpu * 2)
cfg.SOLVER.BASE_LR = 0.02 * bs / 16  # linear LR scaling from the 16-image reference batch
cfg.SOLVER.MAX_ITER = 7500
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 4

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
My inference script on server-1 is:
import cv2
from detectron2.engine import DefaultPredictor

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
cfg.SEED = 42
predictor = DefaultPredictor(cfg)

img = cv2.imread('filename.jpg')
outputs = predictor(img)
print(outputs["instances"])

pred_classes = outputs['instances'].pred_classes.tolist()
classes = ["Handwritten", "Logo", "Markings", "Signature"]
for pred_class in pred_classes:
    print('*' * 10)
    print(classes[pred_class])
    print('*' * 10)

if any(classes[pred_class] == "Handwritten" for pred_class in pred_classes):
    print(True)
else:
    print(False)
My inference script on server-2 is:
import time
from typing import Any, Dict

import cv2
import numpy as np
from fastapi import FastAPI, File, Form, UploadFile
from requests.exceptions import HTTPError  # assumption: the original import is not shown in the question
from detectron2.config import get_cfg
from detectron2.data.catalog import Metadata
from detectron2.engine import DefaultPredictor

class Handwritten:
    """
    Detects a list of handwritten pages in a PDF chart.

    Attributes
    ----------
    path_of_weights : str
        Path where the trained weights file is stored.
    """

    def __init__(self, path_of_weights: str) -> None:
        """Initialize the Handwritten class.

        Parameters
        ----------
        path_of_weights : str
        """
        self.cfg = get_cfg()
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 4
        self.cfg.MODEL.WEIGHTS = path_of_weights
        self.cfg.MODEL.DEVICE = "cpu"
        self.cfg.SEED = 42
        self.predictor = DefaultPredictor(self.cfg)
        self.metadata = Metadata()
        self.metadata.set(
            thing_classes=["Handwritten", "Logo", "Markings", "Signature"],
            thing_dataset_id_to_contiguous_id={0: 0, 1: 1, 2: 2, 3: 3},
        )

    def __call__(self, img: Any) -> Any:
        """Return the predicted output classes for the image."""
        self.outputs = self.predictor(img)
        return self.outputs["instances"]

    def detect_hw(self, image: Any) -> bool:
        """Detect a handwritten entity in the image and, if present, classify it as a handwritten page.

        Parameters
        ----------
        image : Any
            Image matrix of a page.

        Returns
        -------
        bool
            Whether the page is handwritten.
        """
        outputs = self.__call__(image)
        pred_classes = outputs.pred_classes.tolist()
        classes = ["Handwritten", "Logo", "Markings", "Signature"]
        if any(classes[pred_class] == "Handwritten" for pred_class in pred_classes):
            return True
        else:
            return False

app = FastAPI()
path_of_weights = "model/model_final.pth"
model = Handwritten(path_of_weights)

@app.post("/cv/predict", status_code=200)
def predict(
    page_no: int = Form(...), dimensions: list = Form(...), image: UploadFile = File(...)
) -> Dict[str, int]:
    """Predict whether the image is a handwritten page.

    Parameters
    ----------
    page_no : Page number of the given input page
    dimensions : Height and width of the page
    image : Image of the page as a bytestream
    """
    image_bytes = image.file.read()
    decoded_image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), -1)
    height, width = int(dimensions[0]), int(dimensions[1])
    prediction_time = time.time()
    # Note: cv2.resize expects dsize as (width, height)
    pg_image = cv2.resize(decoded_image, (height, width))
    try:
        # Check if page is handwritten
        hw_result = model.detect_hw(pg_image)
        # If handwritten, consider for output
        if hw_result:
            hw_pages = page_no
        else:
            hw_pages = -99
        prediction_info = {
            "hw_pages": hw_pages,
            "prediction_time": prediction_time,
        }
        # _logger.info(f"prediction info: {prediction_info}")
    except HTTPError as e:
        # do something
        raise
    return {"hw_pages": hw_pages}
While the model keeps giving good results on server-1, it is being very erratic on server-2. The weights and the seed are the same, and I am unable to understand this change in behavior between the two setups.
The model was trained on server-1.
Server-1 is a g4dn.2xlarge; server-2 is a g4dn.xlarge.
Is there something wrong that I am doing?
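
One detail worth checking: the training script sets torch.backends.cudnn.deterministic = True but then torch.backends.cudnn.benchmark = True, and benchmark mode autotunes convolution kernels non-deterministically; server-2 also runs the model on "cpu" while server-1 uses "cuda", so different kernels execute in any case. A minimal sketch of the flag combination usually used for run-to-run reproducibility in PyTorch (general advice, not a confirmed fix for this setup):

import torch

torch.backends.cudnn.benchmark = False      # disable non-deterministic kernel autotuning
torch.backends.cudnn.deterministic = True   # prefer deterministic cuDNN kernels
torch.use_deterministic_algorithms(True)    # PyTorch >= 1.8: raise if an op has no deterministic implementation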

RuntimeError: Only Tensors created explicitly by the user (graph leaves) support the deepcopy protocol at the moment

Here is the error about deepcopy; how should I fix it?
error:
target_encoder = copy.deepcopy(self.online_encoder)
RuntimeError: Only Tensors created explicitly by the user (graph leaves) support the deepcopy protocol at the moment
import copy

import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(
        self,
        model,                          # byol
        projection_size=256,
        pred_size=256,
        projection_hidden_size=4096,
        moving_average_decay=0.99,
        use_momentum=True,
    ):
        super().__init__()
        self.online_encoder = Pre_model(model)  # 256
        self.use_momentum = use_momentum
        self.target_encoder = None
        self.target_ema_updater = EMA(moving_average_decay)

    def _get_target_encoder(self):
        target_encoder = copy.deepcopy(self.online_encoder)
        set_requires_grad(target_encoder, False)
        return target_encoder

    def forward(self, x):
        anchors = x['anchor'].cuda(non_blocking=True)
        neighbors = x['neighbor'].cuda(non_blocking=True)

        online_anchor_proj = self.online_encoder(anchors)
        online_neighbor_proj = self.online_encoder(neighbors)

        with torch.no_grad():
            target_online = self._get_target_encoder() if self.use_momentum else self.online_encoder
            target_anchor_proj = target_online(anchors)
            target_neighbor_proj = target_online(neighbors)
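
The deepcopy fails because, by the time forward calls _get_target_encoder, the online encoder holds non-leaf tensors (outputs of autograd operations), and only leaf tensors support the deepcopy protocol. Two common workarounds: create the target encoder once in __init__, before any forward pass has run, or skip deepcopy entirely by building a fresh encoder and copying the weights over. A sketch of the second option, reusing the question's Pre_model constructor and assuming the model argument is stored as self.model (the original __init__ does not keep it):

def _get_target_encoder(self):
    # Build a structurally identical encoder instead of deepcopying;
    # the tensors in a state_dict are plain leaves, so this cannot
    # trigger the deepcopy error.
    target_encoder = Pre_model(self.model)  # assumes self.model was saved in __init__
    target_encoder.load_state_dict(self.online_encoder.state_dict())
    for p in target_encoder.parameters():
        p.requires_grad = False
    return target_encoder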

Getting access to class's variables inside another class's def

I'm trying to override the __str__ method in the Person() class:
class Person(object):
    def __init__(self, Nose=None, Neck=None, RShoulder=None, RElbow=None,
                 RWrist=None, LShoulder=None, LElbow=None, LWrist=None,
                 MidHip=None, RHip=None, RKnee=None, RAnkle=None, LHip=None,
                 LKnee=None, LAnkle=None, REye=None, LEye=None, REar=None,
                 LEar=None, LBigToe=None, LSmallToe=None, LHeel=None,
                 RBigToe=None, RSmallToe=None, RHeel=None):
        self.Nose = Nose
        self.Neck = Neck
        self.RShoulder = RShoulder
        self.RElbow = RElbow
        self.RWrist = RWrist
        self.LShoulder = LShoulder
        self.LElbow = LElbow
        self.LWrist = LWrist
        self.MidHip = MidHip
        self.RHip = RHip
        self.RKnee = RKnee
        self.RAnkle = RAnkle
        self.LHip = LHip
        self.LKnee = LKnee
        self.LAnkle = LAnkle
        self.REye = REye
        self.LEye = LEye
        self.REar = REar
        self.LEar = LEar
        self.LBigToe = LBigToe
        self.LSmallToe = LSmallToe
        self.LHeel = LHeel
        self.RBigToe = RBigToe
        self.RSmallToe = RSmallToe
        self.RHeel = RHeel

    def __str__(self):
        return 'Nose = %s\nNeck = \n%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' % (
            self.Nose, self.Neck, self.RShoulder, self.RElbow, self.RWrist,
            self.LShoulder, self.LElbow, self.LWrist, self.MidHip, self.RHip,
            self.RKnee, self.RAnkle, self.LHip, self.LKnee, self.LAnkle,
            self.REye, self.LEye, self.REar, self.LEar, self.LBigToe,
            self.LSmallToe, self.LHeel, self.RBigToe, self.RSmallToe, self.RHeel)
And I want to find a more elegant way to return a string that will look like this:
Nose = something
Neck = something
...
...
...
Question: an elegant way to return a string which will look like the above.
You can use the built-in vars function to get the instance's __dict__ and format it using .format(...) and .join(...).
Reference:
vars([object])
Return the __dict__ attribute for a module, class, instance, or any other object with a __dict__ attribute.
.format(value[, format_spec])
Convert a value to a “formatted” representation, as controlled by a standard formatting syntax that is used by most built-in types: Format Specification Mini-Language.
<str>.join(iterable)
Return a string which is the concatenation of the strings in iterable.
class Person:
    def __init__(self, **kwargs):
        self.Nose = kwargs.get('Nose', None)
        self.Neck = kwargs.get('Neck', None)
        self.RShoulder = kwargs.get('RShoulder', None)

    def __str__(self):
        return '\n'.join('{} = {}'.format(k, v) for k, v in vars(self).items())

p = Person(Nose=1, Neck=1)
print(p)
Output:
Nose = 1
Neck = 1
RShoulder = None
Tested with Python: 3.6
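
On Python 3.6+ the same __str__ can be written a little more compactly with an f-string; the behavior is identical:

def __str__(self):
    return '\n'.join(f'{k} = {v}' for k, v in vars(self).items())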

Error when referencing a class within its __init__ method

I am putting the NMT TensorFlow code inside a main class. The code base has two classes, Encoder and Decoder, which are referenced in their respective __init__ methods. However, this raises an error: 'Undefined name Encoder'.
import os

import tensorflow as tf

class TranslationModel(ModelBase):

    pathToZip = tf.keras.utils.get_file(
        'spa-eng.zip',
        origin='http://download.tensorflow.org/data/spa-eng.zip',
        extract=True)
    pathToFile = os.path.dirname(pathToZip) + "/spa-eng/spa.txt"

    def __init__(self,
                 batchSize=64,
                 bufferSize=None,
                 numberOfBatches=None,
                 units=1024,
                 vocabInputSize=None,
                 vocabTargetSize=None,
                 optimizer=tf.train.AdamOptimizer(),
                 dataSetPath=None,
                 inputTensor=None,
                 targetTensor=None,
                 inputLanguage=None,
                 targetLanguage=None,
                 maxLengthInput=None,
                 maxLengthTarget=None,
                 embeddingDimension=256, *arg, **kwargs):
        self.batchSize = 64
        self.bufferSize = None
        self.numberOfBatches = None
        self.units = units
        self.vocabInputSize = None
        self.vocabTargetSize = None
        self.optimizer = optimizer
        self.dataSetPath = dataSetPath
        self.targetTensor = targetTensor
        self.inputTensor = inputTensor
        self.inputLanguage = inputLanguage
        self.targetLanguage = targetLanguage
        self.maxLengthInput = maxLengthInput
        self.maxLengthTarget = maxLengthTarget
        self.embeddingDimension = embeddingDimension
        super().__init__(*arg, **kwargs)

    # OTHER FUNCTIONS HERE

    class Encoder(tf.keras.Model):
        def __init__(self, vocabSize, embeddingDimension, encoderUnits, batchSize):
            super(Encoder, self).__init__()  # Raises error - 'Undefined name Encoder'
            # Other code here

    class Decoder(tf.keras.Model):
        def __init__(self, vocabSize, embeddingDimension, dec_units, batchSize):
            super(Decoder, self).__init__()  # Raises error - 'Undefined name Decoder'
            # Other code
That's because when one class is nested inside another, you have to qualify the inner class with the outer one to refer to it: OuterClass.InnerClass. It won't work if you just use InnerClass.
In your case that is TranslationModel.Encoder.
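
A minimal sketch of both working spellings, trimmed down to just the super() calls (the surrounding NMT code is omitted):

import tensorflow as tf

class TranslationModel:

    class Encoder(tf.keras.Model):
        def __init__(self):
            # Python 3: zero-argument super() needs no class name at all
            super().__init__()

    class Decoder(tf.keras.Model):
        def __init__(self):
            # Explicit form: qualify the nested class with the outer class;
            # TranslationModel exists by the time this method is called
            super(TranslationModel.Decoder, self).__init__()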

python creating an object of a class

In the function read_train_sets(), an empty class called DataSets is created. It has no methods or variables. An object called data_sets is then created.
My question is: is data_sets.train an object of the class DataSet()? Or are you creating a method called train() and setting it equal to an object of the DataSet() class?
Note that there are two classes, DataSet and DataSets, in the code.
import glob
import os

import cv2
import numpy as np
from sklearn.utils import shuffle

def load_train(train_path, image_size, classes):
    images = []
    labels = []
    img_names = []
    cls = []
    print('Going to read training images')
    for fields in classes:
        index = classes.index(fields)
        print('Now going to read {} files (Index: {})'.format(fields, index))
        path = os.path.join(train_path, fields, '*g')
        files = glob.glob(path)
        for fl in files:
            image = cv2.imread(fl)
            image = cv2.resize(image, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
            image = image.astype(np.float32)
            image = np.multiply(image, 1.0 / 255.0)
            images.append(image)
            label = np.zeros(len(classes))
            label[index] = 1.0
            labels.append(label)
            flbase = os.path.basename(fl)
            img_names.append(flbase)
            cls.append(fields)
    images = np.array(images)
    labels = np.array(labels)
    img_names = np.array(img_names)
    cls = np.array(cls)
    return images, labels, img_names, cls

class DataSet(object):
    def __init__(self, images, labels, img_names, cls):
        self._num_examples = images.shape[0]
        self._images = images
        self._labels = labels
        self._img_names = img_names
        self._cls = cls
        self._epochs_done = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def img_names(self):
        return self._img_names

    @property
    def cls(self):
        return self._cls

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_done(self):
        return self._epochs_done

    def next_batch(self, batch_size):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # After each epoch we update this
            self._epochs_done += 1
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end], self._img_names[start:end], self._cls[start:end]

def read_train_sets(train_path, image_size, classes, validation_size):
    class DataSets(object):
        pass
    data_sets = DataSets()

    images, labels, img_names, cls = load_train(train_path, image_size, classes)
    images, labels, img_names, cls = shuffle(images, labels, img_names, cls)

    if isinstance(validation_size, float):
        validation_size = int(validation_size * images.shape[0])

    validation_images = images[:validation_size]
    validation_labels = labels[:validation_size]
    validation_img_names = img_names[:validation_size]
    validation_cls = cls[:validation_size]

    train_images = images[validation_size:]
    train_labels = labels[validation_size:]
    train_img_names = img_names[validation_size:]
    train_cls = cls[validation_size:]

    data_sets.train = DataSet(train_images, train_labels, train_img_names, train_cls)
    data_sets.valid = DataSet(validation_images, validation_labels, validation_img_names, validation_cls)

    return data_sets
You can dynamically assign attributes to objects in Python. Try calling hasattr(data_sets, 'train') before and after the assignment and see what you get. You can also call type(data_sets.train) and convince yourself that it is indeed of type DataSet.
data_sets.train = DataSet(train_images, train_labels, train_img_names, train_cls)
This line assigns an instance of DataSet to data_sets.train; no method is being created. With respect to the data_sets object, train and valid are simply two attributes attached to it. Hope this helps.
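
A tiny self-contained demonstration of the dynamic-attribute behavior (the names here are invented for the example):

class DataSets:
    pass

ds = DataSets()
print(hasattr(ds, 'train'))   # False: no such attribute yet

ds.train = [1, 2, 3]          # attach an attribute at runtime
print(hasattr(ds, 'train'))   # True
print(type(ds.train))         # <class 'list'>: the type of whatever was assigned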
