Keras model conflicting with multiprocessing - python

I encountered a weird issue while trying to run two class methods concurrently from a third method. After eliminating large chunks of code, one at a time, I ended up with the example below.
Notes:
I must have a model as a class attribute; I cannot change that.
I need both tasks to run concurrently, and I cannot move these two tasks out of the class because they interact with other class members.
I get the same error using multiprocessing.Process(), so that's not going to fix the problem.
from concurrent.futures import ProcessPoolExecutor, as_completed
from tensorflow.keras.models import Model


class Example:
    def __init__(self):
        self.model = Model()
        # comment out the line above and uncomment the line below, the error is gone
        # self.model = None

    def task1(self):
        pass

    def task2(self):
        pass

    def process(self):
        with ProcessPoolExecutor(2) as executor:
            future_items = [
                executor.submit(self.task1),
                executor.submit(self.task2),
            ]
            results = [
                future_item.result() for future_item in as_completed(future_items)
            ]
            print(results)


if __name__ == '__main__':
    ex = Example()
    ex.process()
Result:
2021-01-10 08:10:04.315386: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set
2021-01-10 08:10:04.315897: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
concurrent.futures.process._RemoteTraceback:
"""
Traceback (most recent call last):
File "/usr/local/Cellar/python#3.8/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/queues.py", line 239, in _feed
obj = _ForkingPickler.dumps(obj)
File "/usr/local/Cellar/python#3.8/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: cannot pickle 'weakref' object
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/emadboctor/Desktop/code/drl-algos/scratch.py", line 34, in <module>
ex.process()
File "/Users/emadboctor/Desktop/code/drl-algos/scratch.py", line 26, in process
results = [
File "/Users/emadboctor/Desktop/code/drl-algos/scratch.py", line 27, in <listcomp>
future_item.result() for future_item in as_completed(future_items)
File "/usr/local/Cellar/python#3.8/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/usr/local/Cellar/python#3.8/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/usr/local/Cellar/python#3.8/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/queues.py", line 239, in _feed
obj = _ForkingPickler.dumps(obj)
File "/usr/local/Cellar/python#3.8/3.8.7/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: cannot pickle 'weakref' object
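For context on where the error comes from: submitting a bound method such as self.task1 pickles the whole instance, and a Keras Model holds weakrefs that cannot be pickled. Below is a minimal sketch of one possible direction (my own assumption, not part of the original post, and only viable if task1/task2 do not need the model inside the worker processes): keep the model as a class attribute, but drop it from the state that gets pickled with self.
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor, as_completed
from tensorflow.keras.models import Model

class Example:
    def __init__(self):
        self.model = Model()

    def __getstate__(self):
        # assumption: the workers never touch self.model, so it is safe to
        # strip the unpicklable Keras model before `self` is sent to them
        state = self.__dict__.copy()
        state['model'] = None
        return state

    def task1(self):
        pass

    def task2(self):
        pass

    def process(self):
        with ProcessPoolExecutor(2) as executor:
            future_items = [executor.submit(self.task1), executor.submit(self.task2)]
            print([f.result() for f in as_completed(future_items)])

if __name__ == '__main__':
    Example().process()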

Related

Error trying to run PyTorch on multiple GPUs

I'm trying to create a script with what I thought was a fairly simple Producer/Consumer queue. I'm using this on a system with two A4000 GPUs. Below is the relevant code.
import torch
from torch.multiprocessing import Process, set_start_method, Queue

def main():
    input_data_queue = Queue(25)
    send_data_queue = Queue(5)
    for i in range(torch.cuda.device_count()):
        Process_Data(input_data_queue, send_data_queue, i)
    ....

class Process_Data:
    def __init__(self, in_q, out_q, gpu_id):
        self.in_queue = in_q
        self.out_queue = out_q
        self.gpu_id = gpu_id
        self.model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt').to(torch.device(self.gpu_id))
        self.model.eval()
    ....

if __name__ == "__main__":
    set_start_method('spawn')
    main()
I always get the error:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "/usr/lib/python3.8/multiprocessing/spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
File "/usr/local/lib/python3.8/dist-packages/torch/multiprocessing/reductions.py", line 111, in rebuild_cuda_tensor
storage = storage_cls._new_shared_cuda(
File "/usr/local/lib/python3.8/dist-packages/torch/storage.py", line 630, in _new_shared_cuda
return eval(cls.__module__)._UntypedStorage._new_shared_cuda(*args, **kwargs)
RuntimeError: CUDA error: invalid device ordinal
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
For calling the device, I've tried:
First creating a model with torch.device(0), then another class with torch.device(1)
torch.device("cuda:0") then torch.device("cuda:1")
torch.device("cuda") then torch.device("cuda")
torch.device("cuda", 0), then torch.device("cuda",1)
All variations I can find documented give the same error.
How can I get two models, running on two GPUs, sharing a work queue?
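Not an authoritative answer, but one pattern that usually avoids "invalid device ordinal" with the spawn start method is to never build the model (or any CUDA tensor) in the parent at all; pass only the queues and the GPU index to each worker and construct the model inside the child. A rough sketch under that assumption follows (the worker function and its loop body are illustrative, not the original code):
import torch
from torch.multiprocessing import Process, Queue, set_start_method

def worker(in_q, out_q, gpu_id):
    # The model is built inside the child process, pinned to this worker's GPU,
    # so spawn never has to pickle a CUDA tensor.
    device = torch.device('cuda', gpu_id)
    model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt').to(device)
    model.eval()
    while True:
        item = in_q.get()            # e.g. an image path or numpy array
        if item is None:             # sentinel to shut the worker down
            break
        with torch.no_grad():
            results = model(item)
        out_q.put(results.xyxy[0].cpu())   # send predictions back as a CPU tensor

if __name__ == '__main__':
    set_start_method('spawn')
    input_data_queue, send_data_queue = Queue(25), Queue(5)
    workers = [Process(target=worker, args=(input_data_queue, send_data_queue, i))
               for i in range(torch.cuda.device_count())]
    for p in workers:
        p.start()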

How can you use easyocr with multiprocessing?

I am trying to read text from images with easyocr in Python, and I want to run it in a separate process so it doesn't hold back other parts of the code. But when I call the function inside a multiprocessing loop, I get a NotImplementedError. Here is an example of the code.
import multiprocessing as mp
import easyocr
import cv2

def ocr_test(q, reader):
    while not q.empty():
        q.get()
        img = cv2.imread('unknown.png')
        result = reader.readtext(img)

if __name__ == '__main__':
    q = mp.Queue()
    reader = easyocr.Reader(['en'])
    p = mp.Process(target=ocr_test, args=(q, reader))
    p.start()
    q.put('start')
    p.join()
and this is the error I get.
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Program Files\Python310\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Program Files\Python310\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
File "C:\Python\venv\lib\site-packages\torch\multiprocessing\reductions.py", line 90, in rebuild_tensor
t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
File "C:\Python\venv\lib\site-packages\torch\_utils.py", line 134, in _rebuild_tensor
t = torch.tensor([], dtype=storage.dtype, device=storage._untyped().device)
NotImplementedError: Could not run 'aten::empty.memory_format' with arguments from the 'QuantizedCPU' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build process (if using custom build). If you are a Facebook employee using PyTorch on mobile, please visit https://fburl.com/ptmfixes for possible resolutions. 'aten::empty.memory_format' is only available for these backends: [CPU, Meta, MkldnnCPU, SparseCPU, SparseCsrCPU, BackendSelect, Python, Named, Conjugate, Negative, ZeroTensor, ADInplaceOrView, AutogradOther, AutogradCPU, AutogradCUDA, AutogradXLA, AutogradLazy, AutogradXPU, AutogradMLC, AutogradHPU, AutogradNestedTensor, AutogradPrivateUse1, AutogradPrivateUse2, AutogradPrivateUse3, Tracer, AutocastCPU, Autocast, Batched, VmapMode, Functionalize].
Is there a way to solve this problem?
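One direction that may help (an assumption based on the traceback, not a confirmed fix): the error happens while the spawned child unpickles the quantized easyocr/torch model, so constructing the Reader inside the worker process and passing only plain data through the queue sidesteps the pickling entirely. A rough sketch:
import multiprocessing as mp
import cv2
import easyocr

def ocr_test(q):
    # the Reader (and its quantized torch model) is built in the child,
    # so it never has to be pickled by spawn
    reader = easyocr.Reader(['en'])
    while True:
        item = q.get()
        if item is None:      # sentinel to stop the worker
            break
        img = cv2.imread('unknown.png')
        print(reader.readtext(img))

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=ocr_test, args=(q,))
    p.start()
    q.put('start')
    q.put(None)
    p.join()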

tensorflow_hub throwing this error: 'SentencepieceOp' when loading the link

I am trying to run the following line of code in PyCharm, and I have tensorflow_hub installed and imported.
use = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")
Any suggestions for the error below? I need this for my project.
Traceback (most recent call last):
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3820, in _get_op_def
return self._op_def_cache[type]
KeyError: 'SentencepieceOp'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/Jon10/OneDrive/Documents/Computer Science/Dissertation/PythonPractice/TFTest/test.py", line 28, in <module>
use = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_hub\module_v2.py", line 102, in load
obj = tf_v1.saved_model.load_v2(module_path, tags=tags)
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\saved_model\load.py", line 517, in load
return load_internal(export_dir, tags)
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\saved_model\load.py", line 541, in load_internal
export_dir)
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\saved_model\load.py", line 114, in __init__
meta_graph.graph_def.library))
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\saved_model\function_deserialization.py", line 312, in load_function_def_library
copy, copy_functions=False)
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\framework\function_def_to_graph.py", line 61, in function_def_to_graph
fdef, input_shapes, copy_functions)
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\framework\function_def_to_graph.py", line 214, in function_def_to_graph_def
op_def = ops.get_default_graph()._get_op_def(node_def.op) # pylint: disable=protected-access
File "C:\Users\Jon10\miniconda3\envs\tensorflow\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3824, in _get_op_def
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
tensorflow.python.framework.errors_impl.NotFoundError: Op type not registered 'SentencepieceOp' in binary running on DESKTOP-..... Make sure the Op and Kernel are registered in the binary running in this process. Note that if you are loading a saved graph which used ops from tf.contrib, accessing (e.g.) `tf.contrib.resampler` should be done before importing the graph, as contrib ops are lazily registered when the module is first accessed.
You need to install tensorflow_text and import it before calling hub.load.
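A minimal sketch of that fix (assuming the package has been installed with pip install tensorflow_text): importing tensorflow_text registers the Sentencepiece ops before the SavedModel is loaded.
import tensorflow_hub as hub
import tensorflow_text  # noqa: F401 -- registers the SentencepieceOp kernels

use = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")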

Error pickling a `matlab` object in joblib `Parallel` context

I'm running some Matlab code in parallel from inside a Python context (I know, but that's what's going on), and I'm hitting an import error involving matlab.double. The same code works fine in a multiprocessing.Pool, so I am having trouble figuring out what the problem is. Here's a minimal reproducing test case.
import matlab
from multiprocessing import Pool
from joblib import Parallel, delayed

# A global object that I would like to be available in the parallel subroutine
x = matlab.double([[0.0]])

def f(i):
    print(i, x)

with Pool(4) as p:
    p.map(f, range(10))
# This prints 1, [[0.0]]\n2, [[0.0]]\n... as expected

for _ in Parallel(4, backend='multiprocessing')(delayed(f)(i) for i in range(10)):
    pass
# This also prints 1, [[0.0]]\n2, [[0.0]]\n... as expected

# Now run with default `backend='loky'`
for _ in Parallel(4)(delayed(f)(i) for i in range(10)):
    pass
# ^ this crashes.
So, the only problematic one is the one using the 'loky' backend.
The full traceback is:
exception calling callback for <Future at 0x7f63b5a57358 state=finished raised BrokenProcessPool>
joblib.externals.loky.process_executor._RemoteTraceback:
'''
Traceback (most recent call last):
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/process_executor.py", line 391, in _process_worker
call_item = call_queue.get(block=True, timeout=timeout)
File "~/miniconda3/envs/myenv/lib/python3.6/multiprocessing/queues.py", line 113, in get
return _ForkingPickler.loads(res)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/mlarray.py", line 31, in <module>
from _internal.mlarray_sequence import _MLArrayMetaClass
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/_internal/mlarray_sequence.py", line 3, in <module>
from _internal.mlarray_utils import _get_strides, _get_size, \
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/_internal/mlarray_utils.py", line 4, in <module>
import matlab
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/__init__.py", line 24, in <module>
from mlarray import double, single, uint8, int8, uint16, \
ImportError: cannot import name 'double'
'''
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/_base.py", line 625, in _invoke_callbacks
callback(self)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 309, in __call__
self.parallel.dispatch_next()
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 731, in dispatch_next
if not self.dispatch_one_batch(self._original_iterator):
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 759, in dispatch_one_batch
self._dispatch(tasks)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 716, in _dispatch
job = self._backend.apply_async(batch, callback=cb)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/_parallel_backends.py", line 510, in apply_async
future = self._workers.submit(SafeFunction(func))
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/reusable_executor.py", line 151, in submit
fn, *args, **kwargs)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/process_executor.py", line 1022, in submit
raise self._flags.broken
joblib.externals.loky.process_executor.BrokenProcessPool: A task has failed to un-serialize. Please ensure that the arguments of the function are all picklable.
joblib.externals.loky.process_executor._RemoteTraceback:
'''
Traceback (most recent call last):
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/process_executor.py", line 391, in _process_worker
call_item = call_queue.get(block=True, timeout=timeout)
File "~/miniconda3/envs/myenv/lib/python3.6/multiprocessing/queues.py", line 113, in get
return _ForkingPickler.loads(res)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/mlarray.py", line 31, in <module>
from _internal.mlarray_sequence import _MLArrayMetaClass
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/_internal/mlarray_sequence.py", line 3, in <module>
from _internal.mlarray_utils import _get_strides, _get_size, \
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/_internal/mlarray_utils.py", line 4, in <module>
import matlab
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/matlab/__init__.py", line 24, in <module>
from mlarray import double, single, uint8, int8, uint16, \
ImportError: cannot import name 'double'
'''
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "test.py", line 20, in <module>
for _ in Parallel(4)(delayed(f)(i) for i in range(10)):
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 934, in __call__
self.retrieve()
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 833, in retrieve
self._output.extend(job.get(timeout=self.timeout))
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/_parallel_backends.py", line 521, in wrap_future_result
return future.result(timeout=timeout)
File "~/miniconda3/envs/myenv/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "~/miniconda3/envs/myenv/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/_base.py", line 625, in _invoke_callbacks
callback(self)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 309, in __call__
self.parallel.dispatch_next()
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 731, in dispatch_next
if not self.dispatch_one_batch(self._original_iterator):
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 759, in dispatch_one_batch
self._dispatch(tasks)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/parallel.py", line 716, in _dispatch
job = self._backend.apply_async(batch, callback=cb)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/_parallel_backends.py", line 510, in apply_async
future = self._workers.submit(SafeFunction(func))
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/reusable_executor.py", line 151, in submit
fn, *args, **kwargs)
File "~/miniconda3/envs/myenv/lib/python3.6/site-packages/joblib/externals/loky/process_executor.py", line 1022, in submit
raise self._flags.broken
joblib.externals.loky.process_executor.BrokenProcessPool: A task has failed to un-serialize. Please ensure that the arguments of the function are all picklable.
Looking at the traceback, it seems like the root cause is an issue importing the matlab package in the child process.
It's probably worth noting that this all runs just fine if instead I had defined x = np.array([[0.0]]) (after importing numpy as np). And of course the main process has no problem with any matlab imports, so I am not sure why the child process would.
I'm not sure if this error has anything in particular to do with the matlab package, or if it's something to do with global variables and cloudpickle or loky. In my application it would help to stick with loky, so I'd appreciate any insight!
I should also note that I'm using the official Matlab engine for Python: https://www.mathworks.com/help/matlab/matlab-engine-for-python.html. I suppose that might make it hard for others to try out the test cases, so I wish I could reproduce this error with a type other than matlab.double, but I haven't found another yet.
Digging around more, I've noticed that the process of importing the matlab package is more circular than I would expect, and I'm speculating that this could be part of the problem? The issue is that when import matlab is run by loky's _ForkingPickler, first some file matlab/mlarray.py is imported, which imports some other files, one of which contains import matlab, and this causes matlab/__init__.py to be run, which internally has from mlarray import double, single, uint8, ... which is the line that causes the crash.
Could this circularity be the issue? If so, why can I import this module in the main process but not in the loky backend?
The error is caused by incorrect loading order of global objects in the child processes. It can be seen clearly in the traceback
_ForkingPickler.loads(res) -> ... -> import matlab -> from mlarray import ...
that matlab is not yet imported when the global variable x is loaded by cloudpickle.
joblib with loky seems to treat modules as normal global objects and send them dynamically to the child processes. joblib doesn't record the order in which those objects/modules were defined. Therefore they are loaded (initialized) in a random order in the child processes.
A simple workaround is to manually pickle the matlab object and load it after importing matlab inside your function.
import matlab
import pickle

px = pickle.dumps(matlab.double([[0.0]]))

def f(i):
    import matlab
    x = pickle.loads(px)
    print(i, x)
Of course you can also use joblib's dump and load to serialize the objects.
Use initializer
Thanks to the suggestion of @Aaron, you can also use an initializer (for loky) to import matlab before loading x.
Currently there's no simple API to specify an initializer, so I wrote a simple function:
def with_initializer(self, f_init):
    # Overwrite initializer hook in the Loky ProcessPoolExecutor
    # https://github.com/tomMoral/loky/blob/f4739e123acb711781e46581d5ed31ed8201c7a9/loky/process_executor.py#L850
    hasattr(self._backend, '_workers') or self.__enter__()
    origin_init = self._backend._workers._initializer

    def new_init():
        origin_init()
        f_init()

    self._backend._workers._initializer = new_init if callable(origin_init) else f_init
    return self
It is a little bit hacky but works well with the current version of joblib and loky.
Then you can use it like:
import matlab
from joblib import Parallel, delayed

x = matlab.double([[0.0]])

def f(i):
    print(i, x)

def _init_matlab():
    import matlab

with Parallel(4) as p:
    for _ in with_initializer(p, _init_matlab)(delayed(f)(i) for i in range(10)):
        pass
I hope the joblib developers will add an initializer argument to the Parallel constructor in the future.

ML Engine: Prediction Error while executing local predict command

I have uploaded a version of the model to Google ML Engine, with saved_model.pb and a variables folder. When I try to execute the command:
gcloud ml-engine local predict --model-dir=saved_model --json-instances=request.json
It shows the following error:
ERROR: (gcloud.ml-engine.local.predict) 2018-09-11 19:06:39.770396: I tensorflow/core/platform/cpu_feature_guard.cc:141]
Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
Traceback (most recent call last):
File "lib/googlecloudsdk/command_lib/ml_engine/local_predict.py", line 172, in <module>
main()
File "lib/googlecloudsdk/command_lib/ml_engine/local_predict.py", line 167, in main
signature_name=args.signature_name)
File "/usr/lib/google-cloud-sdk/lib/third_party/ml_sdk/cloud/ml/prediction/prediction_lib.py", line 106, in local_predict
predictions = model.predict(instances, signature_name=signature_name)
File "/usr/lib/google-cloud-sdk/lib/third_party/ml_sdk/cloud/ml/prediction/prediction_utils.py", line 230, in predict
preprocessed = self.preprocess(instances, stats=stats, **kwargs)
File "/usr/lib/google-cloud-sdk/lib/third_party/ml_sdk/cloud/ml/prediction/frameworks/tf_prediction_lib.py", line 436, in preprocess
preprocessed = self._canonicalize_input(instances, signature)
File "/usr/lib/google-cloud-sdk/lib/third_party/ml_sdk/cloud/ml/prediction/frameworks/tf_prediction_lib.py", line 453, in _canonicalize_input
return canonicalize_single_tensor_input(instances, tensor_name)
File "/usr/lib/google-cloud-sdk/lib/third_party/ml_sdk/cloud/ml/prediction/frameworks/tf_prediction_lib.py", line 166, in canonicalize_single_tensor_input
instances = [parse_single_tensor(x, tensor_name) for x in instances]
File "/usr/lib/google-cloud-sdk/lib/third_party/ml_sdk/cloud/ml/prediction/frameworks/tf_prediction_lib.py", line 162, in parse_single_tensor
(tensor_name, list(x.keys())))
cloud.ml.prediction.prediction_utils.PredictionError: Invalid inputs: Expected tensor name: inputs, got tensor name: [u'inputs', u'key']. (Error code: 1)
My request.json file is
{"inputs": {"b64": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHVArwDASIAAhEBAxEB/8QAHwAAAQUBAQEBA....."}, "key": "841bananas.jpg"}
Thanks in advance.
It appears your model was exported with only one input, named "inputs". In that case, you shouldn't be sending "key" in the JSON, i.e. (scroll to the end to see that "key" has been removed):
{"inputs": {"b64": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHVArwDASIAAhEBAxEB/8QAHwAAAQUBAQEBA....."}}
