I'm in the process of serializing a large dataset of images with different resolutions. When I create the TFRecords, I also store the shape of the serialized image like this:
def convert(folder_dirs, tfrecords_filename = '.tfrecords'):
with tf.python_io.TFRecordWriter(tfrecords_filename) as writer:
for ix, img_path in enumerate(folder_dirs):
data, annotation = read_image(img_path)
image_shape = list(data.shape)
img_raw = data.tostring()
annotation_raw = list(annotation)
example = tf.train.Example(features=tf.train.Features(feature={
'shape': _int64_feature(image_shape),
'image_raw': _bytes_feature(img_raw),
'label_raw': _int64_feature(annotation_raw)}))
serialized = example.SerializeToString()
writer.write(serialized)
My question is: how do I use the serialized shape to reshape the raw bytes of the image?
def imgs_input_fn(filenames, perform_shuffle=False, repeat_count=1, batch_size=1):
def _parse_function(serialized):
features = \
{
'shape': tf.FixedLenFeature([], tf.string),
'image_raw': tf.FixedLenFeature([], tf.string),
'label_raw': tf.FixedLenFeature([], tf.string)
}
parsed_example = tf.parse_single_example(serialized=serialized,
features=features)
shape = parsed_example['shape'] # <-- True image shape
image_raw = parsed_example['image_raw']
label = parsed_example['label_raw']
image = tf.io.decode_raw(image_raw, tf.uint16)
image = tf.reshape(image, [120, 120, 3]) # <-- Reshape needed
image = tf.cast(image, tf.float32)
d = dict(zip([input_name], [image])), label
return image, label
dataset = tf.data.TFRecordDataset(filenames=filenames)
dataset = dataset.map(_parse_function)
...
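One way to use the stored shape (a sketch, not tested: it assumes 'shape' was written as a length-3 int64 list and 'label_raw' as an int64 list of unknown length, matching convert() above) is to declare 'shape' as a fixed-length int64 feature and feed it straight into tf.reshape:
def _parse_function(serialized):
    features = {
        'shape': tf.FixedLenFeature([3], tf.int64),      # [height, width, channels]
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label_raw': tf.VarLenFeature(tf.int64),
    }
    parsed = tf.parse_single_example(serialized=serialized, features=features)
    shape = tf.cast(parsed['shape'], tf.int32)
    image = tf.io.decode_raw(parsed['image_raw'], tf.uint16)
    image = tf.reshape(image, shape)                     # per-example dynamic shape
    image = tf.cast(image, tf.float32)
    label = tf.sparse.to_dense(parsed['label_raw'])
    return image, label
Note that images of different resolutions still cannot be batched directly after this; they would need to be resized or padded before batching.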
Have you tried numpy and scipy.ndimage?
import numpy as np
import skimage.io
from scipy.ndimage import zoom
from skimage.transform import resize
try:
# Python3 will most likely not be able to load protobuf
from caffe.proto import caffe_pb2
except:
import sys
if sys.version_info >= (3, 0):
print("Failed to include caffe_pb2, things might go wrong!")
else:
raise
## proto / datum / ndarray conversion
def blobproto_to_array(blob, return_diff=False):
"""
Convert a blob proto to an array. In default, we will just return the data,
unless return_diff is True, in which case we will return the diff.
"""
# Read the data into an array
if return_diff:
data = np.array(blob.diff)
else:
data = np.array(blob.data)
# Reshape the array
if blob.HasField('num') or blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
# Use legacy 4D shape
return data.reshape(blob.num, blob.channels, blob.height, blob.width)
else:
return data.reshape(blob.shape.dim)
def array_to_blobproto(arr, diff=None):
"""Converts a N-dimensional array to blob proto. If diff is given, also
convert the diff. You need to make sure that arr and diff have the same
shape, and this function does not do sanity check.
"""
blob = caffe_pb2.BlobProto()
blob.shape.dim.extend(arr.shape)
blob.data.extend(arr.astype(float).flat)
if diff is not None:
blob.diff.extend(diff.astype(float).flat)
return blob
def arraylist_to_blobprotovector_str(arraylist):
"""Converts a list of arrays to a serialized blobprotovec, which could be
then passed to a network for processing.
"""
vec = caffe_pb2.BlobProtoVector()
vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist])
return vec.SerializeToString()
def blobprotovector_str_to_arraylist(str):
"""Converts a serialized blobprotovec to a list of arrays.
"""
vec = caffe_pb2.BlobProtoVector()
vec.ParseFromString(str)
return [blobproto_to_array(blob) for blob in vec.blobs]
def array_to_datum(arr, label=None):
"""Converts a 3-dimensional array to datum. If the array has dtype uint8,
the output data will be encoded as a string. Otherwise, the output data
will be stored in float format.
"""
if arr.ndim != 3:
raise ValueError('Incorrect array shape.')
datum = caffe_pb2.Datum()
datum.channels, datum.height, datum.width = arr.shape
if arr.dtype == np.uint8:
datum.data = arr.tostring()
else:
datum.float_data.extend(arr.flat)
if label is not None:
datum.label = label
return datum
def datum_to_array(datum):
"""Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label.
"""
if len(datum.data):
return np.fromstring(datum.data, dtype=np.uint8).reshape(
datum.channels, datum.height, datum.width)
else:
return np.array(datum.float_data).astype(float).reshape(
datum.channels, datum.height, datum.width)
## Pre-processing
class Transformer:
"""
Transform input for feeding into a Net.
Note: this is mostly for illustrative purposes and it is likely better
to define your own input preprocessing routine for your needs.
Parameters
----------
net : a Net for which the input should be prepared
"""
def __init__(self, inputs):
self.inputs = inputs
self.transpose = {}
self.channel_swap = {}
self.raw_scale = {}
self.mean = {}
self.input_scale = {}
def __check_input(self, in_):
if in_ not in self.inputs:
raise Exception('{} is not one of the net inputs: {}'.format(
in_, self.inputs))
def preprocess(self, in_, data):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- transpose dimensions to K x H x W
- reorder channels (for instance color to BGR)
- scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
- subtract mean
- scale feature
Parameters
----------
in_ : name of input blob to preprocess for
data : (H' x W' x K) ndarray
Returns
-------
caffe_in : (K x H x W) ndarray for input to a Net
"""
self.__check_input(in_)
caffe_in = data.astype(np.float32, copy=False)
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
in_dims = self.inputs[in_][2:]
if caffe_in.shape[:2] != in_dims:
caffe_in = resize_image(caffe_in, in_dims)
if transpose is not None:
caffe_in = caffe_in.transpose(transpose)
if channel_swap is not None:
caffe_in = caffe_in[channel_swap, :, :]
if raw_scale is not None:
caffe_in *= raw_scale
if mean is not None:
caffe_in -= mean
if input_scale is not None:
caffe_in *= input_scale
return caffe_in
def deprocess(self, in_, data):
"""
Invert Caffe formatting; see preprocess().
"""
self.__check_input(in_)
decaf_in = data.copy().squeeze()
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
if input_scale is not None:
decaf_in /= input_scale
if mean is not None:
decaf_in += mean
if raw_scale is not None:
decaf_in /= raw_scale
if channel_swap is not None:
decaf_in = decaf_in[np.argsort(channel_swap), :, :]
if transpose is not None:
decaf_in = decaf_in.transpose(np.argsort(transpose))
return decaf_in
def set_transpose(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Parameters
----------
in_ : which input to assign this channel order
order : the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order
def set_channel_swap(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
self.__check_input(in_)
if len(order) != self.inputs[in_][1]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
self.channel_swap[in_] = order
def set_raw_scale(self, in_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.raw_scale[in_] = scale
def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean
def set_input_scale(self, in_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.input_scale[in_] = scale
## Image IO
def load_image(filename, color=True):
"""
Load an image converting from grayscale or alpha as needed.
Parameters
----------
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
-------
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
if im.shape[-1] == 1 or im.shape[-1] == 3:
im_min, im_max = im.min(), im.max()
if im_max > im_min:
# skimage is fast but only understands {1,3} channel images
# in [0, 1].
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, new_dims, order=interp_order)
resized_im = resized_std * (im_max - im_min) + im_min
else:
# the image is a constant -- avoid divide by 0
ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
dtype=np.float32)
ret.fill(im_min)
return ret
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
return resized_im.astype(np.float32)
def oversample(images, crop_dims):
"""
Crop images into the four corners, center, and their mirrored versions.
Parameters
----------
image : iterable of (H x W x K) ndarrays
crop_dims : (height, width) tuple for the crops.
Returns
-------
crops : (10*N x H x W x K) ndarray of crops for number of inputs N.
"""
# Dimensions and center.
im_shape = np.array(images[0].shape)
crop_dims = np.array(crop_dims)
im_center = im_shape[:2] / 2.0
# Make crop coordinates
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
crops_ix = np.empty((5, 4), dtype=int)
curr = 0
for i in h_indices:
for j in w_indices:
crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
curr += 1
crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
-crop_dims / 2.0,
crop_dims / 2.0
])
crops_ix = np.tile(crops_ix, (2, 1))
# Extract crops
crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1],
im_shape[-1]), dtype=np.float32)
ix = 0
for im in images:
for crop in crops_ix:
crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
ix += 1
crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors
return crops
I'm trying to implement the code from this paper for a university project. The idea is to insert an invisible watermark into a grayscale image, which can be extracted later to verify the image's ownership.
This is the code I wrote for the watermark embedding process:
import pywt
import numpy as np
import cv2
from PIL import Image
from math import sqrt, log10
from scipy.fftpack import dct, idct
def Get_MSB_LSB_Watermark () : #Function that separates the watermark into MSB and LSB images
MSBs = []
LSBs = []
for i in range (len(Watermark)) :
binary = '{:0>8}'.format(str(bin(Watermark[i]))[2:])
MSB = (binary[0:4])
LSB = (binary[4:])
MSB = int(MSB, 2)
LSB = int(LSB,2)
MSBs.append(MSB)
LSBs.append(LSB)
MSBs = np.array(MSBs)
LSBs = np.array(LSBs)
return MSBs.reshape(64,64), LSBs.reshape(64,64)
def split(array, nrows, ncols): #Split array into blocks of size nrows* ncols
r, h = array.shape
return (array.reshape(h//nrows, nrows, -1, ncols)
.swapaxes(1, 2)
.reshape(-1, nrows, ncols))
def unblockshaped(arr, h, w): #the inverse of the split function
n, nrows, ncols = arr.shape
return (arr.reshape(h//nrows, -1, nrows, ncols)
.swapaxes(1,2)
.reshape(h, w))
def ISVD (U,S,V): #the inverse of singular value decomposition
s = np.zeros(np.shape(U))
for i in range(4):
s[i, i] = S[i]
recon_image = U @ s @ V
return recon_image
def Watermark_Embedding (blocks, watermark) :
Watermarked_blocks = []
k1 = []
k2 = []
#convert the watermark to a list
w = list(np.ndarray.flatten(watermark))
for i in range (len(blocks)) :
B = blocks[i]
#Apply singular value decomposition to the block
U, s, V = np.linalg.svd(B)
#Modify the singular values of the block
P = s[1] - s[2]
delta = abs(w[i]) - P
s[1] = s[1] + delta
if s[0] >= s[1] :
k1.append(1)
else :
k1.append(-1)
#the inverse of SVD after watermark embedding
recunstructed_B = ISVD(U, s, V)
Watermarked_blocks.append(recunstructed_B)
for j in range(len(w)):
if w[j] >= 0:
k2.append(1)
else:
k2.append(-1)
return k1,k2, np.array(Watermarked_blocks)
def apply_dct(image_array):
size = image_array[0].__len__()
all_subdct = np.empty((size, size))
for i in range (0, size, 4):
for j in range (0, size, 4):
subpixels = image_array[i:i+4, j:j+4]
subdct = dct(dct(subpixels.T, norm="ortho").T, norm="ortho")
all_subdct[i:i+4, j:j+4] = subdct
return all_subdct
def inverse_dct(all_subdct):
size = all_subdct[0].__len__()
all_subidct = np.empty((size, size))
for i in range (0, size, 4):
for j in range (0, size, 4):
subidct = idct(idct(all_subdct[i:i+4, j:j+4].T, norm="ortho").T, norm="ortho")
all_subidct[i:i+4, j:j+4] = subidct
return all_subidct
#read watermark
Watermark = Image.open('Copyright.png').convert('L')
Watermark = list(Watermark.getdata())
#Separate the watermark into LSB and MSB images
Watermark1, Watermark2 = Get_MSB_LSB_Watermark()
#Apply discrete cosine transform on the two generated images
DCT_Watermark1 = apply_dct(Watermark1)
DCT_Watermark2 = apply_dct(Watermark2)
#read cover Image
Cover_Image = Image.open('10.png').convert('L')
#Apply 1-level discrete wavelet transform
LL1, (LH1, HL1, HH1) = pywt.dwt2(Cover_Image, 'haar')
#Split the LH1 and HL1 subbands into blocks of size 4*4
blocks_LH1 = split(LH1,4,4)
blocks_HL1 = split(HL1,4,4)
#Watermark Embedding in LH1 and HL1 and Keys generation
Key1, Key3, WatermarkedblocksLH1 = Watermark_Embedding(blocks_LH1,DCT_Watermark1)
Key2 ,Key4, WatermarkedblocksHL1 = Watermark_Embedding(blocks_HL1,DCT_Watermark2)
#Merge the watermarked blocks
reconstructed_LH1 = unblockshaped(WatermarkedblocksLH1, 256,256)
reconstructed_HL1 = unblockshaped(WatermarkedblocksHL1, 256,256)
#Apply the inverse discrete wavelet transform to get the watermarked image
IDWT = pywt.idwt2((LL1, (reconstructed_LH1, reconstructed_HL1, HH1)), 'haar')
cv2.imwrite('Watermarked_img.png', IDWT)
This is the code I wrote for the extraction process:
import pywt
from scipy import fftpack
import numpy as np
import cv2
from PIL import Image
import scipy
from math import sqrt, log10
from Watermark_Embedding import *
def Watermark_Extraction(blocks,key1, key2) :
Extracted_Watermark = []
for i in range(len(blocks)):
B = blocks[i]
#apply SVD on the Block
U, s, V = np.linalg.svd(B)
if key1[i] == 1 :
P = (s[1] - s[2])
Extracted_Watermark.append(P)
else :
P = (s[0] - s[2])
Extracted_Watermark.append(P)
for j in range(len(Extracted_Watermark)) :
if key2[j] == 1 :
Extracted_Watermark[j] = Extracted_Watermark[j]
else :
Extracted_Watermark[j] = - (Extracted_Watermark[j])
return np.array(Extracted_Watermark)
def Merge_W1_W2 ():
Merged_watermark = []
w1 = list(np.ndarray.flatten(IDCTW1))
w2 = list(np.ndarray.flatten(IDCTW2))
for i in range (len(w2)):
bw1 = '{:0>4}'.format((bin(int(abs(w1[i]))))[2:])
bw2 = '{:0>4}'.format((bin(int(abs(w2[i]))))[2:])
P = bw1+bw2
pixel = (int(P,2))
Merged_watermark.append(pixel)
return Merged_watermark
Watermarked_Image = Image.open('Watermarked_img.png')
LL1, (LH1, HL1, HH1) = pywt.dwt2(Watermarked_Image, 'haar')
blocks_LH1 = split(LH1,4,4)
blocks_HL1 = split(HL1,4,4)
W1 = Watermark_Extraction(blocks_LH1, Key1,Key3)
W2 = Watermark_Extraction(blocks_HL1, Key2, Key4)
W1 = W1.reshape(64,64)
W2 = W2.reshape(64,64)
IDCTW1 = inverse_dct(W1)
IDCTW2 = inverse_dct(W2)
Merged = np.array(Merge_W1_W2())
Merged = Merged.reshape(64,64)
cv2.imwrite('Extracted_Watermark.png', Merged)
The cover Image of size 512*512:
The 64*64 watermark I used
The watermarked Image :
The extracted Watermark I get:
I calculated the similarity between the two watermarks using SSIM:
from skimage.metrics import structural_similarity
original_Watermark = cv2.imread('Copyright.png')
extracted_watermark = cv2.imread('Extracted_Watermark.png')
# Convert images to grayscale
original_watermark = cv2.cvtColor(original_Watermark, cv2.COLOR_BGR2GRAY)
extracted_Watermark = cv2.cvtColor(extracted_watermark, cv2.COLOR_BGR2GRAY)
# Compute SSIM between two images
(score, diff) = structural_similarity(original_Watermark, extracted_Watermark, full=True)
print("SSIM = ", score)
I didn't apply any modification to the watermarked image, and the SSIM I got is 0.8445354561524052. However, according to the paper, the SSIM of the extracted watermark should be 0.99.
I don't know what's wrong with my code, and I have a deadline in two days, so I really need help.
Thanks in advance.
There are two issues:
In Merge_W1_W2 you are using int to convert from float to int, but that introduces errors for numbers whose floating-point representation is not exact (e.g. 14.99999999999997); this can be fixed by using round instead.
Saving with cv2.imwrite('Watermarked_img.png', IDWT) is a lossy operation because it rounds the values in IDWT to the nearest integer; if you use Watermarked_Image = IDWT instead, you will get back the exact same watermark image.
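A minimal sketch of both fixes, reusing the variable names from the scripts above (only Merge_W1_W2 and the line that hands the watermarked image to the extraction step change):
# Fix 1: round instead of truncating with int, so e.g. 14.99999999999997 becomes 15 rather than 14
def Merge_W1_W2():
    Merged_watermark = []
    w1 = list(np.ndarray.flatten(IDCTW1))
    w2 = list(np.ndarray.flatten(IDCTW2))
    for i in range(len(w2)):
        bw1 = '{:0>4}'.format(bin(round(abs(w1[i])))[2:])
        bw2 = '{:0>4}'.format(bin(round(abs(w2[i])))[2:])
        Merged_watermark.append(int(bw1 + bw2, 2))
    return Merged_watermark

# Fix 2: skip the lossy PNG round-trip and hand the float array straight to the extraction step
Watermarked_Image = IDWT  # instead of Image.open('Watermarked_img.png')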
I'm struggling to create a data generator in PyTorch that extracts 2D images from many 3D cubes saved in .dat format.
There are 200 3D cubes in total, each with shape 128*128*128. I want to extract 2D images from all of these cubes along the length and the breadth.
For example, a is a cube of size 128*128*128.
I want to extract all 2D images along the length, i.e. [:, i, :], which gives me 128 2D images along the length, and similarly along the width, i.e. [:, :, i], which gives me another 128 2D images along the width. So I get a total of 256 2D images from one 3D cube, and I want to repeat this whole process for all 200 cubes, giving me 51,200 2D images.
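Just to make the slicing concrete, for a single cube the two kinds of sections look like this (illustrative shapes only):
import numpy as np

a = np.random.rand(128, 128, 128)  # one 3D cube
along_length = a[:, 0, :]          # one of 128 sections along the length, shape (128, 128)
along_width = a[:, :, 0]           # one of 128 sections along the width, shape (128, 128)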
So far I've tried a very basic implementation which works fine but takes approximately 10 minutes to run. I'd like help creating a more optimal implementation, keeping time and space complexity in mind. Right now my approach has a time complexity of O(n²); can it be decreased further?
I'm providing below the current implementation
from os.path import join as pjoin
import torch
import numpy as np
import os
from tqdm import tqdm
from torch.utils import data
class DataGenerator(data.Dataset):
def __init__(self, is_transform=True, augmentations=None):
self.is_transform = is_transform
self.augmentations = augmentations
self.dim = (128, 128, 128)
seismicSections = [] #Input
faultSections = [] #Ground Truth
for fileName in tqdm(os.listdir(pjoin('train', 'seis')), total = len(os.listdir(pjoin('train', 'seis')))):
unrolledVolSeismic = np.fromfile(pjoin('train', 'seis', fileName), dtype = np.single) #dat file contains unrolled cube, we need to reshape it
reshapedVolSeismic = np.transpose(unrolledVolSeismic.reshape(self.dim)) #need to transpose the axis to get height axis at axis = 0, while length (axis = 1), and width(axis = 2)
unrolledVolFault = np.fromfile(pjoin('train', 'fault', fileName),dtype=np.single)
reshapedVolFault = np.transpose(unrolledVolFault.reshape(self.dim))
for idx in range(reshapedVolSeismic.shape[2]):
seismicSections.append(reshapedVolSeismic[:, :, idx])
faultSections.append(reshapedVolFault[:, :, idx])
for idx in range(reshapedVolSeismic.shape[1]):
seismicSections.append(reshapedVolSeismic[:, idx, :])
faultSections.append(reshapedVolFault[:, idx, :])
self.seismicSections = seismicSections
self.faultSections = faultSections
def __len__(self):
return len(self.seismicSections)
def __getitem__(self, index):
X = self.seismicSections[index]
Y = self.faultSections[index]
return X, Y
Please Help!!!
Why not store only the 3D data in memory and let the __getitem__ method "slice" it on the fly?
class CachedVolumeDataset(Dataset):
def __init__(self, ...):
super(...)
self._volumes_x = # a list of 200 128x128x128 volumes
self._volumes_y = # a list of 200 128x128x128 volumes
def __len__(self):
return len(self._volumes_x) * (128 + 128)
def __getitem__(self, index):
# extract volume index from general index:
vidx = index // (128 + 128)
# extract slice index
sidx = index % (128 + 128)
if sidx < 128:
# first dim
x = self._volumes_x[vidx][:, :, sidx]
y = self._volumes_y[vidx][:, :, sidx]
else:
sidx -= 128
# second dim
x = self._volumes_x[vidx][:, sidx, :]
y = self._volumes_y[vidx][:, sidx, :]
return torch.squeeze(x), torch.squeeze(y)
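Usage would then look roughly like this (a sketch: it assumes __init__ has already loaded the 200 .dat cubes into torch tensors, as in the original generator):
from torch.utils.data import DataLoader

dataset = CachedVolumeDataset(...)  # fill in whatever __init__ needs to locate and load the volumes
loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)

for x, y in loader:
    # x and y are batches of 2D sections sliced on the fly, e.g. shape (16, 128, 128)
    ...
This avoids duplicating every voxel into two slice copies up front and removes the long preprocessing loop in __init__; each section is extracted only when it is actually requested.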
I am starting with the pose estimation tflite model for getting keypoints on humans.
https://www.tensorflow.org/lite/models/pose_estimation/overview
I have started with fitting a single image or a person and invoking the model:
img = cv.imread('photos\standing\\3.jpg')
img = tf.reshape(tf.image.resize(img, [257,257]), [1,257,257,3])
model = tf.lite.Interpreter('models\posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite')
model.allocate_tensors()
input_details = model.get_input_details()
output_details = model.get_output_details()
floating_model = input_details[0]['dtype'] == np.float32
if floating_model:
img = (np.float32(img) - 127.5) / 127.5
model.set_tensor(input_details[0]['index'], img)
model.invoke()
output_data = model.get_tensor(output_details[0]['index'])# o()
offset_data = model.get_tensor(output_details[1]['index'])
results = np.squeeze(output_data)
offsets_results = np.squeeze(offset_data)
print("output shape: {}".format(output_data.shape))
np.savez('sample3.npz', results, offsets_results)
But I am struggling with parsing the output correctly to get the coordinates/confidences of each body part. Does anyone have a Python example for interpreting this model's results? (For example: using them to map keypoints back onto the original image.)
My code (a snippet from a class which essentially takes the np array directly from the model output):
def get_keypoints(self, data):
height, width, num_keypoints = data.shape
keypoints = []
for keypoint in range(0, num_keypoints):
maxval = data[0][0][keypoint]
maxrow = 0
maxcol = 0
for row in range(0, width):
for col in range(0,height):
if data[row][col][keypoint] > maxval:
maxrow = row
maxcol = col
maxval = data[row][col][keypoint]
keypoints.append(KeyPoint(keypoint, maxrow, maxcol, maxval))
# keypoints = [Keypoint(x,y,z) for x,y,z in ]
return keypoints
def get_image_coordinates_from_keypoints(self, offsets):
height, width, depth = (257,257,3)
# [(x,y,confidence)]
coords = [{ 'point': k.body_part,
'location': (k.x / (width - 1)*width + offsets[k.y][k.x][k.index],
k.y / (height - 1)*height + offsets[k.y][k.x][k.index]),
'confidence': k.confidence}
for k in self.keypoints]
return coords
After matching the indexes to the parts, my output is:
Some of the coordinates here are negative, which can't be correct. Where is my mistake?
import numpy as np
For a pose estimation model which outputs a heatmap and offsets, the desired points can be obtained by:
Performing a sigmoid operation on the heatmap:
scores = sigmoid(heatmaps)
Each keypoint of the pose is usually represented by a 2-D matrix; the maximum value in that matrix corresponds to where the model thinks that point is located in the input image. Use a 2-D argmax (np.argmax plus np.unravel_index) to obtain the x and y index of that value in each matrix; the value itself represents the confidence:
x,y = np.unravel_index(np.argmax(scores[:,:,keypointindex]),
scores[:,:,keypointindex].shape)
confidences = scores[x,y,keypointindex]
That x,y is used to find the corresponding offset vector for calculating the final location of the keypoint:
offset_vector = (offsets[y,x,keypointindex], offsets[y,x,num_keypoints+keypointindex])
After you have obtained your keypoint coords and offsets, you can calculate the final position of the keypoint using:
image_positions = np.add(np.array(heatmap_positions) * output_stride, offset_vectors)
See this for determining how to get the output stride, if you don't already have it. The tflite pose estimation model has an output stride of 32.
Here is a function which takes the output from that pose estimation model and returns keypoints (the KeyPoint class it uses is shown below):
def get_keypoints(self, heatmaps, offsets, output_stride=32):
scores = sigmoid(heatmaps)
num_keypoints = scores.shape[2]
heatmap_positions = []
offset_vectors = []
confidences = []
for ki in range(0, num_keypoints ):
x,y = np.unravel_index(np.argmax(scores[:,:,ki]), scores[:,:,ki].shape)
confidences.append(scores[x,y,ki])
offset_vector = (offsets[y,x,ki], offsets[y,x,num_keypoints+ki])
heatmap_positions.append((x,y))
offset_vectors.append(offset_vector)
image_positions = np.add(np.array(heatmap_positions) * output_stride, offset_vectors)
keypoints = [KeyPoint(i, pos, confidences[i]) for i, pos in enumerate(image_positions)]
return keypoints
Keypoint class:
PARTS = {
0: 'NOSE',
1: 'LEFT_EYE',
2: 'RIGHT_EYE',
3: 'LEFT_EAR',
4: 'RIGHT_EAR',
5: 'LEFT_SHOULDER',
6: 'RIGHT_SHOULDER',
7: 'LEFT_ELBOW',
8: 'RIGHT_ELBOW',
9: 'LEFT_WRIST',
10: 'RIGHT_WRIST',
11: 'LEFT_HIP',
12: 'RIGHT_HIP',
13: 'LEFT_KNEE',
14: 'RIGHT_KNEE',
15: 'LEFT_ANKLE',
16: 'RIGHT_ANKLE'
}
class KeyPoint():
def __init__(self, index, pos, v):
x, y = pos
self.x = x
self.y = y
self.index = index
self.body_part = PARTS.get(index)
self.confidence = v
def point(self):
return int(self.y), int(self.x)
def to_string(self):
return 'part: {} location: {} confidence: {}'.format(
self.body_part, (self.x, self.y), self.confidence)
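One detail: sigmoid is not defined in the snippet above. A minimal NumPy version, plus a rough sketch of wiring the question's tflite outputs into get_keypoints (treating it as a free function, i.e. dropping the self parameter), could look like:
import numpy as np

def sigmoid(x):
    # elementwise logistic function applied to the raw heatmaps
    return 1 / (1 + np.exp(-x))

# output_data and offset_data come from the question's code above
heatmaps = np.squeeze(output_data)  # roughly (9, 9, 17) for the 257x257 model
offsets = np.squeeze(offset_data)   # roughly (9, 9, 34)
keypoints = get_keypoints(heatmaps, offsets, output_stride=32)
for kp in keypoints:
    print(kp.to_string())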
This function receives a list of numpy arrays that consist of cropped parts of an image. The crops are all the same size, except for the right-most and bottom-most images which might be of smaller size.
predictions[2] would return the 3rd sub-image that was cropped from the original image. Each crop is a numpy array. There are WxH crops, enumerated from left to right, top to bottom (so if there are 4 sub-images constituting the width, the 5th image in predictions would be the first sub-image on the left from the 2nd row of sub-images).
crops contains the information needed to determine how many horizontal and vertical images make up the reconstructed image. crops[2][3] contains the image cropped 3rd from the top and 4th from the left.
The images contained in crops are of smaller dimension than the ones in predictions (I am basically making a model that increases the resolution of images). The reconstructed image is built from the images in predictions, arranged in the same order as the ones in crops.
def reconstruct(predictions, crops):
if len(crops) != 0:
print("use crops")
# TODO: properly extract the size of the full image
width_length = 0
height_length = 0
full_image = np.empty(shape=(height_length, width_length))
print(full_image.shape)
# TODO: properly merge the crops back into a single image
for height in range(len(predictions[0])):
for width in range(len(predictions)):
# concatenate here
print(height, width)
return full_image
I was going to use numpy.concatenate, but according to other answers I've seen on SO it wouldn't be an efficient way of doing it (apparently numpy would just allocate a new array, copy the old data into it, append the new data, and so on for every call). So now I'm left wondering how to properly merge my multiple images into a single image. My current idea was to create a Python list of the proper shape and progressively fill it with each numpy array's data, but even then I'm not sure it's the right approach.
Here is more or less the kind of bunch of images I'm trying to concatenate into a single image:
Here is the expected result:
And to help you out with understanding what more might be available to you, here is some more code:
def predict(args):
model = load_model(save_dir + '/' + args.model)
image = skimage.io.imread(tests_path + args.image)
predictions = []
images = []
crops = seq_crop(image) # crops into multiple sub-parts the image based on 'input_' constants
for i in range(len(crops)): # amount of vertical crops
for j in range(len(crops[0])): # amount of horizontal crops
current_image = crops[i][j]
images.append(current_image)
# Hack because GPU can only handle one image at a time
input_img = (np.expand_dims(images[p], 0)) # Add the image to a batch where it's the only member
predictions.append(model.predict(input_img)[0]) # returns a list of lists, one for each image in the batch
return predictions, image, crops
# adapted from: https://stackoverflow.com/a/52463034/9768291
def seq_crop(img):
"""
To crop the whole image in a list of sub-images of the same size.
Size comes from "input_" variables in the 'constants' (Evaluation).
Padding with 0 the Bottom and Right image.
:param img: input image
:return: list of sub-images with defined size
"""
width_shape = ceildiv(img.shape[1], input_width)
height_shape = ceildiv(img.shape[0], input_height)
sub_images = [] # will contain all the cropped sub-parts of the image
for j in range(height_shape):
horizontal = []
for i in range(width_shape):
horizontal.append(crop_precise(img, i*input_width, j*input_height, input_width, input_height))
sub_images.append(horizontal)
return sub_images
def crop_precise(img, coord_x, coord_y, width_length, height_length):
"""
To crop a precise portion of an image.
When trying to crop outside of the boundaries, the input is padded with zeros.
:param img: image to crop
:param coord_x: width coordinate (top left point)
:param coord_y: height coordinate (top left point)
:param width_length: width of the cropped portion starting from coord_x
:param height_length: height of the cropped portion starting from coord_y
:return: the cropped part of the image
"""
tmp_img = img[coord_y:coord_y + height_length, coord_x:coord_x + width_length]
return float_im(tmp_img) # From [0,255] to [0.,1.]
# from https://stackoverflow.com/a/17511341/9768291
def ceildiv(a, b):
"""
To get the ceiling of a division
:param a:
:param b:
:return:
"""
return -(-a // b)
if __name__ == '__main__':
preds, original, crops = predict(args) # returns the predictions along with the original
# TODO: reconstruct image
enhanced = reconstruct(preds, crops) # reconstructs the enhanced image from predictions
EDIT:
The answer worked. Here is the version I've used:
# adapted from https://stackoverflow.com/a/52733370/9768291
def reconstruct(predictions, crops):
# unflatten predictions
def nest(data, template):
data = iter(data)
return [[next(data) for _ in row] for row in template]
predictions = nest(predictions, crops)
H = np.cumsum([x[0].shape[0] for x in predictions])
W = np.cumsum([x.shape[1] for x in predictions[0]])
D = predictions[0][0]
recon = np.empty((H[-1], W[-1], D.shape[2]), D.dtype)
for rd, rs in zip(np.split(recon, H[:-1], 0), predictions):
for d, s in zip(np.split(rd, W[:-1], 1), rs):
d[...] = s
return recon
The most convenient approach is probably np.block:
import numpy as np
from scipy import misc
from PIL import Image
# get example picture
data = misc.face()
# chop it up
I, J = map(np.arange, (200, 200), data.shape[:2], (200, 200))
chops = [np.split(row, J, axis=1) for row in np.split(data, I, axis=0)]
# do something with the bits
predictions = [chop-(i+j)*(chop>>3) for j, row in enumerate(chops) for i, chop in enumerate(row)]
# unflatten predictions
def nest(data, template):
data = iter(data)
return [[next(data) for _ in row] for row in template]
pred_lol = nest(predictions, chops)
# almost builtin reconstruction
def np_block_2D(chops):
return np.block([[[x] for x in row] for row in chops])
recon = np_block_2D(pred_lol)
Image.fromarray(recon).save('demo.png')
Reconstructed manipulated image:
But we can do faster than that by avoiding intermediary arrays. Instead, we copy into a preallocated array:
def speed_block_2D(chops):
H = np.cumsum([x[0].shape[0] for x in chops])
W = np.cumsum([x.shape[1] for x in chops[0]])
D = chops[0][0]
recon = np.empty((H[-1], W[-1], D.shape[2]), D.dtype)
for rd, rs in zip(np.split(recon, H[:-1], 0), chops):
for d, s in zip(np.split(rd, W[:-1], 1), rs):
d[...] = s
return recon
Timings, also including a generalized ND-ready variant of each method:
numpy 2D: 0.991 ms
prealloc 2D: 0.389 ms
numpy general: 1.021 ms
prealloc general: 0.448 ms
Code for general case and timings:
def np_block(chops):
d = 0
tl = chops
while isinstance(tl, list):
tl = tl[0]
d += 1
if d < tl.ndim:
def adjust_depth(L):
if isinstance(L, list):
return [adjust_depth(l) for l in L]
else:
ret = L
for j in range(d, tl.ndim):
ret = [ret]
return ret
chops = adjust_depth(chops)
return np.block(chops)
def speed_block(chops):
def line(src, i):
while isinstance(src, list):
src = src[0]
return src.shape[i]
def hyper(src, i):
src = iter(src)
fst = next(src)
if isinstance(fst, list):
res, dtype, szs = hyper(fst, i+1)
szs.append([res[i], *(line(s, i) for s in src)])
res[i] = sum(szs[-1])
return res, dtype, szs
res = np.array(fst.shape)
szs = [res[i], *(s.shape[i] for s in src)]
res[i] = sum(szs)
return res, fst.dtype, [szs]
shape, dtype, szs = hyper(chops, 0)
recon = np.empty(shape, dtype)
def cpchp(dst, src, i, szs=None):
szs = np.array(hyper(src, i)[2]) if szs is None else szs
dst = np.split(dst, np.cumsum(szs[-1][:-1]), i)
if isinstance(src[0], list):
szs = szs[:-1]
for ds, sr in zip(dst, src):
cpchp(ds, sr, i+1, szs)
szs = None
else:
for ds, sr in zip(dst, src):
ds[...] = sr
cpchp(recon, chops, 0, np.array(szs))
return recon
from timeit import timeit
T = (timeit(lambda: speed_block(pred_lol), number=1000),
timeit(lambda: np_block(pred_lol), number=1000),
timeit(lambda: speed_block_2D(pred_lol), number=1000),
timeit(lambda: np_block_2D(pred_lol), number=1000))
assert (np.all(speed_block(pred_lol)==np_block(pred_lol)) and
np.all(speed_block_2D(pred_lol)==np_block(pred_lol)) and
np.all(speed_block(pred_lol)==np_block_2D(pred_lol)))
print(f"""
numpy 2D: {T[3]:10.3f} ms
prealloc 2D: {T[2]:10.3f} ms
numpy general: {T[1]:10.3f} ms
prealloc general: {T[0]:10.3f} ms
""")
Something strange happens when the following code is executed: the percentage which shows completion goes over 100%. The code has been running for about 45 minutes now.
This is the code I adapted to Python 3 after the errors the original one gave me.
'''
Read and pre-process SD19 characters text file.
Blog post : http://seeb0h.github.io/howto/preprocess-sd19-dataset-for-digits-learning/
Characters in txt file are in 128x128 images with much padded zeros.
It may be suitable for learning to have smaller, deskewed, trimmed, squared ones
Following preprocessing is applied to the dataset:
- Read glyph (see read_glyph())
- Moment-based image deskew (see deskew())
- Trim zeros rows and columns (see trim_padding())
- Resize image while keeping aspect ratio (see resize_with_constant_ratio())
- Pad zeros in order to get a square image (see pad_digit())
Extends original code from http://asciirain.com/wordpress/2013/04/08/exploring-sd19-glyph-recognition-with-randomforests/
Usage:
preprocess_sd19_text.py
'''
#
import os
import re
import sys
import pickle
import cv2
import numpy as np
import math
def read_glyph(_line):
"""Extract digit from the text file
Parameters
----------
_line : string
current line in SD19 text file
Returns
-------
digit : np.array
2D digit 128x128
label : int
the label
"""
match = re.search("^(\S+) (\d+)", _line)
label = match.group(1)
vector = list(match.group(2))
vector = [int(x) for x in vector]
label = ord(label)
label = str(symbol_map[label]) #changed from int to str
digit = np.array(vector, 'float32')
digit = (digit*-1.+1.).reshape(128, 128)
return digit, label
def deskew(img):
"""Deskew digit
Parameters
----------
img : np.array
2D digit array
Returns
-------
dst : Deskewed digit
"""
m = cv2.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
rot_mat = np.float32([[1, skew, -0.5*max(img.shape[0], img.shape[1])*skew], [0, 1, 0]])
img = cv2.warpAffine(img, rot_mat, (img.shape[0], img.shape[1]), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
return img
def resize_with_constant_ratio(img, char_dim):
"""Resize image while keeping aspect ratio. Max dim is char_dim
pad_dim is applied in order to have derivative friendly image
Parameters
----------
img : np.array
2D digit array
char_dim : int
dst dim
Returns
-------
dst : resized digit
"""
roi_h = img.shape[0]
roi_w = img.shape[1]
max_dim = max(roi_w, roi_h)
pad_dim = 2
scale = float(char_dim-pad_dim) / max_dim
if roi_w >= roi_h:
new_w = int(char_dim-pad_dim)
new_h = int(roi_h * scale)
else:
new_w = int(roi_w * scale)
new_h = int(char_dim-pad_dim)
dst = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
return dst
def trim_padding(img):
"""Trim zeros rows and columns
Parameters
----------
img : np.array
2D digit array
Returns
-------
dst : trimmed digit
"""
mask_row = np.all(np.equal(img, 0), axis=1)
dst = img[~mask_row]
mask_col = np.all(np.equal(dst, 0), axis=0)
dst = dst[:, ~mask_col]
return dst
def pad_digit(img, char_dim):
"""Pad zeros in order to get a square char_dimxchar_dim image
Parameters
----------
img : np.array
2D digit array
char_dim : int
image dim
Returns
-------
dst : padded digit
"""
pad_h = char_dim-img.shape[0]
pad_w = char_dim-img.shape[1]
pad_h_b = math.floor(pad_h/2)
pad_h_t = pad_h - pad_h_b
pad_w_r = math.floor(pad_w/2)
pad_w_l = pad_w - pad_w_r
dst = np.hstack(( img, np.zeros((img.shape[0], pad_w_r))))
dst = np.hstack(( np.zeros((dst.shape[0], pad_w_l)), dst))
dst = np.vstack(( dst, np.zeros((pad_h_b, dst.shape[1]))))
dst = np.vstack(( np.zeros((pad_h_t, dst.shape[1])), dst))
return dst
def print_overwrite(text):
"""Print with overwrite (for progression counter)
Parameters
----------
text : string
text to display
"""
delete = "\b" * (len (text)+1)
print ("{0}{1}".format(delete, text)),
if __name__ == '__main__':
print (__doc__)
sd19_filename = "sd19-binary_digits.txt"
data = open(sd19_filename, "r")
dataset = []
symbol_map = dict([(x, chr(x)) for x in list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))]) #added list() to every range
current_dir = os.curdir
num_records = 0
num_lines = 402953
char_dim=28
pickle_name = "SD19_" + str(char_dim) + "x" + str(char_dim) + "_"
for line in data:
num_records += 1
if num_records%20000 == 0:
with open(os.path.join(current_dir, pickle_name +\
str(num_records) + ".pickle"), 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
print_overwrite("num_records : {}/{} - {:5.2f}%"\
.format(num_records, num_lines, num_records*1./num_lines*100))
digit, label = read_glyph(line)
digit_deskewed = deskew(digit)
digit_trimmed = trim_padding(digit_deskewed)
digit_resized = resize_with_constant_ratio(digit_trimmed, char_dim)
digit_padded = pad_digit(digit_resized, char_dim)
item = []
item.append((digit_padded*255).astype('uint8'))
item.append(label)
dataset.append(item)
with open(os.path.join(current_dir, pickle_name +\
str(num_lines) + ".pickle"), 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
It is used to create pickle files from a .txt file containing binary images. For more, see here.
And my "error"...
By now it is at 135% and the last pickle file is around 400 MB...
Why does this happen? Also, it keeps creating files (it should have stopped at 400,000 records or a little more).