Create pydicom file from numpy array - python

I'm trying to create a new dicom image from a standard-sized (512 x 512 or 256 x 256) numpy array.
import dicom, dicom.UID
from dicom.dataset import Dataset, FileDataset
def write_dicom(pixel_array, filename):
    file_meta = Dataset()
    ds = FileDataset(filename, {}, file_meta=file_meta, preamble="\0"*128)
    ds.PixelData = pixel_array.tostring()
    ds.save_as(filename)
    return

if __name__ == "__main__":
    import numpy as np
    pixel_array = np.tile(np.arange(256).reshape(16,16), (16,16)) * 4
    write_dicom(pixel_array, 'pretty.dcm')

2020 update :)
None of these answers worked for me. This is what I ended up with to save a valid monochrome 16bpp MR slice, which is displayed correctly at least in Slicer, RadiAnt and MicroDicom:
import numpy as np
import pydicom
from pydicom.dataset import Dataset, FileDataset
from pydicom.uid import ExplicitVRLittleEndian
import pydicom._storage_sopclass_uids

# image2d is the 2D numpy array holding the slice to be written
image2d = image2d.astype(np.uint16)
print("Setting file meta information...")
# Populate required values for file meta information
meta = pydicom.Dataset()
meta.MediaStorageSOPClassUID = pydicom._storage_sopclass_uids.MRImageStorage
meta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid()
meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
ds = Dataset()
ds.file_meta = meta
ds.is_little_endian = True
ds.is_implicit_VR = False
ds.SOPClassUID = pydicom._storage_sopclass_uids.MRImageStorage
ds.PatientName = "Test^Firstname"
ds.PatientID = "123456"
ds.Modality = "MR"
ds.SeriesInstanceUID = pydicom.uid.generate_uid()
ds.StudyInstanceUID = pydicom.uid.generate_uid()
ds.FrameOfReferenceUID = pydicom.uid.generate_uid()
ds.BitsStored = 16
ds.BitsAllocated = 16
ds.SamplesPerPixel = 1
ds.HighBit = 15
ds.ImagesInAcquisition = "1"
ds.Rows = image2d.shape[0]
ds.Columns = image2d.shape[1]
ds.InstanceNumber = 1
ds.ImagePositionPatient = r"0\0\1"
ds.ImageOrientationPatient = r"1\0\0\0\-1\0"
ds.ImageType = r"ORIGINAL\PRIMARY\AXIAL"
ds.RescaleIntercept = "0"
ds.RescaleSlope = "1"
ds.PixelSpacing = r"1\1"
ds.PhotometricInterpretation = "MONOCHROME2"
ds.PixelRepresentation = 1
pydicom.dataset.validate_file_meta(ds.file_meta, enforce_standard=True)
print("Setting pixel data...")
ds.PixelData = image2d.tobytes()
ds.save_as(r"out.dcm")
Note the following:

- Going through the FileDataset constructor, as the pydicom docs suggest, was failing to create a valid header for me.
- validate_file_meta will create some of the missing header elements for you (such as the file meta information version).
- You need to specify endianness and explicit/implicit VR twice :/
- This method also lets you create a valid volume, as long as you update ImagePositionPatient and InstanceNumber for each slice accordingly (a short sketch follows these notes).
- Make sure your numpy array is cast to a data type that has the same number of bits as your BitsStored.
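For the last note, here is a minimal sketch of the per-slice bookkeeping, assuming image3d is a (num_slices, rows, cols) uint16 array, ds and meta are built as above, and a 1 mm slice step along z (all of which are assumptions):
# sketch only: image3d and the 1 mm slice spacing are assumptions
for i in range(image3d.shape[0]):
    ds.SOPInstanceUID = pydicom.uid.generate_uid()
    ds.file_meta.MediaStorageSOPInstanceUID = ds.SOPInstanceUID
    ds.InstanceNumber = i + 1
    ds.ImagePositionPatient = [0, 0, i]      # advance one slice along z
    ds.PixelData = image3d[i].tobytes()
    ds.save_as(f"slice_{i:03d}.dcm")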

Here is a functional version of the code I needed to write. It will write a 16-bit grayscale DICOM image from a given 2D array of pixels. According to the DICOM standard, the UIDs should be unique for each image and series, which this code doesn't worry about, because I don't know what the UIDs actually do. If anyone else does, I'll be happy to add it in.
import dicom, dicom.UID
from dicom.dataset import Dataset, FileDataset
import numpy as np
import datetime, time
def write_dicom(pixel_array, filename):
    """
    INPUTS:
    pixel_array: 2D numpy ndarray. If pixel_array is larger than 2D, errors.
    filename: string name for the output file.
    """

    ## This code block was taken from the output of a MATLAB secondary
    ## capture. I do not know what the long dotted UIDs mean, but
    ## this code works.
    file_meta = Dataset()
    file_meta.MediaStorageSOPClassUID = 'Secondary Capture Image Storage'
    file_meta.MediaStorageSOPInstanceUID = '1.3.6.1.4.1.9590.100.1.1.111165684411017669021768385720736873780'
    file_meta.ImplementationClassUID = '1.3.6.1.4.1.9590.100.1.0.100.4.0'
    ds = FileDataset(filename, {}, file_meta=file_meta, preamble="\0"*128)
    ds.Modality = 'WSD'
    ds.ContentDate = str(datetime.date.today()).replace('-', '')
    ds.ContentTime = str(time.time())  # milliseconds since the epoch
    ds.StudyInstanceUID = '1.3.6.1.4.1.9590.100.1.1.124313977412360175234271287472804872093'
    ds.SeriesInstanceUID = '1.3.6.1.4.1.9590.100.1.1.369231118011061003403421859172643143649'
    ds.SOPInstanceUID = '1.3.6.1.4.1.9590.100.1.1.111165684411017669021768385720736873780'
    ds.SOPClassUID = 'Secondary Capture Image Storage'
    ds.SecondaryCaptureDeviceManufacturer = 'Python 2.7.3'

    ## These are the necessary imaging components of the FileDataset object.
    ds.SamplesPerPixel = 1
    ds.PhotometricInterpretation = "MONOCHROME2"
    ds.PixelRepresentation = 0
    ds.HighBit = 15
    ds.BitsStored = 16
    ds.BitsAllocated = 16
    ds.SmallestImagePixelValue = '\x00\x00'
    ds.LargestImagePixelValue = '\xff\xff'
    ds.Rows = pixel_array.shape[0]     # Rows is the first numpy axis
    ds.Columns = pixel_array.shape[1]  # Columns is the second numpy axis
    if pixel_array.dtype != np.uint16:
        pixel_array = pixel_array.astype(np.uint16)
    ds.PixelData = pixel_array.tostring()
    ds.save_as(filename)
    return

if __name__ == "__main__":
    # pixel_array = np.arange(256*256).reshape(256,256)
    # pixel_array = np.tile(np.arange(256).reshape(16,16),(16,16))
    x = np.arange(16).reshape(16, 1)
    pixel_array = (x + x.T) * 32
    pixel_array = np.tile(pixel_array, (16, 16))
    write_dicom(pixel_array, 'pretty.dcm')
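Regarding the UID question: with current pydicom, globally unique UIDs can be generated instead of reusing the hard-coded MATLAB ones. A sketch (the legacy dicom package used above may expose an equivalent helper under dicom.UID, so treat the import as an assumption):
# sketch with modern pydicom; adapt the import if you are on the legacy `dicom` package
from pydicom.uid import generate_uid

ds.StudyInstanceUID = generate_uid()
ds.SeriesInstanceUID = generate_uid()                  # one per series
ds.SOPInstanceUID = generate_uid()                     # one per image
file_meta.MediaStorageSOPInstanceUID = ds.SOPInstanceUID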

I was able to reduce @Corvin's great answer even more. Here is a minimalist code example allowing one to save a (dummy) 3D numpy array to a valid DICOM image that can be opened with Amide:
#!/usr/bin/python3
import numpy
import pydicom
import pydicom._storage_sopclass_uids
# dummy image
image = numpy.random.randint(2**16, size=(512, 512, 512), dtype=numpy.uint16)
# metadata
fileMeta = pydicom.Dataset()
fileMeta.MediaStorageSOPClassUID = pydicom._storage_sopclass_uids.CTImageStorage
fileMeta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid()
fileMeta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
# dataset
ds = pydicom.Dataset()
ds.file_meta = fileMeta
ds.Rows = image.shape[0]
ds.Columns = image.shape[1]
ds.NumberOfFrames = image.shape[2]
ds.PixelSpacing = [1, 1] # in mm
ds.SliceThickness = 1 # in mm
ds.BitsAllocated = 16
ds.PixelRepresentation = 1
ds.PixelData = image.tobytes()
# save
ds.save_as('image.dcm', write_like_original=False)
As one might observe, a lot of fields are reported as missing if the output image.dcm file is passed to dciodvfy. Filling in these fields is left to the reader ;)
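For reference, here is a non-exhaustive sketch of the kind of attributes dciodvfy tends to flag for this example; the keywords are standard DICOM attributes, but the chosen values are placeholders to adapt:
# sketch only: placeholder values, not a complete CT IOD
ds.SOPClassUID = fileMeta.MediaStorageSOPClassUID
ds.SOPInstanceUID = fileMeta.MediaStorageSOPInstanceUID
ds.Modality = "CT"
ds.SeriesInstanceUID = pydicom.uid.generate_uid()
ds.StudyInstanceUID = pydicom.uid.generate_uid()
ds.SamplesPerPixel = 1
ds.PhotometricInterpretation = "MONOCHROME2"
ds.BitsStored = 16
ds.HighBit = 15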

The above example works, but it causes many tools to complain about the DICOMs, and they cannot even be read at all as a stack using itk/SimpleITK. The best way I have found for making DICOMs from numpy is to use the SimpleITK tools and generate the DICOMs slice by slice. A basic example (https://github.com/zivy/SimpleITK/blob/8e94451e4c0e90bcc6a1ffdd7bc3d56c81f58d80/Examples/DicomSeriesReadModifyWrite/DicomSeriesReadModifySeriesWrite.py) shows how to load in a stack, perform a transformation and then resave the files, but this can easily be modified to start from a numpy array instead (a slice-by-slice writing sketch is given after the tag listing below):
import SimpleITK as sitk
filtered_image = sitk.GetImageFromArray(my_numpy_array)
The number of tags ultimately written to each output image is quite large, so manually creating all of them is tedious. Additionally, SimpleITK supports 8-, 16-, and 32-bit images as well as RGB, so it is much easier than making them in pydicom. Here are the tags of one output slice:
(0008, 0008) Image Type CS: ['DERIVED', 'SECONDARY']
(0008, 0016) SOP Class UID UI: Secondary Capture Image Storage
(0008, 0018) SOP Instance UID UI: 1.2.826.0.1.3680043.2.1125.1.35596048796922805578234000521866725
(0008, 0020) Study Date DA: '20170803'
(0008, 0021) Series Date DA: '20170803'
(0008, 0023) Content Date DA: 0
(0008, 0030) Study Time TM: '080429.171808'
(0008, 0031) Series Time TM: '080429'
(0008, 0033) Content Time TM: 0
(0008, 0050) Accession Number SH: ''
(0008, 0060) Modality CS: 'OT'
(0008, 0064) Conversion Type CS: 'WSD'
(0008, 0090) Referring Physician's Name PN: ''
(0010, 0010) Patient's Name PN: ''
(0010, 0020) Patient ID LO: ''
(0010, 0030) Patient's Birth Date DA: ''
(0010, 0040) Patient's Sex CS: ''
(0018, 2010) Nominal Scanned Pixel Spacing DS: ['1', '3']
(0020, 000d) Study Instance UID UI: 1.2.826.0.1.3680043.2.1125.1.33389357207068897066210100430826006
(0020, 000e) Series Instance UID UI: 1.2.826.0.1.3680043.2.1125.1.51488923827429438625199681257282809
(0020, 0010) Study ID SH: ''
(0020, 0011) Series Number IS: ''
(0020, 0013) Instance Number IS: ''
(0020, 0020) Patient Orientation CS: ''
(0020, 0052) Frame of Reference UID UI: 1.2.826.0.1.3680043.2.1125.1.35696880630664441938326682384062489
(0028, 0002) Samples per Pixel US: 1
(0028, 0004) Photometric Interpretation CS: 'MONOCHROME2'
(0028, 0010) Rows US: 40
(0028, 0011) Columns US: 50
(0028, 0100) Bits Allocated US: 32
(0028, 0101) Bits Stored US: 32
(0028, 0102) High Bit US: 31
(0028, 0103) Pixel Representation US: 1
(0028, 1052) Rescale Intercept DS: "0"
(0028, 1053) Rescale Slope DS: "1"
(0028, 1054) Rescale Type LO: 'US'
(7fe0, 0010) Pixel Data OW: Array of 8000 bytes
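For completeness, a minimal slice-by-slice writing sketch with SimpleITK; the volume, the tags set, and their values are assumptions, and more tags can be added per slice with SetMetaData in the same way:
import numpy as np
import SimpleITK as sitk

my_numpy_array = np.random.randint(0, 1000, size=(8, 256, 256)).astype(np.int16)  # placeholder volume
volume = sitk.GetImageFromArray(my_numpy_array)

writer = sitk.ImageFileWriter()
writer.KeepOriginalImageUIDOn()                      # keep the UIDs/tags we set per slice
for i in range(volume.GetDepth()):
    slice_i = volume[:, :, i]
    slice_i.SetMetaData("0008|0060", "OT")           # Modality
    slice_i.SetMetaData("0020|0013", str(i + 1))     # Instance Number
    writer.SetFileName(f"slice_{i:03d}.dcm")
    writer.Execute(slice_i)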

Corvin's 2020 update almost worked for me.
The meta was still not written to the file, so when reading it the following exception was raised:
pydicom.errors.InvalidDicomError: File is missing DICOM File Meta Information header or the 'DICM' prefix is missing from the header.
In order to fix this and write the meta into the dicom file, I needed to add enforce_standard=True to the save_as() call:
ds.save_as(filename=out_filename, enforce_standard=True)

DICOM is a really complicated format. There are many dialects, and compatibility is rather a question of luck. You could alternatively try nibabel; maybe its dialect is more appealing to RadiAnt or MicroDicom.
In general, I'd recommend using the NIfTI format whenever possible. Its standard is much more concise, and incompatibilities are rare. nibabel supports it as well.
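If NIfTI is an option, a minimal nibabel sketch (the array and the identity affine are placeholders; use your real voxel-to-world matrix):
import numpy as np
import nibabel as nib

volume = np.random.rand(64, 64, 32).astype(np.float32)   # placeholder volume
img = nib.Nifti1Image(volume, affine=np.eye(4))           # placeholder affine
nib.save(img, "volume.nii.gz")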

One working config for those who need it, and one question.
The question is in another thread: Create a Dicom from multiple jpg images.
What worked for me was greyscale without compression; every attempt at compression fails miserably and I don't know why (see the note after the code):
# Imports needed below; the helper `decode` comes from the thread linked above
# and is assumed to return the decoded JPEG as a numpy array
import pydicom
import pydicom._storage_sopclass_uids
from pydicom.dataset import Dataset
from pydicom.encaps import encapsulate   # only used by the commented-out compression attempts
from PIL import Image

# Populate required values for file meta information
meta = pydicom.Dataset()
meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
meta.MediaStorageSOPClassUID = pydicom._storage_sopclass_uids.MRImageStorage
meta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid()
# build dataset
ds = Dataset()
ds.file_meta = meta
ds.fix_meta_info()
# unknown options
ds.is_little_endian = True
ds.is_implicit_VR = False
ds.SOPClassUID = pydicom._storage_sopclass_uids.MRImageStorage
ds.SeriesInstanceUID = pydicom.uid.generate_uid()
ds.StudyInstanceUID = pydicom.uid.generate_uid()
ds.FrameOfReferenceUID = pydicom.uid.generate_uid()
ds.BitsStored = 16
ds.BitsAllocated = 16
ds.SamplesPerPixel = 1
ds.HighBit = 15
ds.ImagesInAcquisition = "1"
ds.InstanceNumber = 1
ds.ImagePositionPatient = r"0\0\1"
ds.ImageOrientationPatient = r"1\0\0\0\-1\0"
ds.ImageType = r"ORIGINAL\PRIMARY\AXIAL"
ds.RescaleIntercept = "0"
ds.RescaleSlope = "1"
ds.PixelRepresentation = 1
# Case options
ds.PatientName = "Anonymous"
ds.PatientID = "123456"
ds.Modality = "MR"
ds.StudyDate = '20200225'
ds.ContentDate = '20200225'
# convert image to grayscale
img = Image.open(filename).convert('L')
img.save(filename)
# open image, decode and ensure_even stream
with open(filename, 'rb') as f:
    arr = decode(f)

def ensure_even(stream):
    # Very important for some viewers
    if len(stream) % 2:
        return stream + b"\x00"
    return stream
# required for pixel handler
ds.BitsStored = 8
ds.BitsAllocated = 8
ds.HighBit = 7
ds.PixelRepresentation = 0
# grayscale without compression WORKS
ds.PhotometricInterpretation = "MONOCHROME2"
ds.SamplesPerPixel = 1 # 1 color = 1 sample per pixel
ds.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
ds.PixelData = ensure_even(arr.tobytes())
# JPEGBaseline compressed DOES NOT WORK
# ds.PixelData = encapsulate([ensure_even(arr.tobytes())])
# ds.PhotometricInterpretation = "YBR_FULL"
# ds.SamplesPerPixel = 3 # 3 colors = 3 sampleperpixel
# ds.file_meta.TransferSyntaxUID = pydicom.uid.JPEGBaseline
# ds.compress(pydicom.uid.JPEGBaseline)
# JPEGExtended compressed DOES NOT WORK
# ds.PixelData = encapsulate([ensure_even(arr.tobytes())])
# ds.PhotometricInterpretation = "YBR_FULL_422"
# ds.SamplesPerPixel = 3 # 3 colors = 3 sampleperpixel
# ds.file_meta.TransferSyntaxUID = pydicom.uid.JPEGExtended
# ds.compress(pydicom.uid.JPEGExtended)
# JPEG2000 compressed DOES NOT WORK
# ds.PhotometricInterpretation = "RGB"
# ds.SamplesPerPixel = 3 # 3 colors = 3 sampleperpixel
# ds.file_meta.TransferSyntaxUID = pydicom.uid.JPEG2000
# ds.PixelData = encapsulate([ensure_even(arr.tobytes())])
# ds.compress(pydicom.uid.JPEG2000)
# Image shape
ds['PixelData'].is_undefined_length = False
array_shape = arr.shape
ds.Rows = array_shape[0]
ds.Columns = array_shape[1]
# validate and save
pydicom.dataset.validate_file_meta(ds.file_meta, enforce_standard=True)
new_filename = filename.replace('.jpg', name + '.dcm')
ds.save_as(new_filename, write_like_original=False)
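A note on the failing compression attempts: the JPEG transfer syntaxes need an extra encoding plugin (e.g. pylibjpeg or python-gdcm) to be installed, which may be why they fail here; this is an assumption, not something verified against the code above. As a sketch, RLE Lossless can be encoded by pydicom (>= 2.2) itself without plugins:
# sketch: compress the uncompressed dataset built above with RLE Lossless
from pydicom.uid import RLELossless

ds.compress(RLELossless)          # re-encodes PixelData and updates the transfer syntax
ds.save_as(new_filename, write_like_original=False)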

For a 3D CT scan, you can use the following code
def vol_to_dicom_for_ct(path_img_ct, patient_name, patient_id, path_dicom):
    """
    Converts a .nrrd/.mha/.nifti file into its .dcm files

    Params
    ------
    path_img_ct: str, the path of the .nrrd/.mha/.nifti file
    patient_name: str
    patient_id: str
    path_dicom: str, the final output directory

    Note: Verify the output with dciodvfy
    - Ref 1: https://www.dclunie.com/dicom3tools/workinprogress/index.html
    - Ref 2: https://manpages.debian.org/unstable/dicom3tools/dciodvfy.1.en.html
    """
    try:
        import sys
        import copy
        import random
        import shutil
        import traceback          # needed by the except clause below
        import subprocess
        import numpy as np
        from pathlib import Path  # Path is used throughout this function

        if Path(path_img_ct).exists():
            try:
                import pydicom
                import pydicom._storage_sopclass_uids
            except:
                subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', 'pydicom'])
                import pydicom
            try:
                import SimpleITK as sitk
            except:
                subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', 'SimpleITK'])  # 2.1.1
                import SimpleITK as sitk
            try:
                import matplotlib.pyplot as plt
            except:
                subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', 'matplotlib'])  # 2.1.1
                import matplotlib.pyplot as plt

            # Step 0 - Create save directory
            if Path(path_dicom).exists():
                shutil.rmtree(path_dicom)
            Path(path_dicom).mkdir(exist_ok=True, parents=True)

            # Step 1 - Get volume params
            img_ct = sitk.ReadImage(str(path_img_ct))
            img_spacing = tuple(img_ct.GetSpacing())
            img_origin = tuple(img_ct.GetOrigin())  # --> dicom.ImagePositionPatient
            img_array = sitk.GetArrayFromImage(img_ct).astype(np.int16)  # [D,H,W]

            # Step 2 - Create dicom dataset
            ds = pydicom.dataset.Dataset()
            ds.FrameOfReferenceUID = pydicom.uid.generate_uid()  # this will stay the same for all .dcm files of a volume

            # Step 2.1 - Modality details
            ds.SOPClassUID = pydicom._storage_sopclass_uids.CTImageStorage
            ds.Modality = 'CT'
            ds.ImageType = ['ORIGINAL', 'PRIMARY', 'AXIAL']

            # Step 2.2 - Image details
            ds.PixelSpacing = [float(img_spacing[0]), float(img_spacing[1])]
            ds.SliceThickness = str(img_spacing[-1])
            ds.Rows = img_array.shape[1]
            ds.Columns = img_array.shape[2]
            ds.PatientPosition = 'HFS'
            ds.ImageOrientationPatient = [1, 0, 0, 0, 1, 0]
            ds.PositionReferenceIndicator = 'SN'
            ds.SamplesPerPixel = 1
            ds.PhotometricInterpretation = 'MONOCHROME2'
            ds.BitsAllocated = 16
            ds.BitsStored = 16
            ds.HighBit = 15
            ds.PixelRepresentation = 1
            ds.RescaleIntercept = "0.0"
            ds.RescaleSlope = "1.0"
            ds.RescaleType = 'HU'

            # Step 3.1 - Metadata
            fileMeta = pydicom.Dataset()
            fileMeta.MediaStorageSOPClassUID = pydicom._storage_sopclass_uids.CTImageStorage
            fileMeta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid()  # this will change for each .dcm file of a volume
            fileMeta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
            ds.file_meta = fileMeta

            # Step 3.2 - Include study details
            ds.StudyInstanceUID = pydicom.uid.generate_uid()
            ds.StudyDescription = ''
            ds.StudyDate = '19000101'                  # needed to create DICOMDIR
            ds.StudyID = str(random.randint(0, 1000))  # needed to create DICOMDIR

            # Step 3.3 - Include series details
            ds.SeriesInstanceUID = pydicom.uid.generate_uid()
            ds.SeriesDescription = ''
            ds.SeriesNumber = str(random.randint(0, 1000))  # needed to create DICOMDIR

            # Step 3.4 - Include patient details
            ds.PatientName = patient_name
            ds.PatientID = patient_id

            # Step 3.5 - Manufacturer details
            ds.Manufacturer = 'MICCAI2015'
            ds.ReferringPhysicianName = 'Mody'  # needed for identification in RayStation
            ds.ManufacturerModelName = 'test_offsite'

            # Step 4 - Make slices
            for slice_id in range(img_array.shape[0]):
                # Step 4.1 - Slice identifier
                random_uuid = pydicom.uid.generate_uid()
                ds.file_meta.MediaStorageSOPInstanceUID = random_uuid
                ds.SOPInstanceUID = random_uuid
                ds.InstanceNumber = str(slice_id + 1)
                vol_origin_tmp = list(copy.deepcopy(img_origin))
                vol_origin_tmp[-1] += img_spacing[-1] * slice_id
                ds.ImagePositionPatient = vol_origin_tmp

                # Step 4.2 - Slice data
                img_slice = img_array[slice_id, :, :]
                # plt.imshow(img_slice); plt.savefig(str(Path(path_dicom, '{}.png'.format(slice_id)))); plt.close()
                ds.PixelData = img_slice.tobytes()
                save_path = Path(path_dicom).joinpath(str(ds.file_meta.MediaStorageSOPInstanceUID) + '.dcm')
                ds.save_as(str(save_path), write_like_original=False)

            return ds.StudyInstanceUID, ds.SeriesInstanceUID
        else:
            print(' - [ERROR][vol_to_dicom_for_ct()] Error in path: path_img_ct: ', path_img_ct)
            return None, None

    except:
        traceback.print_exc()
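A hypothetical call, just to show the expected arguments and return values (paths and identifiers are placeholders):
study_uid, series_uid = vol_to_dicom_for_ct(
    path_img_ct='ct_volume.nii.gz',
    patient_name='Test^Patient',
    patient_id='0001',
    path_dicom='./dicom_out',
)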

Related

How can I alter TFRecords for my COCO format dataset?

I am currently trying to get the Caltech camera traps benchmark dataset into TFRecords but I am struggling quite a bit. https://lila.science/datasets/caltech-camera-traps. The annotations are displayed as follows:
"info": {"contributor": "Sara Beery", "date_created": "2018-07-03 18:34:36.573636", "version": "Caltech Camera Traps - ECCV18", "description": "Database of camera trap images collected from the NPS and the USGS with help from Justin Brown and Erin Boydston", "year": 2018}]
"categories": [{"id": 6, "name": "bobcat"}, ....]
"images": [{"file_name": "59b93afb-23d2-11e8-a6a3-ec086b02610b.jpg", "rights_holder": "Justin Brown", "height": 1494, "width": 2048, "frame_num": 2, "date_captured": "2012-05-09 07:33:45", "location": 38, "seq_num_frames": 3, "seq_id": "6f04895c-5567-11e8-a3d6-dca9047ef277", "id": "59b93afb-23d2-11e8-a6a3-ec086b02610b"},...]
"annotations": [{"image_id": "59ffbd00-23d2-11e8-a6a3-ec086b02610b", "category_id": 1, "bbox": [1118.72, 570.88, 328.96000000000004, 180.48000000000002], "id": "36132"}
I am trying to use the create_coco_tf_record.py file and adapt it. I do not have 'iscrowd' or 'segmentation' in my annotations, and a lot of the images do not have bounding boxes. I was wondering if someone has done something similar and would be able to help, please. Thanks! Here is the file...
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw COCO dataset to TFRecord for object_detection.
This tool supports data generation for object detection (boxes, masks),
keypoint detection, and DensePose.
Please note that this tool creates sharded output files.
Example usage:
python create_coco_tf_record.py --logtostderr \
--train_image_dir="${TRAIN_IMAGE_DIR}" \
--val_image_dir="${VAL_IMAGE_DIR}" \
--test_image_dir="${TEST_IMAGE_DIR}" \
--train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
--testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
--output_dir="${OUTPUT_DIR}"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import json
import logging
import os
import contextlib2
import numpy as np
import PIL.Image
from pycocotools import mask
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
tf.flags.DEFINE_boolean(
'include_masks', False, 'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: False.')
tf.flags.DEFINE_string('train_image_dir', '', 'Training image directory.')
tf.flags.DEFINE_string('val_image_dir', '', 'Validation image directory.')
tf.flags.DEFINE_string('test_image_dir', '', 'Test image directory.')
tf.flags.DEFINE_string('train_annotations_file', '',
'Training annotations JSON file.')
tf.flags.DEFINE_string('val_annotations_file', '',
'Validation annotations JSON file.')
tf.flags.DEFINE_string('testdev_annotations_file', '',
'Test-dev annotations JSON file.')
tf.flags.DEFINE_string('train_keypoint_annotations_file', '',
'Training annotations JSON file.')
tf.flags.DEFINE_string('val_keypoint_annotations_file', '',
'Validation annotations JSON file.')
# DensePose is only available for coco 2014.
tf.flags.DEFINE_string('train_densepose_annotations_file', '',
'Training annotations JSON file for DensePose.')
tf.flags.DEFINE_string('val_densepose_annotations_file', '',
'Validation annotations JSON file for DensePose.')
tf.flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.')
# Whether to only produce images/annotations on person class (for keypoint /
# densepose task).
tf.flags.DEFINE_boolean('remove_non_person_annotations', False, 'Whether to '
'remove all annotations for non-person objects.')
tf.flags.DEFINE_boolean('remove_non_person_images', False, 'Whether to '
'remove all examples that do not contain a person.')
FLAGS = flags.FLAGS
logger = tf.get_logger()
logger.setLevel(logging.INFO)
_COCO_KEYPOINT_NAMES = [
b'nose', b'left_eye', b'right_eye', b'left_ear', b'right_ear',
b'left_shoulder', b'right_shoulder', b'left_elbow', b'right_elbow',
b'left_wrist', b'right_wrist', b'left_hip', b'right_hip',
b'left_knee', b'right_knee', b'left_ankle', b'right_ankle'
]
_COCO_PART_NAMES = [
b'torso_back', b'torso_front', b'right_hand', b'left_hand', b'left_foot',
b'right_foot', b'right_upper_leg_back', b'left_upper_leg_back',
b'right_upper_leg_front', b'left_upper_leg_front', b'right_lower_leg_back',
b'left_lower_leg_back', b'right_lower_leg_front', b'left_lower_leg_front',
b'left_upper_arm_back', b'right_upper_arm_back', b'left_upper_arm_front',
b'right_upper_arm_front', b'left_lower_arm_back', b'right_lower_arm_back',
b'left_lower_arm_front', b'right_lower_arm_front', b'right_face',
b'left_face',
]
_DP_PART_ID_OFFSET = 1
def clip_to_unit(x):
return min(max(x, 0.0), 1.0)
def create_tf_example(image,
annotations_list,
image_dir,
category_index,
include_masks=False,
keypoint_annotations_dict=None,
densepose_annotations_dict=None,
remove_non_person_annotations=False,
remove_non_person_images=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
annotations_list:
list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
coordinates in the official COCO dataset are given as [x, y, width,
height] tuples using absolute coordinates where x, y represent the
top-left (0-indexed) corner. This function converts to the format
expected by the Tensorflow Object Detection API (which is which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to image
size).
image_dir: directory containing the image files.
category_index: a dict containing COCO category information keyed by the
'id' field of each category. See the label_map_util.create_category_index
function.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
keypoint_annotations_dict: A dictionary that maps from annotation_id to a
dictionary with keys: [u'keypoints', u'num_keypoints'] represeting the
keypoint information for this person object annotation. If None, then
no keypoint annotations will be populated.
densepose_annotations_dict: A dictionary that maps from annotation_id to a
dictionary with keys: [u'dp_I', u'dp_x', u'dp_y', 'dp_U', 'dp_V']
representing part surface coordinates. For more information see
http://densepose.org/.
remove_non_person_annotations: Whether to remove any annotations that are
not the "person" class.
remove_non_person_images: Whether to remove any images that do not contain
at least one "person" annotation.
Returns:
key: SHA256 hash of the image.
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
num_keypoint_annotation_skipped: Number of keypoint annotations that were
skipped.
num_densepose_annotation_skipped: Number of DensePose annotations that were
skipped.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
encoded_mask_png = []
keypoints_x = []
keypoints_y = []
keypoints_visibility = []
keypoints_name = []
num_keypoints = []
include_keypoint = keypoint_annotations_dict is not None
num_annotations_skipped = 0
num_keypoint_annotation_used = 0
num_keypoint_annotation_skipped = 0
dp_part_index = []
dp_x = []
dp_y = []
dp_u = []
dp_v = []
dp_num_points = []
densepose_keys = ['dp_I', 'dp_U', 'dp_V', 'dp_x', 'dp_y', 'bbox']
include_densepose = densepose_annotations_dict is not None
num_densepose_annotation_used = 0
num_densepose_annotation_skipped = 0
for object_annotations in annotations_list:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
category_id = int(object_annotations['category_id'])
category_name = category_index[category_id]['name'].encode('utf8')
if remove_non_person_annotations and category_name != b'person':
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
#is_crowd.append(object_annotations['iscrowd'])
category_ids.append(category_id)
category_names.append(category_name)
#area.append(object_annotations['area'])
# if include_masks:
# run_len_encoding = mask.frPyObjects(object_annotations['segmentation'],
# image_height, image_width)
# binary_mask = mask.decode(run_len_encoding)
# if not object_annotations['iscrowd']:
# binary_mask = np.amax(binary_mask, axis=2)
# pil_image = PIL.Image.fromarray(binary_mask)
# output_io = io.BytesIO()
# pil_image.save(output_io, format='PNG')
# encoded_mask_png.append(output_io.getvalue())
# if include_keypoint:
# annotation_id = object_annotations['id']
# if annotation_id in keypoint_annotations_dict:
# num_keypoint_annotation_used += 1
# keypoint_annotations = keypoint_annotations_dict[annotation_id]
# keypoints = keypoint_annotations['keypoints']
# num_kpts = keypoint_annotations['num_keypoints']
# keypoints_x_abs = keypoints[::3]
# keypoints_x.extend(
# [float(x_abs) / image_width for x_abs in keypoints_x_abs])
# keypoints_y_abs = keypoints[1::3]
# keypoints_y.extend(
# [float(y_abs) / image_height for y_abs in keypoints_y_abs])
# keypoints_visibility.extend(keypoints[2::3])
# keypoints_name.extend(_COCO_KEYPOINT_NAMES)
# num_keypoints.append(num_kpts)
# else:
# keypoints_x.extend([0.0] * len(_COCO_KEYPOINT_NAMES))
# keypoints_y.extend([0.0] * len(_COCO_KEYPOINT_NAMES))
# keypoints_visibility.extend([0] * len(_COCO_KEYPOINT_NAMES))
# keypoints_name.extend(_COCO_KEYPOINT_NAMES)
# num_keypoints.append(0)
# if include_densepose:
# annotation_id = object_annotations['id']
# if (annotation_id in densepose_annotations_dict and
# all(key in densepose_annotations_dict[annotation_id]
# for key in densepose_keys)):
# dp_annotations = densepose_annotations_dict[annotation_id]
# num_densepose_annotation_used += 1
# dp_num_points.append(len(dp_annotations['dp_I']))
# dp_part_index.extend([int(i - _DP_PART_ID_OFFSET)
# for i in dp_annotations['dp_I']])
# # DensePose surface coordinates are defined on a [256, 256] grid
# # relative to each instance box (i.e. absolute coordinates in range
# # [0., 256.]). The following converts the coordinates
# # so that they are expressed in normalized image coordinates.
# dp_x_box_rel = [
# clip_to_unit(val / 256.) for val in dp_annotations['dp_x']]
# dp_x_norm = [(float(x) + x_box_rel * width) / image_width
# for x_box_rel in dp_x_box_rel]
# dp_y_box_rel = [
# clip_to_unit(val / 256.) for val in dp_annotations['dp_y']]
# dp_y_norm = [(float(y) + y_box_rel * height) / image_height
# for y_box_rel in dp_y_box_rel]
# dp_x.extend(dp_x_norm)
# dp_y.extend(dp_y_norm)
# dp_u.extend(dp_annotations['dp_U'])
# dp_v.extend(dp_annotations['dp_V'])
# else:
# dp_num_points.append(0)
# if (remove_non_person_images and
# not any(name == b'person' for name in category_names)):
# return (key, None, num_annotations_skipped,
# num_keypoint_annotation_skipped, num_densepose_annotation_skipped)
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/is_crowd':
dataset_util.int64_list_feature(is_crowd),
'image/object/area':
dataset_util.float_list_feature(area),
}
# if include_masks:
# feature_dict['image/object/mask'] = (
# dataset_util.bytes_list_feature(encoded_mask_png))
# if include_keypoint:
# feature_dict['image/object/keypoint/x'] = (
# dataset_util.float_list_feature(keypoints_x))
# feature_dict['image/object/keypoint/y'] = (
# dataset_util.float_list_feature(keypoints_y))
# feature_dict['image/object/keypoint/num'] = (
# dataset_util.int64_list_feature(num_keypoints))
# feature_dict['image/object/keypoint/visibility'] = (
# dataset_util.int64_list_feature(keypoints_visibility))
# feature_dict['image/object/keypoint/text'] = (
# dataset_util.bytes_list_feature(keypoints_name))
# num_keypoint_annotation_skipped = (
# len(keypoint_annotations_dict) - num_keypoint_annotation_used)
# if include_densepose:
# feature_dict['image/object/densepose/num'] = (
# dataset_util.int64_list_feature(dp_num_points))
# feature_dict['image/object/densepose/part_index'] = (
# dataset_util.int64_list_feature(dp_part_index))
# feature_dict['image/object/densepose/x'] = (
# dataset_util.float_list_feature(dp_x))
# feature_dict['image/object/densepose/y'] = (
# dataset_util.float_list_feature(dp_y))
# feature_dict['image/object/densepose/u'] = (
# dataset_util.float_list_feature(dp_u))
# feature_dict['image/object/densepose/v'] = (
# dataset_util.float_list_feature(dp_v))
# num_densepose_annotation_skipped = (
# len(densepose_annotations_dict) - num_densepose_annotation_used)
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return (key, example, num_annotations_skipped,
num_keypoint_annotation_skipped, num_densepose_annotation_skipped)
def _create_tf_record_from_coco_annotations(annotations_file, image_dir,
output_path, include_masks,
num_shards,
keypoint_annotations_file='',
densepose_annotations_file='',
remove_non_person_annotations=False,
remove_non_person_images=False):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
num_shards: number of output file shards.
keypoint_annotations_file: JSON file containing the person keypoint
annotations. If empty, then no person keypoint annotations will be
generated.
densepose_annotations_file: JSON file containing the DensePose annotations.
If empty, then no DensePose annotations will be generated.
remove_non_person_annotations: Whether to remove any annotations that are
not the "person" class.
remove_non_person_images: Whether to remove any images that do not contain
at least one "person" annotation.
"""
with contextlib2.ExitStack() as tf_record_close_stack, \
tf.gfile.GFile(annotations_file, 'r') as fid:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, output_path, num_shards)
groundtruth_data = json.load(fid)
images = groundtruth_data['images']
category_index = label_map_util.create_category_index(
groundtruth_data['categories'])
annotations_index = {}
if 'annotations' in groundtruth_data:
logging.info('Found groundtruth annotations. Building annotations index.')
for annotation in groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in annotations_index:
annotations_index[image_id] = []
annotations_index[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in annotations_index:
missing_annotation_count += 1
annotations_index[image_id] = []
logging.info('%d images are missing annotations.',
missing_annotation_count)
keypoint_annotations_index = {}
if keypoint_annotations_file:
with tf.gfile.GFile(keypoint_annotations_file, 'r') as kid:
keypoint_groundtruth_data = json.load(kid)
if 'annotations' in keypoint_groundtruth_data:
for annotation in keypoint_groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in keypoint_annotations_index:
keypoint_annotations_index[image_id] = {}
keypoint_annotations_index[image_id][annotation['id']] = annotation
densepose_annotations_index = {}
if densepose_annotations_file:
with tf.gfile.GFile(densepose_annotations_file, 'r') as fid:
densepose_groundtruth_data = json.load(fid)
if 'annotations' in densepose_groundtruth_data:
for annotation in densepose_groundtruth_data['annotations']:
image_id = annotation['image_id']
if image_id not in densepose_annotations_index:
densepose_annotations_index[image_id] = {}
densepose_annotations_index[image_id][annotation['id']] = annotation
total_num_annotations_skipped = 0
total_num_keypoint_annotations_skipped = 0
total_num_densepose_annotations_skipped = 0
for idx, image in enumerate(images):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(images))
annotations_list = annotations_index[image['id']]
keypoint_annotations_dict = None
if keypoint_annotations_file:
keypoint_annotations_dict = {}
if image['id'] in keypoint_annotations_index:
keypoint_annotations_dict = keypoint_annotations_index[image['id']]
densepose_annotations_dict = None
if densepose_annotations_file:
densepose_annotations_dict = {}
if image['id'] in densepose_annotations_index:
densepose_annotations_dict = densepose_annotations_index[image['id']]
(_, tf_example, num_annotations_skipped, num_keypoint_annotations_skipped,
num_densepose_annotations_skipped) = create_tf_example(
image, annotations_list, image_dir, category_index, include_masks,
keypoint_annotations_dict, densepose_annotations_dict,
remove_non_person_annotations, remove_non_person_images)
total_num_annotations_skipped += num_annotations_skipped
total_num_keypoint_annotations_skipped += num_keypoint_annotations_skipped
total_num_densepose_annotations_skipped += (
num_densepose_annotations_skipped)
shard_idx = idx % num_shards
if tf_example:
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
logging.info('Finished writing, skipped %d annotations.',
total_num_annotations_skipped)
if keypoint_annotations_file:
logging.info('Finished writing, skipped %d keypoint annotations.',
total_num_keypoint_annotations_skipped)
if densepose_annotations_file:
logging.info('Finished writing, skipped %d DensePose annotations.',
total_num_densepose_annotations_skipped)
def main(_):
assert FLAGS.train_image_dir, '`train_image_dir` missing.'
assert FLAGS.val_image_dir, '`val_image_dir` missing.'
assert FLAGS.test_image_dir, '`test_image_dir` missing.'
assert FLAGS.train_annotations_file, '`train_annotations_file` missing.'
assert FLAGS.val_annotations_file, '`val_annotations_file` missing.'
assert FLAGS.testdev_annotations_file, '`testdev_annotations_file` missing.'
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
train_output_path = os.path.join(FLAGS.output_dir, 'coco_train.record')
val_output_path = os.path.join(FLAGS.output_dir, 'coco_val.record')
testdev_output_path = os.path.join(FLAGS.output_dir, 'coco_testdev.record')
_create_tf_record_from_coco_annotations(
FLAGS.train_annotations_file,
FLAGS.train_image_dir,
train_output_path,
FLAGS.include_masks,
num_shards=100,
keypoint_annotations_file=FLAGS.train_keypoint_annotations_file,
densepose_annotations_file=FLAGS.train_densepose_annotations_file,
remove_non_person_annotations=FLAGS.remove_non_person_annotations,
remove_non_person_images=FLAGS.remove_non_person_images)
_create_tf_record_from_coco_annotations(
FLAGS.val_annotations_file,
FLAGS.val_image_dir,
val_output_path,
FLAGS.include_masks,
num_shards=50,
keypoint_annotations_file=FLAGS.val_keypoint_annotations_file,
densepose_annotations_file=FLAGS.val_densepose_annotations_file,
remove_non_person_annotations=FLAGS.remove_non_person_annotations,
remove_non_person_images=FLAGS.remove_non_person_images)
_create_tf_record_from_coco_annotations(
FLAGS.testdev_annotations_file,
FLAGS.test_image_dir,
testdev_output_path,
FLAGS.include_masks,
num_shards=50)
if __name__ == '__main__':
tf.app.run()
You can leave iscrowd and segmentation empty if you are doing object detection, classification, or similar tasks, but you won't be able to make use of the images with missing bounding boxes for those tasks.
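If it helps, here is a tiny sketch of how the absent fields could be defaulted inside create_tf_example (an assumption, not tested against the Caltech annotations):
# inside the annotation loop, default the missing keys instead of indexing them
is_crowd.append(object_annotations.get('iscrowd', 0))
area.append(object_annotations.get('area', width * height))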

Skip image during tensorflow input pipeline

I have a Tensorflow input pipeline that reads in two png files (example, label) from disk. I want to tell tensorflow to skip an example/label pair based on a value in the label. Anyone know how to do this?
Here is a simplified example of the input pipeline and with a comment where I want to do the filtering:
import tensorflow as tf
import glob2 as glob
def preprocess_images(impath, labpath):
    image = tf.io.read_file(impath)
    label = tf.io.read_file(labpath)
    image = tf.image.decode_png(image, channels=3)
    label = tf.image.decode_png(label, channels=1)
    if tf.reduce_sum(label) == 0:
        # skip the image and move on to the next, don't include this in the batch
        pass
    else:
        return (image, label)
im_files = glob.glob(impath + '*.png')
lab_files = glob.glob(labpath + '*.png')
files = (im_files, lab_files)
path = tf.data.Dataset.from_tensor_slices(files)
pair = path.map(preprocess_images)
ds = tf.data.Dataset.zip(pair)
ds = ds.batch(64)
The easiest way seems to be to use the filter method on your tf.data.Dataset object.
Here I am going to load the label only and filter out the entries with a sum of 0:
def load_label_only(impath, labpath):
    label = tf.io.read_file(labpath)
    label = tf.image.decode_png(label, channels=1)
    return impath, label
# Create the dataset as in your example:
im_files = glob.glob(impath + '*.png')
lab_files = glob.glob(labpath + '*.png')
files = (im_files, lab_files)
ds = tf.data.Dataset.from_tensor_slices(files)
ds = ds.map(load_label_only)
# Here, I am going to keep only non-zero labels:
filtered_ds = ds.filter(lambda image_path, label_map: tf.reduce_sum(label_map) != 0)
# Load the rest of the images...
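To finish the pipeline, the remaining data can be loaded in a second map after the filter; here is a sketch reusing the question's decoding:
def load_image(impath, label_map):
    image = tf.io.read_file(impath)
    image = tf.image.decode_png(image, channels=3)
    return image, label_map

ds = filtered_ds.map(load_image)
ds = ds.batch(64)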

How do I take care of GDALDatasetShadow TypeError when reprojecting and writing GEOTIFF multi-band raster to GEOTIFF 3-band raster?

I've been trying to reproject an input (multi-band) GEOTIFF file to a destination transformation/projection and write these reprojected bands to a new 3 band GEOTIFF file (since I don't need the other bands). I've tried combining 2 blocks of code that work independently of each other (one writes 3 bands to a new GEOTIFF, the other reprojects a 1 band GEOTIFF to a reference file). However, I receive the following error:
TypeError: in method 'ReprojectImage', argument 1 of type 'GDALDatasetShadow *'
Find the function that I've used below:
def reprojectRaster(inputfile, referencefile, outputfile):
    if os.path.exists(outputfile):
        os.remove(outputfile)
    input = gdal.Open(inputfile, gdalconst.GA_ReadOnly)
    with rasterio.open(inputfile) as src:
        input_array = src.read((2, 1, 4))  # red band, green band, nir band
    inputProj = input.GetProjection()
    #inputTrans = input.GetGeoTransform
    reference = gdal.Open(referencefile, gdalconst.GA_ReadOnly)
    referenceProj = reference.GetProjection()
    referenceTrans = reference.GetGeoTransform()
    bandreference = reference.GetRasterBand(1)
    x = reference.RasterXSize
    y = reference.RasterYSize
    no_bands = reference.RasterCount
    driver = gdal.GetDriverByName('GTiff')
    output = driver.Create(outputfile, x, y, no_bands, bandreference.DataType)
    output.SetGeoTransform(referenceTrans)
    output.SetProjection(referenceProj)
    # I've been trying to put this loop in here from the other code
    for i, image in enumerate(input_array, 1):
        input.GetRasterBand(i).WriteArray(image)
    gdal.ReprojectImage(inputfile, outputfile, inputProj, referenceProj, gdalconst.GRA_Bilinear)
    del output
def CreateGeoTiff(outputfile, inputfile, referencefile):
    with rasterio.open(inputfile) as src:
        input_array = src.read((2, 1, 4))  # red, green, nir bands
    reference = gdal.Open(referencefile, gdalconst.GA_ReadOnly)
    referenceProj = reference.GetProjection()
    referenceTrans = reference.GetGeoTransform()
    driver = gdal.GetDriverByName('GTiff')
    no_bands, cols, rows = input_array.shape
    DataSet = driver.Create(outputfile, cols, rows, no_bands, gdal.GDT_Float32)
    DataSet.SetGeoTransform(referenceTrans)
    DataSet.SetProjection(referenceProj)
    for i, image in enumerate(input_array, 1):
        DataSet.GetRasterBand(i).WriteArray(image)
    DataSet = None
    return gdal.Open(outputfile, gdalconst.GA_ReadOnly)
I tried to combine the following 2 functions:
def reprojectRaster(inputfile, referencefile, outputfile):
    input = gdal.Open(inputfile, gdalconst.GA_ReadOnly)
    inputProj = input.GetProjection()
    reference = gdal.Open(referencefile, gdalconst.GA_ReadOnly)
    referenceProj = reference.GetProjection()
    referenceTrans = reference.GetGeoTransform()
    bandreference = reference.GetRasterBand(1)
    x = reference.RasterXSize
    y = reference.RasterYSize
    driver = gdal.GetDriverByName('GTiff')
    output = driver.Create(outputfile, x, y, 1, bandreference.DataType)
    output.SetGeoTransform(referenceTrans)
    output.SetProjection(referenceProj)
    gdal.ReprojectImage(input, output, inputProj, referenceProj, gdalconst.GRA_Bilinear)
    del output

def CreateGeoTiff(outputfile, inputfile, referencefile):
    with rasterio.open(inputfile) as src:
        input_array = src.read((2, 1, 4))  # red, green, nir bands
    reference = gdal.Open(referencefile, gdalconst.GA_ReadOnly)
    referenceProj = reference.GetProjection()
    referenceTrans = reference.GetGeoTransform()
    driver = gdal.GetDriverByName('GTiff')
    no_bands, cols, rows = input_array.shape
    DataSet = driver.Create(outputfile, cols, rows, no_bands, gdal.GDT_Float32)
    DataSet.SetGeoTransform(referenceTrans)
    DataSet.SetProjection(referenceProj)
    for i, image in enumerate(input_array, 1):
        DataSet.GetRasterBand(i).WriteArray(image)
    DataSet = None
    return gdal.Open(outputfile, gdalconst.GA_ReadOnly)
The code seems to check out (the right inputs, the right function calls), but Python raises the TypeError mentioned above. Does anyone have an idea where it went wrong?
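One likely cause (an observation, not a verified fix): gdal.ReprojectImage() expects opened gdal.Dataset objects, not filename strings, so passing inputfile/outputfile raises the GDALDatasetShadow TypeError. A sketch of the call with datasets instead:
# sketch: pass the opened datasets rather than the path strings
src_ds = gdal.Open(inputfile, gdalconst.GA_ReadOnly)
dst_ds = driver.Create(outputfile, x, y, no_bands, bandreference.DataType)
dst_ds.SetGeoTransform(referenceTrans)
dst_ds.SetProjection(referenceProj)
gdal.ReprojectImage(src_ds, dst_ds, inputProj, referenceProj, gdalconst.GRA_Bilinear)
dst_ds = None  # flush to disk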

Error preprocessing the input data when using Tensorflow Dataset API

I have images of [64,512,5] stored in *.npy files which I convert into *.tfrecords files.
I have verified that the reading of said records corresponds correctly with what is present in the *.npy files. However, when I perform some operation on the parser, like adding 1 to each pixel of the image, the result is not the expected one. The result should be 64*512*5 = 163840 but it is 163839.99980013957 (not always the same).
I have tried to perform different operations like tf.subtract, but the results are the same.
Could someone tell me what is wrong?
import re
import ast
import sys, select
import random as rn
from glob import glob
from tqdm import tqdm
from datetime import datetime
from configparser import SafeConfigParser
import numpy as np
import numpy.ma as ma
import scipy.misc
import os.path
from os import mkdir, stat
from os.path import exists, dirname, abspath
from os.path import join as dir_join
import tensorflow as tf
''' File hierarchy
'''
_code_dir = dirname(abspath(__file__))
_python_dir = dirname(_code_dir)
_model_dir = dirname(_python_dir)
_project_dir = dirname(_model_dir)
_ml_dir = dirname(_project_dir)
_srv_dir = dirname(_ml_dir)
_root_datasets_dir = dir_join(_srv_dir,'machine_learning','data_sets/ssd_prepared')
_config_dir = dir_join(_python_dir, 'config')
'''Data sets directories
'''
THIS_DATA_SET_DIR = 'Sph_50m' #WARNING: Global variable also used in helper.py
_data_dir = dir_join(_root_datasets_dir, THIS_DATA_SET_DIR)
_data_set_dir = dir_join(_data_dir,'ImageSet')
_data_npy_dir = dir_join(_data_dir,'data')
_data_tfRecord_dir = dir_join(_data_dir,'tfRecord')
''' Configuration parser
'''
cfg_parser = SafeConfigParser()
cfg_parser.read(dir_join(_config_dir,'cfg_model.ini'))
''' Private variables
'''
_batch_size = cfg_parser.getint(section='train', option='batch_size')
_max_epoch = cfg_parser.getint(section='train', option='max_epoch')
_standarize = cfg_parser.getboolean(section='train', option='standarize_input')
_input_shape = ast.literal_eval(cfg_parser.get(section='data_shape', option='input_shape'))
_label_channel = cfg_parser.getint(section='data_shape', option='label_channel')
_track_channel = cfg_parser.getint(section='data_shape', option='track_channel')
_mask_channel = cfg_parser.getint(section='data_shape', option='mask_channel')
_data_train = cfg_parser.get(section='data_set', option='data_train')
_data_val = cfg_parser.get(section='data_set', option='data_val')
_data_test = cfg_parser.get(section='data_set', option='data_test')
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value.reshape(-1)))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _floats_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value.reshape(-1)))

def numpy_to_TFRecord():
    if not exists(_data_tfRecord_dir): mkdir(_data_tfRecord_dir)
    for dataset in [_data_train, _data_val, _data_test]:
        tfRecord_folder = dir_join(_data_tfRecord_dir, dataset)
        if not exists(tfRecord_folder): mkdir(tfRecord_folder)
        # Retrieve list of files
        projections_dir = []
        file_ = open(dir_join(_data_set_dir, dataset + '.txt'), 'r')
        for x in file_.readlines():
            file_nat = x.strip() + '.npy'
            filename = dir_join(_data_npy_dir, file_nat)
            assert exists(filename), "{} doesn't exist".format(filename)
            projections_dir.append(filename)
        file_.close()
        totaltfRecordSize = 0
        numFile = 0
        for projection_dir in tqdm(projections_dir, ncols=100, desc='TFRecord {}'.format(dataset)):
            scanName = projection_dir.split('/')[-1].split('.')[0]
            if totaltfRecordSize > 100*(10**6) or totaltfRecordSize == 0:
                # address to save the TFRecords file
                train_filename = dir_join(tfRecord_folder, str(numFile) + '_' + dataset + '.tfrecords')
                # open the TFRecords file
                writer = tf.python_io.TFRecordWriter(train_filename)
                numFile += 1
                totaltfRecordSize = 0
            # Load the image
            projection = np.load(projection_dir)
            image = projection[:,:,:_label_channel]
            label = projection[:,:,_label_channel].astype(int)
            mask = projection[:,:,_mask_channel].astype(int)
            track = projection[:,:,_track_channel].astype(int)
            # Create a feature
            feature = {'image': _floats_feature(image),
                       'label': _int64_feature(label),
                       'mask' : _int64_feature(mask),
                       'track': _int64_feature(track),
                       'scanName': _bytes_feature(tf.compat.as_bytes(scanName))}
            # Create an example protocol buffer
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            # Serialize to string and write on the file
            writer.write(example.SerializeToString())
            fileSize = stat(train_filename).st_size
            totaltfRecordSize += fileSize
        writer.close()
        sys.stdout.flush()

def readTFRecord():
    # Transforms a scalar string `example_proto` into a pair of a scalar string and
    # a scalar integer, representing an image and its label, respectively.
    image_dim = _input_shape[0] * _input_shape[1] * _label_channel
    label_dim = _input_shape[0] * _input_shape[1]
    mean = np.load(dir_join(_data_dir, 'mean.npy'))
    std = np.load(dir_join(_data_dir, 'std.npy'))
    mean_tf = tf.convert_to_tensor(mean, dtype=tf.float32, name='mean')
    std_tf = tf.convert_to_tensor(std, dtype=tf.float32, name='std')
    with tf.variable_scope('TFRecord'):
        def _parse_function(example_proto):
            with tf.variable_scope('parser'):
                features = {'image': tf.FixedLenFeature([image_dim], tf.float32),
                            'label': tf.FixedLenFeature([label_dim], tf.int64),
                            'mask' : tf.FixedLenFeature([label_dim], tf.int64),
                            'track': tf.FixedLenFeature([label_dim], tf.int64),
                            'scanName': tf.FixedLenFeature([], tf.string)}
                parsed_features = tf.parse_single_example(example_proto, features)
                # Reshape image data into the original shape
                image = tf.reshape(parsed_features['image'], [_input_shape[0], _input_shape[1], _label_channel], name='image')
                label = tf.reshape(parsed_features['label'], _input_shape, name='lable_reshape')
                mask = tf.reshape(parsed_features['mask'], _input_shape, name='mask_reshape')
                track = tf.reshape(parsed_features['track'], _input_shape, name='track_reshape')
                scanName = parsed_features['scanName']
                image = image + tf.constant(1., dtype=tf.float32)
                return image, label, mask, track, scanName
        training_filenames = glob(dir_join(_data_tfRecord_dir, _data_train, '*.tfrecords'))
        validation_filenames = glob(dir_join(_data_tfRecord_dir, _data_val, '*.tfrecords'))
        filenames = tf.placeholder(tf.string, shape=[None], name='filenames')
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(_parse_function, num_parallel_calls=20)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=10000)
        dataset = dataset.batch(_batch_size, drop_remainder=True)
        dataset = dataset.prefetch(buffer_size=10)
        iterator = dataset.make_initializable_iterator()
        next = iterator.get_next()
    sess = tf.Session()
    while True:
        sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
        try:
            img, _, _, _, scanX = sess.run(next)
            for i, scan in enumerate(scanX):
                print(scan.decode("utf-8"))
                projection = np.load(dir_join(_data_npy_dir, scan.decode("utf-8") + '.npy'))
                imagenp = projection[:,:,:_label_channel]
                if np.abs(np.sum(img[i,...] - imagenp)) > 0.:
                    print(np.sum(img[i,...] - imagenp))
        except tf.errors.OutOfRangeError:
            break
    return training_filenames, validation_filenames, filenames, iterator

if __name__ == '__main__':
    numpy_to_TFRecord()
    readTFRecord()
The test I'm doing in the previous code is to convert the *.npy files to *.tfrecords. Then, I compare the *.tfrecords with the *.npy. The value should be 0 if both images are identical.
img, _, _, _, scanX = sess.run(next)
for i, scan in enumerate(scanX):
    print(scan.decode("utf-8"))
    projection = np.load(dir_join(_data_npy_dir, scan.decode("utf-8") + '.npy'))
    imagenp = projection[:,:,:_label_channel]
    print(np.sum(img[i,...] - imagenp))
If the data is not preprocessed, these images are the same, however, if we perform some kind of transformation, the results do not match what was expected. In this case we are adding 1 to each pixel of the image, so the total difference should be 64 * 512 * 5.
image = image + tf.constant(1., dtype=tf.float32)
I would like to solve this error, since so far I have not been able to reproduce the results my neural network obtains with feed_dict when using the Tensorflow Dataset API, and this is the only point where I can observe a difference in the input data.
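A plausible explanation (an assumption, not verified against this data): the parser works in float32, so value + 1 is rounded to the nearest float32 before the float64 comparison with the .npy file; summed over 64*512*5 values, this produces exactly this kind of tiny, varying discrepancy. A small standalone demonstration:
import numpy as np

x = (np.random.rand(64, 512, 5) * 100).astype(np.float32)   # stand-in for one image
plus_one_f32 = x + np.float32(1.0)                           # float32 addition, as in the tf graph
total = np.sum(plus_one_f32.astype(np.float64) - x.astype(np.float64))
print(total, "vs expected", x.size)                          # slightly off from 163840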

Basic a tiff processing with photoshop using python

I need to write a script that does the following:
# open a tiff
# get its dpi, width, height and colorspace
# set the dpi, width, height and colorspace
# and then save the tiff out with no compression and no layers.
So far I've gotten:
from win32com.client.dynamic import Dispatch
ps = Dispatch( "Photoshop.Application" )
file_path = "C:\\Users\\me\\myImg.tif"
doc = ps.Open( file_path )
dpi = doc.Resolution
width = doc.Width # in cm
height = doc.Height # in cm
# up to here the code works, but then I try
doc.Resolution = 72
ps.ResizeImage( 120 , 120 )
ps.PsColorSpaceType( 3 ) # psSRGB
ps.TiffSaveOptions.ImageCompression = 1 # psNoTIFFCompression
ps.TiffSaveOptions.Layers = False
ps.Save()
# and this last section fails
Please help, any ideas, tips, solutions would be greatly appreciated :D
After a lot of googling and some trial and error (and then even more trial and error), I've managed to come up with the code below.
Hope this can help someone else.
Code
file_path = "C:\\Users\\me\\myImg.tif"
color_settings = "North America General Purpose 2"
from win32com.client.dynamic import Dispatch
ps_app = Dispatch( "Photoshop.Application" )
# set photoshop to use pixels as dimensions
ps_app.Preferences.RulerUnits = 1 # PsUnits --> 1 (psPixels)
ps_app.Preferences.TypeUnits = 1 # PsTypeUnits --> 1 (psPixels)
doc = ps_app.Open( file_path ) # Open a file and store open file as doc
dpi = doc.Resolution
width = doc.Width
height = doc.Height
cor_res = 1024
ps_app.ChangeColorSettings( color_settings )
doc.ResizeImage( cor_res , cor_res , 72 )
options = Dispatch('Photoshop.TiffSaveOptions')
options.ImageCompression = 1 # ps_appNoTIFFCompression
options.Layers = False # no layers
doc.SaveAs( file_path , options ) # Save with specified options
doc.Close( 2 ) # psDoNotSaveChanges
