Python PIL ValueError: not enough image data - python

I have written a function to load images (jpg, JPEG, png, gif etc.) and convert it to jpg. The code looks like this:
def jpg_image_open(file_path, fill_color=(255, 255, 255)):
    """Open an image file (jpg, png, gif, ...) and return it as RGB.

    :param file_path: path to the image; the extension selects the branch.
    :param fill_color: RGB colour used to flatten transparent pixels
        (default white).
    :return: tuple ``(PIL.Image.Image, numpy.ndarray)`` — the RGB image and
        its H x W x 3 uint8 array view.
    """
    image = PIL.Image.open(file_path)
    print(file_path, image.mode)
    if file_path.endswith('.gif'):
        # Keep only the first frame of a (possibly animated) GIF.
        for im_frame in PIL.ImageSequence.Iterator(image):
            # Converting it to RGB to ensure that it has 3 dimensions as requested
            image = im_frame.convert('RGB')
            break
    elif file_path.endswith('.png'):
        image.load()  # required before split() on some Pillow versions
        if image.mode in ('P', 'L'):
            # BUG FIX: convert() returns a NEW image; the original code
            # discarded the result, so 'P'/'L' images stayed single-channel
            # and Image.fromarray(arr, 'RGB') later failed with
            # "ValueError: not enough image data".
            image = image.convert("RGB")
        elif image.mode in ('RGBA', 'LA'):
            # https://stackoverflow.com/a/9459208/2049763
            print(file_path, " has transparency layer")
            # Flatten transparency: paste onto an opaque background using
            # the alpha band as the paste mask.
            background = PIL.Image.new(image.mode[:-1], image.size, fill_color)
            background.paste(image, mask=image.split()[-1])
            image = background
    # Guarantee a 3-channel result for every input mode (covers the 'LA'
    # branch above, plus e.g. CMYK JPEGs and mode-'L'/'P' non-PNG files).
    if image.mode != 'RGB':
        image = image.convert('RGB')
    return image, np.array(image)
I usually call it from other files without error.
# read input image as numpy array
# NOTE(review): jpg_image_open returns (PIL.Image, numpy array); fromarray
# with mode 'RGB' below requires the array to be H x W x 3 uint8 — it fails
# with "not enough image data" when the loaded image stayed in mode 'P'/'L'.
loaded_img, in_image = create_my_tf_record_util.jpg_image_open(img_file)
PIL.Image.fromarray(in_image, 'RGB').save(out_img_file)
It works great for all images except, if the image is in mode 'P', 'L'.
Traceback (most recent call last):
File "dataset_tools/create_my_tf_record_coco.py", line 322, in thread_cube_map_annotation_png
PIL.Image.fromarray(in_image, 'RGB').save(out_img_file)
File "/home/mazhar/miniconda3/envs/mytfenv/lib/python3.6/site-packages/PIL/Image.py", line 2554, in fromarray
return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
File "/home/mazhar/miniconda3/envs/mytfenv/lib/python3.6/site-packages/PIL/Image.py", line 2497, in frombuffer
return frombytes(mode, size, data, decoder_name, args)
File "/home/mazhar/miniconda3/envs/mytfenv/lib/python3.6/site-packages/PIL/Image.py", line 2430, in frombytes
im.frombytes(data, decoder_name, args)
File "/home/mazhar/miniconda3/envs/mytfenv/lib/python3.6/site-packages/PIL/Image.py", line 812, in frombytes
raise ValueError("not enough image data")
ValueError: not enough image data

Related

How to user a tensorflow server saved model with string tensor input to predict in the local machine?

I'm trying to run saved serving models in my local machine. However, it takes string tensor as input, and I'm having trouble converting the images to the correct string format.
To load the model I use:
# Load the SavedModel from disk and grab its default inference signature.
saved_model = tf.saved_model.load('model/1/')
inf_model = saved_model.signatures['serving_default']
The model has the following input-output structure:
inputs {
key: "encoded"
value {
name: "serving_default_encoded:0"
dtype: DT_STRING
tensor_shape {
}
}
}
outputs {
key: "output_0"
value {
name: "StatefulPartitionedCall:0"
dtype: DT_FLOAT
tensor_shape {
dim {
size: 19451
}
}
}
}
method_name: "tensorflow/serving/predict"
To process the image I use this:
# Read the raw encoded bytes of the image file.
img = tf.io.read_file(path)
# Decodes the image to W x H x 3 shape tensor with type of uint8
img = tf.io.decode_image(img, channels=3)
img = tf.image.resize_with_pad(img, 224, 224)
# NOTE(review): at this point `img` is a decoded float tensor of pixel
# values — no longer encoded JPEG/PNG bytes; relevant to the error below.
img = tf.image.convert_image_dtype(img, tf.float32)
And I try to convert it to string tensor format like this:
# NOTE(review): this base64-encodes the RAW float tensor, not an encoded
# image file. The serving signature's DecodeImage op expects JPEG/PNG/GIF/BMP
# file bytes (e.g. the output of tf.io.read_file or tf.io.encode_jpeg),
# which is why it raises "Unknown image file format".
img_encoded = base64.urlsafe_b64encode(img).decode("utf-8")
img_encoded = tf.constant(img_encoded)
Predicting:
pred = inf_model(encoded=enc)['sequential_1'][0]
However, I get the following error:
Traceback (most recent call last):
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/spyder_kernels/py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "/home/james/Desktop/Project/dev_test/inference.py", line 79, in <module>
res = inf_model(encoded=enc)['sequential_1'][0]
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 1669, in __call__
return self._call_impl(args, kwargs)
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 1678, in _call_impl
return self._call_with_structured_signature(args, kwargs,
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 1759, in _call_with_structured_signature
return self._call_flat(
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/tensorflow/python/saved_model/load.py", line 115, in _call_flat
return super(_WrapperFunction, self)._call_flat(args, captured_inputs,
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 1918, in _call_flat
return self._build_call_outputs(self._inference_function.call(
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 555, in call
outputs = execute.execute(
File "/home/james/anaconda3/envs/james/lib/python3.8/site-packages/tensorflow/python/eager/execute.py", line 59, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
InvalidArgumentError: Unknown image file format. One of JPEG, PNG, GIF, BMP required.
[[{{node StatefulPartitionedCall/decode_image/DecodeImage}}]] [Op:__inference_signature_wrapper_129216]
Function call stack:
signature_wrapper
The error is due to image format, which is not of JPEG, PNG, GIF, BMP formats.
The images might have an extension name jpg but be in say a tiff format. As the error states that the image file format is unknown, kindly check the image type and delete the images which are not of JPEG, PNG, GIF, BMP type from your dataset using the code below;
import os
import cv2
import imghdr
def check_images(s_dir, ext_list):
    """Scan ``s_dir/<class>/<file>`` and report files that are not valid images.

    :param s_dir: dataset root containing one sub-directory per class.
    :param ext_list: acceptable image types as reported by ``imghdr.what``
        (e.g. ``['jpg', 'png']``).
    :return: tuple ``(bad_images, bad_ext)`` — paths of files that are not
        acceptable/decodable images, and the unexpected types encountered.
    """
    bad_images = []
    bad_ext = []
    s_list = os.listdir(s_dir)
    for klass in s_list:
        klass_path = os.path.join(s_dir, klass)
        print('processing class directory ', klass)
        if os.path.isdir(klass_path):
            file_list = os.listdir(klass_path)
            for f in file_list:
                f_path = os.path.join(klass_path, f)
                # imghdr sniffs the file CONTENT, not the extension.
                # NOTE(review): imghdr is deprecated since Python 3.11.
                tip = imghdr.what(f_path)
                if ext_list.count(tip) == 0:
                    # BUG FIX: record the path once and also populate
                    # bad_ext, which was returned but never appended to.
                    bad_images.append(f_path)
                    if tip not in bad_ext:
                        bad_ext.append(tip)
                elif os.path.isfile(f_path):
                    # Acceptable type — double-check OpenCV can decode it.
                    # (elif avoids the original double-append of f_path.)
                    try:
                        img = cv2.imread(f_path)
                        shape = img.shape
                    except Exception:
                        print('file ', f_path, ' is not a valid image file')
                        bad_images.append(f_path)
                else:
                    # BUG FIX: grammar in the message ("you a sub directory").
                    print('*** fatal error, you have a sub directory ', f, ' in class directory ', klass)
        else:
            print('*** WARNING*** you have files in ', s_dir, ' it should only contain sub directories')
    return bad_images, bad_ext
# Scan the dataset directory and list any files that are not valid images.
source_dir = r'c:\temp\people\storage'
good_exts = ['jpg', 'png', 'jpeg', 'gif', 'bmp']  # list of acceptable extensions
bad_file_list, bad_ext_list = check_images(source_dir, good_exts)
if bad_file_list:
    print('improper image files are listed below')
    for bad_file in bad_file_list:
        print(bad_file)
else:
    print(' no improper image files were found')
Removing such images from the dataset will help.

pillow can not draw text with fill

I am drawing text onto an image with a fill colour set and I get an error; if I omit the fill argument, nothing visible is drawn on my image.
from PIL import Image, ImageDraw
# NOTE(review): test.png presumably opens in palette ('P') mode; drawing
# with a named colour then forces a palette-slot allocation, which is what
# raises the IndexError in the traceback below — confirm with img.mode.
img = Image.open('test.png')
d = ImageDraw.Draw(img)
d.text((100, 100), "HELLO", fill="red")
img.save('testssss.png')
Traceback (most recent call last):
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/PIL/ImagePalette.py", line 99, in getcolor
return self.colors[color]
KeyError: (255, 0, 0)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 20, in <module>
d.text((100, 100), "HELLO", fill="red")
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/PIL/ImageDraw.py", line 455, in text
ink = getink(fill)
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/PIL/ImageDraw.py", line 403, in getink
ink, fill = self._getink(fill)
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/PIL/ImageDraw.py", line 111, in _getink
ink = self.palette.getcolor(ink)
File "/opt/virtualenvs/python3/lib/python3.8/site-packages/PIL/ImagePalette.py", line 109, in getcolor
self.palette[index + 256] = color[1]
IndexError: bytearray index out of range
The image:
my system : replit(linux)
Try, (255,0,0) instead of red.
from PIL import Image, ImageDraw
img = Image.open('test.png')
d = ImageDraw.Draw(img)
# Use an explicit RGB tuple instead of the colour name "red".
d.text((100, 100), "HELLO", fill=(255,0,0))
img.save('testssss.png')
You should also look at the size of your image, it might be too small for the text you are using. You can try reducing from (100,100) to maybe (10,10) first and see if it works.
It could also be that your image uses a colour palette with fewer than 256 entries, so there is no free slot left to allocate the new colour. You can also try converting your image to an RGB image with the following command before you add the text:
img = img.convert("RGB")

can't save JPG image with alpha

# Build an RGBA overlay: background pixels (seg_map == 0) become opaque
# white, every other pixel keeps its colour from resized_image.
width, height = resized_image.size
dummyImg = np.zeros([height, width, 4], dtype=np.uint8)
for x in range(width):
    for y in range(height):
        color = seg_map[y, x]
        (r, g, b) = resized_image.getpixel((x, y))
        if color == 0:
            dummyImg[y, x] = [255, 255, 255, 255]
        else:
            dummyImg[y, x] = [r, g, b, 255]
img = Image.fromarray(dummyImg)
outputFilePath = '15d09a689ca0d4 Mod.jpg'
# BUG FIX: JPEG cannot store an alpha channel, so saving a mode-'RGBA'
# image raised "cannot write mode RGBA as JPEG". Every alpha value above
# is 255 (fully opaque), so dropping the channel loses nothing; save as
# PNG instead if real transparency is ever needed.
img.convert('RGB').save(outputFilePath)
Hello, I am getting this error while trying to save image as "JPG", I included alpha channels in dummyimg.
Traceback (most recent call last):
File "/home/timmy/.local/lib/python3.6/site-packages/PIL/JpegImagePlugin.py", line 620, in _save
rawmode = RAWMODE[im.mode]
KeyError: 'RGBA'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/timmy/Desktop/image-background-removal-master/impr.py", line 79, in <module>
img.save(outputFilePath)
File "/home/timmy/.local/lib/python3.6/site-packages/PIL/Image.py", line 2007, in save
save_handler(self, fp, filename)
File "/home/timmy/.local/lib/python3.6/site-packages/PIL/JpegImagePlugin.py", line 622, in _save
raise IOError("cannot write mode %s as JPEG" % im.mode)
OSError: cannot write mode RGBA as JPEG
I checked GitHub issue cannot write mode RGBA as JPEG (4.2.0) , they said it is solved and is now doable
JPEG file format cannot handle transparency. To save an image with color and transparency you need to use another format (e.g. PNG).
You can save to JPEG only if you drop the alpha channel and make all pixels opaque.

TypeError: dst is not a numpy array, neither a scalar

I use anaconda2 in python script, but it showed me these error information:
Traceback (most recent call last):
File "main.py", line 16, in <module>
clip = source.fl_image(lane_detector.run)
File "E:\Anaconda2\Lib\site-packages\moviepy\video\VideoClip.py", line 533, in fl_image
return self.fl(lambda gf, t: image_func(gf(t)), apply_to)
File "E:\Anaconda2\Lib\site-packages\moviepy\Clip.py", line 136, in fl
newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
File "<decorator-gen-57>", line 2, in set_make_frame
File "E:\Anaconda2\Lib\site-packages\moviepy\decorators.py", line 14, in outpl ace
f(newclip, *a, **k)
File "E:\Anaconda2\Lib\site-packages\moviepy\video\VideoClip.py", line 694, in set_make_frame
self.size = self.get_frame(0).shape[:2][::-1]
File "<decorator-gen-14>", line 2, in get_frame
File "E:\Anaconda2\Lib\site-packages\moviepy\decorators.py", line 89, in wrapp er
return f(*new_a, **new_kw)
File "E:\Anaconda2\Lib\site-packages\moviepy\Clip.py", line 95, in get_frame
return self.make_frame(t)
File "E:\Anaconda2\Lib\site-packages\moviepy\Clip.py", line 136, in <lambda>
newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
File "E:\Anaconda2\Lib\site-packages\moviepy\video\VideoClip.py", line 533, in <lambda>
return self.fl(lambda gf, t: image_func(gf(t)), apply_to)
File "C:\Users\shihaohou\Desktop\lane-line-detection-master\detector.py", line 655, in run
warped = self.__perspective_transform(preprocessed,pers_mat)
File "C:\Users\shihaohou\Desktop\lane-line-detection-master\detector.py", line 328, in __perspective_transform
warped = cv2.warpPerspective(img, mat, img_size, cv2.INTER_LINEAR)
TypeError: dst is not a numpy array, neither a scalar
The relevant code in detector.py is:
def __perspective_transform(self, img, mat):
    '''
    Method to transform the image
    :param img: the image to be transformed
    :param mat: the perspective matrix
    :return: the transformed image
    '''
    img_size = (img.shape[1], img.shape[0])
    # BUG FIX: cv2.warpPerspective(src, M, dsize, dst=None, flags=...) —
    # passing cv2.INTER_LINEAR positionally hands it to the `dst` output
    # parameter, raising "TypeError: dst is not a numpy array, neither a
    # scalar". The interpolation mode must go in the `flags` keyword.
    warped = cv2.warpPerspective(img, mat, img_size, flags=cv2.INTER_LINEAR)
    return warped
# NOTE(review): excerpt from the run() method of the lane detector class;
# indentation restored from the flattened paste.
if self.ENABLE_LOGS:
    cv2.imwrite('./test/def_' + str(self.processed_frames) + '.jpg', img)
# undistort the image using the calibrated values
undistorted = self.__undistort(img,self.mtx, self.dist)
# pass the distortion corrected image through the threshold pipeline
preprocessed = self.__prepocess(undistorted)
if self.ENABLE_LOGS:
    # NOTE(review): this writes the raw `img` again, not `preprocessed` —
    # presumably a copy-paste slip; confirm which frame should be logged.
    cv2.imwrite('./debug/preprocessed_' + str(self.processed_frames) + '.jpg', img)
# get the current region of interest and load the perspective and inverse perspective matrix
src, dest = self.__get_current_roi()
pers_mat, inv_mat = self.__load_perspective_matrix(src, dest)
# transform the thresholded image to a bird's eye view
warped = self.__perspective_transform(preprocessed,pers_mat)
I looked the cv2.warpPerspective function,does this error means this function isn't use correctly? Thank you for help!
You need to call it as:
warped = cv2.warpPerspective(img, mat, img_size, flags=cv2.INTER_LINEAR)

PIL crop and paste problem: Cropping doesn't create a cropped image

I'm trying to crop an image and then paste the cropped image into the centre of another image. Ideally I'd like the cropped image to be smaller than the image its being pasted on so that there is a border around the pasted image but I don't know if that's possible.
Here's what I've tried (along with the resulting error message):
>>> import Image
>>> grey = Image.new('RGB', (200, 200), "grey")
>>> House = Image.open("House01.jpg")
>>> print grey.size, grey.mode, grey.format
>>>(200, 200) RGB None
>>> print House.size, House.mode, House.format
>>>(300, 300) RGB JPEG
>>> box = (25, 25, 25, 25)
>>> House.crop(box)
>>>Image._ImageCrop image mode=RGB size=0x0 at 0x11AD210>
>>> region = House.crop(box)
>>> region.show()
>>>Traceback (most recent call last):
>>> File "<pyshell#28>", line 1, in <module>
region.show()
>>> File "C:\Python26\lib\site-packages\PIL\Image.py", line 1483, in show
_show(self, title=title, command=command)
>>> File "C:\Python26\lib\site-packages\PIL\Image.py", line 2123, in _show
apply(_showxv, (image,), options)
>>> File "C:\Python26\lib\site-packages\PIL\Image.py", line 2127, in _showxv
apply(ImageShow.show, (image, title), options)
>>> File "C:\Python26\lib\site-packages\PIL\ImageShow.py", line 41, in show
if viewer.show(image, title=title, **options):
>>> File "C:\Python26\lib\site-packages\PIL\ImageShow.py", line 66, in show
self.show_image(image, **options)
>>> File "C:\Python26\lib\site-packages\PIL\ImageShow.py", line 85, in show_image
return self.show_file(self.save_image(image), **options)
>>> File "C:\Python26\lib\site-packages\PIL\ImageShow.py", line 81, in save_image
return image._dump(format=self.get_format(image))
>>> File "C:\Python26\lib\site-packages\PIL\Image.py", line 493, in _dump
self.save(file, format)
>>> File "C:\Python26\lib\site-packages\PIL\Image.py", line 1439, in save
save_handler(self, fp, filename)
>>> File "C:\Python26\lib\site-packages\PIL\BmpImagePlugin.py", line 242, in _save
ImageFile._save(im, fp, [("raw", (0,0)+im.size, 0, (rawmode, stride, -1))])
>>> File "C:\Python26\lib\site-packages\PIL\ImageFile.py", line 498, in _save
e.setimage(im.im, b)
>>>SystemError: tile cannot extend outside image
I can see that the 'region' size has been made (0,0) but I can't understand why.
Any help on this would be great thanks
The PIL documentation for the crop method states:
Returns a rectangular region from the
current image. The box is a 4-tuple
defining the left, upper, right, and
lower pixel coordinate.
This is a lazy operation. Changes to
the source image may or may not be
reflected in the cropped image. To get
a separate copy, call the load method
on the cropped copy.
So, you should try region = House.crop(box).load() to make sure you get an actual cropped copy.
UPDATE:
Actually, it seems the above only works if you're using PIL 1.1.6 and later. In versions before that, I guess load() doesn't return anything so you can't chain the operations. In that case, use:
region = House.crop(box)
region.load()  # crop() is lazy in old PIL; load() forces an actual copy
I had a similar error that I could not seem to solve, but I then realized as you did that it had to do with the arguments passed in to Image.crop(). You can see the size of your image is (0,0) so there is nothing to show. You are setting bounds from point (25,25) to (25,25).
If you need a 25x25 cropped image(starting from the top left):
```
>>> import Image
>>> grey = Image.new('RGB', (200, 200), "grey")
>>> House = Image.open("House01.jpg")
>>> print grey.size, grey.mode, grey.format
>>>(200, 200) RGB None
>>> print House.size, House.mode, House.format
>>>(300, 300) RGB JPEG
>>> box = (0, 0, 25, 25)
>>> House.crop(box)
>>>Image._ImageCrop image mode=RGB size=0x0 at 0x11AD210>
>>> region = House.crop(box)
>>> region.show()
```
If you want to start from the center or another point I would use this link as a reference:

Categories

Resources