Why does image.getdata() return a different result the second time I invoke it?
from PIL import Image

def write():
    image = Image.open('image.jpg')
    newimage = Image.new(image.mode, image.size)
    pixels = [p for p in image.getdata()]
    for i in range(100):
        pixels[i] = (255, 255, 255)
    newimage.putdata(pixels)
    newimage.save('newimage.jpg')
    print(list(newimage.getdata())[0:10])

def read():
    image = Image.open('newimage.jpg')
    pixels = [p for p in image.getdata()]
    print(list(image.getdata())[0:10])

write()
read()
It gives me the following result: the two printed pixel lists do not match. Why does the second set of data differ from the first one?
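This is almost certainly JPEG's lossy compression at work: newimage.save('newimage.jpg') re-encodes the pixel data, so the values read back from disk are not the ones you wrote. A minimal sketch to confirm it, assuming the same image.jpg, that only swaps in a lossless format:

from PIL import Image

image = Image.open('image.jpg')
newimage = Image.new(image.mode, image.size)
pixels = list(image.getdata())
for i in range(100):
    pixels[i] = (255, 255, 255)
newimage.putdata(pixels)
newimage.save('newimage.png')  # PNG is lossless, unlike JPEG
print(list(newimage.getdata())[0:10])
print(list(Image.open('newimage.png').getdata())[0:10])  # now identical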
I use OpenCV to count the number of white and black pixels in a picture (I have converted the images to black and white), and every time I run my code it returns 0. The code is:
output_path = "/content/drive/MyDrive/dataset_demo/result_pic"
for pic in os.listdir(output_path):
    if pic.endswith('.jpg'):
        image = cv2.imread(pic, cv2.IMREAD_UNCHANGED)
        number_of_white_pix = np.sum(image == 255)
        number_of_black_pix = np.sum(image == 0)
        number_of_total = number_of_white_pix + number_of_black_pix
        number_of_ratio = number_of_white_pix / number_of_black_pix
        print(number_of_total)
The pic variable contains only the file name of the image, but cv2.imread needs the full path to read it. Join the directory and the file name before calling cv2.imread:
import os
import cv2
import numpy as np

output_path = "/content/drive/MyDrive/dataset_demo/result_pic"
for pic in os.listdir(output_path):
    if pic.endswith('.jpg'):
        pic = os.path.join(output_path, pic)  # full path to the image
        image = cv2.imread(pic, cv2.IMREAD_UNCHANGED)
        number_of_white_pix = np.sum(image == 255)
        number_of_black_pix = np.sum(image == 0)
        number_of_total = number_of_white_pix + number_of_black_pix
        number_of_ratio = number_of_white_pix / number_of_black_pix
        print(number_of_total)
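One caveat worth adding: if an image contains no black pixels, number_of_black_pix is 0, and the ratio line will either raise ZeroDivisionError or, with NumPy scalar operands, emit a divide-by-zero warning and produce inf. A small hedged guard, keeping the rest of the loop unchanged:

if number_of_black_pix > 0:
    number_of_ratio = number_of_white_pix / number_of_black_pix
else:
    number_of_ratio = None  # no black pixels, so the ratio is undefined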
from PIL import Image, ImageSequence
import PIL

GIF_PATH = Image.open(r"C:\Users\me_\My\Filw\Path.gif")
IMAGE_PATH = Image.open(r"base.png")
frames = []
for frame in ImageSequence.Iterator(GIF_PATH):
    output = IMAGE_PATH.copy()
    frame_px = frame.load()
    output_px = output.load()
    transparent_foreground = frame.convert('RGBA')
    transparent_foreground_px = transparent_foreground.load()
    for x in range(frame.width):
        for y in range(frame.height):
            if frame_px[x, y] in (frame.info["background"], frame.info["transparency"]):
                continue
            output_px[x, y] = transparent_foreground_px[x, y]
    output = output.resize([436, 249], PIL.Image.NEAREST)
    frames.append(output)

frames[0].save('output.gif', save_all=True, append_images=frames[1:], optimize=False, duration=40, loop=0)
How would I paste the image at a specific location? I'm pretty new, so I'm trying to get a grasp of this. I tried ImageChops.offset, but the image just wrapped around instead of moving.
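Image.paste takes a position argument, which is probably what you want here instead of ImageChops.offset (offset shifts pixels cyclically, hence the wrap-around). A minimal sketch, where "frame.png" and the (50, 30) offset are illustrative assumptions:

from PIL import Image

base = Image.open("base.png").convert("RGBA")
fg = Image.open("frame.png").convert("RGBA")  # hypothetical single frame
base.paste(fg, (50, 30), fg)  # (x, y) is the top-left corner; fg doubles as the alpha mask
base.save("pasted.png")

In the loop above, that would mean pasting transparent_foreground onto output at the desired (x, y) instead of copying pixels one by one.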
I'm trying to write a program in Python that takes an image and replaces the pixels with images, based on each pixel's lightness value. I figured I'd start by following a tutorial on converting images to ASCII art; then I can replace the ASCII characters with images. Here's what this attempt looks like:
https://pastebin.com/VNFWd9xN
It's a bit quick and dirty, just to see if I can make it work, but I think you'll get the idea.
So, a couple of issues I ran into.
The first and biggest one: I get "TypeError: sequence item 0: expected str instance, JpegImageFile found". I understand that the program expects a string and gets an image instead, but how do I solve that?
Lastly, more of a parenthesis really: I was playing around with the save function and could not get it to save ascii_image to a JPG.
Anyhow, would really appreciate some guidance here.
Thank you.
import PIL.Image
from PIL import Image

img1 = Image.open("image1.jpg")
img2 = Image.open("image2.jpg")
img3 = Image.open("image3.jpg")
img4 = Image.open("image4.jpg")
img5 = Image.open("image5.jpg")
img6 = Image.open("image6.jpg")
img7 = Image.open("image7.jpg")
img8 = Image.open("image8.jpg")
img9 = Image.open("image9.jpg")
img10 = Image.open("image10.jpg")
img11 = Image.open("image11.jpg")
img12 = Image.open("image12.jpg")

# ascii characters used to build the output text
#ASCII_CHARS = ["#", "#", "S", "%", "?", "*", "+", ";", ":", ",", "."]
ASCII_CHARS = [img1, img2, img3, img4, img5, img6, img8, img9, img10, img11, img12]

# resize image according to a new width
def resize_image(image, new_width=80):
    width, height = image.size
    ratio = height/width
    new_height = int(new_width * ratio)
    resized_image = image.resize((new_width, new_height))
    return(resized_image)

# convert each pixel to greyscale
def grayify(image):
    grayscale_image = image.convert("L")
    return(grayscale_image)

# convert pixels to a string of ASCII characters
def pixels_to_ascii(image):
    pixels = image.getdata()
    characters = "".join([ASCII_CHARS[pixel//25] for pixel in pixels])
    return(characters)

def main(new_width=80):
    # attempt to open image from user-input
    path = input("Enter path to image:\n")
    try:
        image = PIL.Image.open(path)
    except:
        print(path, " is not a valid pathname to an image")
        return
    # convert image to ASCII
    new_image_data = pixels_to_ascii(grayify(resize_image(image)))
    # format
    pixel_count = len(new_image_data)
    ascii_image = "\n".join([new_image_data[index:(index+new_width)] for index in range(0, pixel_count, new_width)])
    # print result
    print(ascii_image)
    # save
    #with Image.open("ascii_image.jpg") as f:
    #    f.write(ascii_image)

main()
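The TypeError comes from "".join(), which can only concatenate strings; it cannot join JpegImageFile objects. One way around it, sketched below under the assumption that the tile list has the same 11 entries as ASCII_CHARS (images_to_mosaic, tile_w, and tile_h are illustrative names), is to skip strings entirely and paste each tile image onto a canvas. This also sidesteps the save problem, since the result is a real image that can be written with save():

from PIL import Image

def images_to_mosaic(gray_image, tiles, tile_w=8, tile_h=8):
    # One tile per pixel: brightness picks the tile, position picks where it lands.
    w, h = gray_image.size
    canvas = Image.new("RGB", (w * tile_w, h * tile_h))
    for i, p in enumerate(gray_image.getdata()):
        tile = tiles[p // 25].resize((tile_w, tile_h))
        canvas.paste(tile, ((i % w) * tile_w, (i // w) * tile_h))
    return canvas

# usage, reusing the helpers above:
# mosaic = images_to_mosaic(grayify(resize_image(image)), ASCII_CHARS)
# mosaic.save("mosaic.jpg")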
When I am processing a bunch of images, on one of them I get this error:
File "/home/tensorflowpython/firstmodel/yololoss.py", line 153, in data_generator
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
File "/home/tensorflowpython/firstmodel/yololoss.py", line 226, in get_random_data
image = image.resize((nw,nh), Image.BICUBIC)
File "/home/tensorflowpython/kenv/lib/python3.6/site-packages/PIL/Image.py", line 1858, in resize
self.load()
File "/home/tensorflowpython/kenv/lib/python3.6/site-packages/PIL/ImageFile.py", line 247, in load
"(%d bytes not processed)" % len(b)
OSError: image file is truncated (25 bytes not processed)
I have already tried the solution suggested here, but it doesn't work. My code looks like this:
from PIL import Image
import numpy as np

def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
    Image.LOAD_TRUNCATED_IMAGES = True
    line = annotation_line.split()
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape
    box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])
    try:
        image.load()
    except IOError:
        pass  # You can always log it to a logger
    if not random:
        # resize image
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        dx = (w-nw)//2
        dy = (h-nh)//2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image)/255.
        # correct boxes
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes:
                box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]]*scale + dx
            box[:, [1, 3]] = box[:, [1, 3]]*scale + dy
            box_data[:len(box)] = box
        return image_data, box_data

    # resize image; rand() is a helper defined elsewhere in the project
    new_ar = w/h * rand(1-jitter, 1+jitter)/rand(1-jitter, 1+jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale*h)
        nw = int(nh*new_ar)
    else:
        nw = int(scale*w)
        nh = int(nw/new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)  # error occurs here
The difference between my error and the previous solution is that mine raises an OSError, while the solution is for an IOError.
EDIT: I have figured out which image is causing this error; it can be downloaded from this link.
I tried the solution that you linked with the truncated image and it worked. You made a slight mistake when applying it: you have to set ImageFile.LOAD_TRUNCATED_IMAGES = True, not Image.LOAD_TRUNCATED_IMAGES.
LOAD_TRUNCATED_IMAGES does not originally exist in the Image module, so Image.LOAD_TRUNCATED_IMAGES = True creates a new attribute that the library never reads.
So I think you just have to do this:
from PIL import ImageFile, Image

ImageFile.LOAD_TRUNCATED_IMAGES = True

image = Image.open("00090.jpg")
# resize no longer fails; note that resize expects (width, height)
image.resize((w, h), Image.BICUBIC)
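As I understand it, this works because Image.resize() triggers load() on the underlying ImageFile object, and it is ImageFile.load() that consults the module-level LOAD_TRUNCATED_IMAGES flag; that is why the setting must live on ImageFile rather than Image.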
import sys, Image, scipy, cv2, numpy
from scipy.misc import imread
from cv2 import cv
from SRM import SRM

def ndarrayToIplImage(source):
    """Conversion of ndarray to iplimage"""
    image = cv.CreateImageHeader((source.shape[1], source.shape[0]), cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, source.tostring(), source.dtype.itemsize * 3 * source.shape[1])
    return image

"""Main Program"""
filename = "snap.jpeg"
Q = 64

im = imread(filename)
name = filename[:-4]

img = Image.fromarray(im)
if img.size[0] > 200 or img.size[1] > 200:
    ratio = img.size[0]/img.size[1]
    size = int(ratio*200), 200
    img = numpy.array(img.resize(size, Image.ANTIALIAS))

srm = SRM(img, Q)
srm.initialization()
srm.segmentation()
classes, map = srm.map()

"""Converting ndarray to PIL Image to iplimage"""
pil_img = Image.fromarray(map)
cv_img = cv.CreateImageHeader(pil_img.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(cv_img, pil_img.tostring(), pil_img.size[0]*3)
print type(cv_img)  ## prints <type 'cv2.cv.iplimage'>

"""
cv_img is of type iplimage but still gives an error when using cv.ShowImage()
or cv.SaveImage().
There is no error displayed. The console just hangs...
"""
I am using the SRM (Statistical Region Merging) package available at this page.
I have just changed the example program given in the package. I had to convert the type returned by the SRM functions to iplimage. The package itself raises no error; the problem is somewhere in the OpenCV calls.
This is the image that was saved (with cv.SaveImage()) after the console hung and then closed.
I tried cv2.imwrite() and got this as the result:
This is the image that should have been saved. I used scipy.misc.imsave('image.jpg', map) to save it.
Why do you use IplImage and PIL? The SRM library reads a NumPy array, and you already get one from cv2.imread(image). If you need to resize your image, use the OpenCV function cv2.resize(...), and save the result with cv2.imwrite(...). Your code should look like this:
import sys, cv2, numpy
from SRM import SRM

"""Main Program"""
filename = "snap.jpeg"
Q = 64

img = cv2.imread(filename)
name = filename[:-4]

if img.shape[0] > 200 or img.shape[1] > 200:
    ratio = img.shape[0] * 1. / img.shape[1]
    size = (int(ratio * 200), 200)
    img = cv2.resize(img, size, interpolation=cv2.INTER_LANCZOS4)

srm = SRM(img, Q)
srm.initialization()
srm.segmentation()
classes, srmMap = srm.map()  # map is a Python built-in, so use a different variable name
srmMap = srmMap.astype('uint8')  # or you can try other OpenCV-supported types

# I suppose that srmMap is your image returned as a numpy array
cv2.imwrite('name.jpeg', srmMap)
# or
cv2.imshow('image', srmMap)
cv2.waitKey(0)
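One note on the cv2.imshow branch: cv2.waitKey(0) blocks until a key is pressed while the image window has focus, which can look like a hang. Press any key to continue, and call cv2.destroyAllWindows() afterwards to close the window.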