How can I crop images that look like this and save them as 3 different images?
The issue is that the images differ in size and are non-proportional, so I want code that dynamically cuts away the black borders but keeps the black parts that are inside the picture itself.
Here is the desired outcome:
Below is the sample code I've made which works only for one specific image.
from PIL import Image
im = Image.open(r"image.jpg")
# Setting the points for cropped image1
# im1 = im.crop((left, top, right, bottom))
im1 = im.crop((...))
im2 = im.crop((...))
im3 = im.crop((...))
im1.save(r"image1.jpg")
im2.save(r"image2.jpg")
im3.save(r"image3.jpg")
Finally I've found the solution. Here is what I did:
from PIL import Image, ImageChops

def RemoveBlackBorders(img):
    bg = Image.new(img.mode, img.size, img.getpixel((0, 0)))
    diff = ImageChops.difference(img, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        return img.crop(bbox)
    return img  # no border found; return the image unchanged
# Open an image in RGB mode
im = Image.open(r"C:\Path\Image.jpg")
# remove the black borders
im = RemoveBlackBorders(im)
# get the horizontal midpoint from the size
width, height = im.size
mwidth = width / 2
# crop each figure relative to the midpoint
# crop((left, upper, right, lower))
im1 = im.crop((0, 0, mwidth - 135, height))
im2 = im.crop((mwidth - 78, 0, mwidth + 84, height))
im3 = im.crop((mwidth + 135, 0, width, height))
I found the function to remove the borders here.
Although the solution is not completely dynamic, it still solves my problem with roughly 90% accuracy. But I believe there should be a more universal approach to this problem.
If the areas always have the same size and the same top and bottom coordinates, the following should work:
The coordinates for the crops can be retrieved by calculating the sums per rows and per columns, then analyzing them.
import cv2
import numpy as np
im = cv2.imread(image_path)
sum_of_rows = np.sum(im, axis=(1,2))
sum_of_cols = np.sum(im, axis=(0,2))
The top and bottom can be found by calculating the sum for each row (each row sum being R+G+B over the row, so the value is zero for an all-black row), then looking for the first and the last values different from zero. These indicate the top and the bottom.
top = np.argmax(sum_of_rows > 0)
bottom = top + np.argmax(sum_of_rows[top:]==0)
The same can be done with the sum for each column, but here you have to check for multiple left and right values, as in the sketch below.
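A minimal sketch of that column analysis, assuming the areas are separated by fully black columns (it reuses im, top, bottom and sum_of_cols from above):
col_is_content = sum_of_cols > 0
edges = np.diff(col_is_content.astype(int))
lefts = np.where(edges == 1)[0] + 1    # black -> content transitions
rights = np.where(edges == -1)[0] + 1  # content -> black transitions
if col_is_content[0]:
    lefts = np.r_[0, lefts]
if col_is_content[-1]:
    rights = np.r_[rights, len(col_is_content)]
# each (left, right) pair bounds one area; crop and save it
for i, (left, right) in enumerate(zip(lefts, rights), start=1):
    cv2.imwrite(f"image{i}.jpg", im[top:bottom, left:right])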
I'm trying to resize a batch of grayscale images that are 256 x N pixels (N varies, but is always ≤256).
My intention is to downscale the images.
The resize would have to output a square (1:1) image, with:
resized image centered vertically
aspect ratio maintained
remaining pixels rendered black
Visually this would be the desired result:
I have tried creating a numpy zeros array with the target size (e.g. 200 x 200), but I have not been able to paste the resized image into its vertical center.
Any suggestions using cv2, PIL or numpy are welcome.
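For reference, the vertical-centering paste can be done with plain NumPy slicing. A minimal sketch, assuming a 200 x 200 target and placeholder file names:
import cv2
import numpy as np

target = 200  # assumed square side
img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)  # 256 x N source

# scale so the longer side becomes `target`, keeping the aspect ratio
scale = target / max(img.shape)
resized = cv2.resize(img, (int(img.shape[1] * scale), int(img.shape[0] * scale)))

# paste into a black square, centered
canvas = np.zeros((target, target), dtype=np.uint8)
y = (target - resized.shape[0]) // 2
x = (target - resized.shape[1]) // 2
canvas[y:y + resized.shape[0], x:x + resized.shape[1]] = resized
cv2.imwrite("output.png", canvas)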
You can use Pillow to accomplish that:
Code:
from PIL import Image
def make_square(im, min_size=256, fill_color=(0, 0, 0, 0)):
    x, y = im.size
    size = max(min_size, x, y)
    new_im = Image.new('RGBA', (size, size), fill_color)
    new_im.paste(im, (int((size - x) / 2), int((size - y) / 2)))
    return new_im
Test Code:
test_image = Image.open('hLarp.png')
new_image = make_square(test_image)
new_image.show()
For a white background you can do:
new_image = make_square(test_image, fill_color=(255, 255, 255, 255))
Result:
Here is code that solves your question with the OpenCV module (using the NumPy module too):
#Importing modules opencv + numpy
import cv2
import numpy as np
#Reading an image (you can use PNG or JPG)
img = cv2.imread("image.png")
#Getting the bigger side of the image
s = max(img.shape[0:2])
#Creating a dark square with NUMPY
f = np.zeros((s,s,3),np.uint8)
#Getting the centering position
ax,ay = (s - img.shape[1])//2,(s - img.shape[0])//2
#Pasting the 'image' in a centering position
f[ay:img.shape[0]+ay,ax:ax+img.shape[1]] = img
#Showing results (just in case)
cv2.imshow("IMG",f)
#A pause, waiting for any press in keyboard
cv2.waitKey(0)
#Saving the image
cv2.imwrite("img2square.png",f)
cv2.destroyAllWindows()
PIL.ImageOps.pad:
from PIL import Image, ImageOps
with Image.open('hLARP.png') as im:
    im = ImageOps.pad(im, (200, 200), color='black')
    im.save('result.png')
PIL has the thumbnail method which will scale keeping the aspect ratio. From there you just need to paste it centered onto your black background rectangle.
from PIL import Image
def black_background_thumbnail(path_to_image, thumbnail_size=(200, 200)):
    background = Image.new('RGBA', thumbnail_size, "black")
    source_image = Image.open(path_to_image).convert("RGBA")
    source_image.thumbnail(thumbnail_size)
    (w, h) = source_image.size
    background.paste(source_image, ((thumbnail_size[0] - w) // 2, (thumbnail_size[1] - h) // 2))
    return background

if __name__ == '__main__':
    img = black_background_thumbnail('hLARP.png')
    img.convert('RGB').save('tmp.jpg')  # JPEG cannot store alpha, so convert first
    img.show()
from PIL import Image

def reshape(image):
    '''
    Reshapes the non-square image by pasting
    it to the centre of a black canvas of size
    n*n where n is the biggest dimension of
    the non-square image.
    '''
    old_size = image.size
    max_dimension, min_dimension = max(old_size), min(old_size)
    desired_size = (max_dimension, max_dimension)
    position = int(max_dimension / 2) - int(min_dimension / 2)
    blank_image = Image.new("RGB", desired_size, color='black')
    if image.height < image.width:
        blank_image.paste(image, (0, position))
    else:
        blank_image.paste(image, (position, 0))
    return blank_image
Behold! A greatly over-engineered version of Stephen Rauch's answer that contains an interactive element and accounts for odd-pixel padding.
Usage
# Note: PySide2 can also be replaced by PyQt5, PyQt6, PySide6
# Also note! Any of the above are >100MB
pip install utilitys pyside2 pillow
$ python <file>.py --help
usage: <file>.py [-h] [--folder FOLDER] [--ext EXT]
optional arguments:
-h, --help show this help message and exit
--folder FOLDER Folder of images allowed for viewing. Must have at least one image (default: .)
--ext EXT Image extension to look for (default: png)
$ python <file>.py --folder "./path/to/folder/of/your/image(s).png" --ext "jpg"
file.py contents
import argparse
from pathlib import Path
from typing import Tuple, Union, Any

import numpy as np
import pyqtgraph as pg
from PIL import Image
from utilitys import fns, widgets, RunOpts


def pad_to_size(
    image: Image.Image,
    size_wh: Union[int, Tuple[int, int]] = None,
    fill_color: Any = 0,
    **resize_kwargs,
) -> Image.Image:
    """
    Keeps an image's aspect ratio by resizing until the largest side is constrained
    by the specified output size. Then, the deficient dimension is padded until
    the image is the specified size.
    """
    if size_wh is None:
        size_wh = max(image.size)
    if isinstance(size_wh, int):
        size_wh = (size_wh, size_wh)

    im_size_wh = np.array(image.size)
    ratios = im_size_wh / size_wh

    # Resize until the largest side is constrained by the specified output size
    im_size_wh = np.ceil(im_size_wh / ratios.max()).astype(int)
    # Prefer 1-pixel difference in aspect ratio vs. odd padding
    pad_amt = np.array(size_wh) - im_size_wh
    use_ratio_idx = np.argmax(ratios)
    unused_ratio_idx = 1 - use_ratio_idx

    # Sanity check for floating point accuracy: At least one side must match
    # user-requested dimension
    if np.all(pad_amt != 0):
        # Adjust dimension that is supposed to match
        im_size_wh[use_ratio_idx] += pad_amt[use_ratio_idx]

    # Prefer skewing aspect ratio by 1 pixel instead of odd padding
    # If odd, 1 will be added. Otherwise, the dimension remains unchanged
    im_size_wh[unused_ratio_idx] += pad_amt[unused_ratio_idx] % 2

    image = image.resize(tuple(im_size_wh), **resize_kwargs)
    new_im = Image.new("RGB", size_wh, fill_color)
    width, height = image.size
    new_im.paste(image, (int((size_wh[0] - width) / 2), int((size_wh[1] - height) / 2)))
    return new_im


def main(folder=".", ext="png"):
    """
    Parameters
    ----------
    folder: str, Path
        Folder of images allowed for viewing. Must have at least one image
    ext: str, Path
        Image extension to look for
    """
    folder = Path(folder)
    files = fns.naturalSorted(folder.glob(f"*.{ext}"))
    err_msg = f"{folder} must have at least one image file with extension `{ext}`"
    assert len(files), err_msg

    pg.mkQApp()
    viewer = widgets.ImageViewer()

    def readim(file_index=0, try_pad=False, output_w=512, output_h=512):
        if not 0 <= file_index < len(files):
            return  # index out of range; nothing to show
        image = Image.open(files[file_index])
        if try_pad:
            image = pad_to_size(image, (output_w, output_h), fill_color=(255, 255, 255))
        viewer.setImage(np.array(image))

    viewer.toolsEditor.registerFunc(readim, runOpts=RunOpts.ON_CHANGED)
    wc = viewer.widgetContainer()
    readim()
    wc.show()
    pg.exec()


if __name__ == "__main__":
    # Print defaults in help signature
    fmt = dict(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli = fns.makeCli(main, parserKwargs=fmt)
    args = cli.parse_args()
    main(**vars(args))
I'm very much a noob when it comes to image processing :(
I have a bunch of PNG files (300 of them) that have large areas of transparency that I wish to crop. I want to automate the process, obviously, which is why I tried using Python and PIL.
I had a look at the following link, Crop a PNG image to its minimum size, and also tried using NumPy as suggested by this link, Automatically cropping an image with python/PIL, both to no success :( The output files are identical to the input files: no cropping of the transparency, same size. getbbox() returns the same width and height.
Here's a link to one of those images: 98x50button
The image is of a button icon in the shape of a bell. It's drawn in white, so it's hard to see against the transparent background. The expected outcome would be a 20x17 button (with the transparency inside that 20x17 box remaining intact).
Here's the code I'm using:
#!/usr/bin/env python
import sys
import os
from PIL import Image
import numpy as np

def autocrop_image2(image):
    image.load()
    image_data = np.asarray(image)
    image_data_bw = image_data.max(axis=2)
    non_empty_columns = np.where(image_data_bw.max(axis=0) > 0)[0]
    non_empty_rows = np.where(image_data_bw.max(axis=1) > 0)[0]
    cropBox = (min(non_empty_rows), max(non_empty_rows),
               min(non_empty_columns), max(non_empty_columns))
    image_data_new = image_data[cropBox[0]:cropBox[1] + 1, cropBox[2]:cropBox[3] + 1, :]
    new_image = Image.fromarray(image_data_new)
    return new_image

def autocrop_image(image, border=0):
    # Get the bounding box
    bbox = image.getbbox()
    # Crop the image to the contents of the bounding box
    image = image.crop(bbox)
    # Determine the width and height of the cropped image
    (width, height) = image.size
    # Add border
    width += border * 2
    height += border * 2
    # Create a new image object for the output image
    cropped_image = Image.new("RGBA", (width, height), (0, 0, 0, 0))
    # Paste the cropped image onto the new image
    cropped_image.paste(image, (border, border))
    # Done!
    return cropped_image

walk_dir = sys.argv[1]
print('walk_dir = ' + walk_dir)

# If your current working directory may change during script execution, it's recommended to
# immediately convert program arguments to an absolute path. Then the variable root below will
# be an absolute path as well. Example:
# walk_dir = os.path.abspath(walk_dir)
print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))

for root, subdirs, files in os.walk(walk_dir):
    print('--\nroot = ' + root)
    list_file_path = os.path.join(root, 'my-directory-list.txt')
    print('list_file_path = ' + list_file_path)
    with open(list_file_path, 'wb') as list_file:
        for subdir in subdirs:
            print('\t- subdirectory ' + subdir)
        for filename in files:
            file_path = os.path.join(root, filename)
            print('\t- file %s (full path: %s)' % (filename, file_path))
            filename, file_extension = os.path.splitext(filename)
            if file_extension.lower().endswith('.png'):
                # Open the input image
                image = Image.open(file_path)
                # Do the cropping
                # image = autocrop_image(image, 0)
                new_image = autocrop_image2(image)
                # Save the output image
                output = os.path.join("output", filename + ".png")
                print(output)
                new_image.save(output)
Thank you all for the help :)
The issue you're having is that your images contain transparent white pixels, and your code is only going to crop pixels that are both transparent and black. The RGBA values for most of the pixels in your example image are (255, 255, 255, 0).
In autocrop_image2, you're taking the max of the channel values. You probably just want the alpha channel's value directly, so change:
image_data_bw = image_data.max(axis=2)
To:
image_data_bw = image_data[:,:,3]
The rest of the function should then work as intended.
The autocrop_image function has the same problem. The getbbox method returns the bounds of the non-zero pixels, and transparent white pixels are not zero. To fix it, try converting the image from "RGBA" mode to premultiplied alpha "RGBa" mode before finding the bounding box:
bbox = image.convert("RGBa").getbbox()
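For completeness, a hedged end-to-end sketch that crops based only on the alpha channel (the function name and file paths are placeholders):
from PIL import Image
import numpy as np

def autocrop_transparent(path_in, path_out):
    image = Image.open(path_in).convert("RGBA")
    alpha = np.asarray(image)[:, :, 3]           # look only at the alpha channel
    rows = np.where(alpha.max(axis=1) > 0)[0]    # rows with any visible pixel
    cols = np.where(alpha.max(axis=0) > 0)[0]    # columns with any visible pixel
    if rows.size and cols.size:
        image = image.crop((cols.min(), rows.min(), cols.max() + 1, rows.max() + 1))
    image.save(path_out)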
Here is one solution to crop the transparent borders.
Just throw this script in your folder with your batch .png files:
from PIL import Image
import numpy as np
from os import listdir

def crop(image_name):
    pil_image = Image.open(image_name)
    np_array = np.array(pil_image)
    blank_px = [255, 255, 255, 0]
    mask = np_array != blank_px
    coords = np.argwhere(mask)
    x0, y0, z0 = coords.min(axis=0)
    x1, y1, z1 = coords.max(axis=0) + 1
    cropped_box = np_array[x0:x1, y0:y1, z0:z1]
    pil_image = Image.fromarray(cropped_box, 'RGBA')
    print(pil_image.width, pil_image.height)
    pil_image.save(image_name)
    print(image_name)

for f in listdir('.'):
    if f.endswith('.png'):
        crop(f)
Here's a new solution; I just ran into this problem:
You have an RGBA image:
When a pixel's alpha (A) is 0, the pixel should be fully transparent,
but some of your pixels have A equal to 0 while their RGB values are not zero.
Pillow's getbbox() and other functions now fail.
You want to force your RGB to 0 whenever alpha is 0
So:
Make a pure black RGBA image, each pixel being (0, 0, 0, 0)
Make a composite with your image and an RGBA black image, using your
image as a mask.
Wherever your A was 0, your RGB will now be zero
This is a solution; there is probably a lower-memory solution.
Here is the code:
black = Image.new('RGBA', myImage.size)
myImage = Image.composite(myImage, black, myImage)
myCroppedImage = myImage.crop(myImage.getbbox())
I have a large number of images of a fixed size (say 500*500). I want to write a python script which will resize them to a fixed size (say 800*800) but will keep the original image at the center and fill the excess area with a fixed color (say black).
I am using PIL. I can resize the image using the resize function now, but that changes the aspect ratio. Is there any way to do this?
You can create a new image with the desired new size, and paste the old image in the center, then saving it. If you want, you can overwrite the original image (are you sure? ;o)
from PIL import Image
old_im = Image.open('someimage.jpg')
old_size = old_im.size
new_size = (800, 800)
new_im = Image.new("RGB", new_size) ## luckily, this is already black!
box = tuple((n - o) // 2 for n, o in zip(new_size, old_size))
new_im.paste(old_im, box)
new_im.show()
# new_im.save('someimage.jpg')
You can also set the color of the new border with a third argument of Image.new() (for example: Image.new("RGB", new_size, "White"))
Yes, there is.
Make something like this:
from PIL import Image, ImageOps
ImageOps.expand(Image.open('original-image.png'),border=300,fill='black').save('imaged-with-border.png')
You can write the same at several lines:
from PIL import Image, ImageOps
img = Image.open('original-image.png')
img_with_border = ImageOps.expand(img,border=300,fill='black')
img_with_border.save('imaged-with-border.png')
Since you say you have a list of images, you need a loop to process all of them:
from PIL import Image, ImageOps
for i in list_of_images:  # list_of_images holds your file names
    img = Image.open(i)
    img_with_border = ImageOps.expand(img, border=300, fill='black')
    img_with_border.save('bordered-%s' % i)
Alternatively, if you are using OpenCV, they have a function called copyMakeBorder that allows you to add padding to any of the sides of an image. Beyond solid colors, they've also got some cool options for fancy borders like reflecting or extending the image.
import cv2
img = cv2.imread('image.jpg')
color = [101, 52, 152] # 'cause purple!
# border widths; I set them all to 150
top, bottom, left, right = [150]*4
img_with_border = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
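For example, a mirrored border needs no value argument; a quick sketch:
img_reflected = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_REFLECT)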
Sources: OpenCV border tutorial and
OpenCV 3.1.0 Docs for copyMakeBorder
PIL's crop method can actually handle this for you by using numbers that are outside the bounding box of the original image, though it's not explicitly stated in the documentation. Negative numbers for left and top will add black pixels to those edges, while numbers greater than the original width and height for right and bottom will add black pixels to those edges.
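For instance, a minimal sketch that adds a 150-pixel black border on every side (the path and border width are arbitrary):
from PIL import Image

im = Image.open('image.jpg')
bordered = im.crop((-150, -150, im.width + 150, im.height + 150))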
This code accounts for odd pixel sizes:
from PIL import Image

with Image.open('/path/to/image.gif') as im:
    old_size = im.size
    new_size = (800, 800)
    if new_size > old_size:
        # Set number of pixels to expand to the left, top, right,
        # and bottom, making sure to account for even or odd numbers
        if old_size[0] % 2 == 0:
            add_left = add_right = (new_size[0] - old_size[0]) // 2
        else:
            add_left = (new_size[0] - old_size[0]) // 2
            add_right = ((new_size[0] - old_size[0]) // 2) + 1
        if old_size[1] % 2 == 0:
            add_top = add_bottom = (new_size[1] - old_size[1]) // 2
        else:
            add_top = (new_size[1] - old_size[1]) // 2
            add_bottom = ((new_size[1] - old_size[1]) // 2) + 1
        left = 0 - add_left
        top = 0 - add_top
        right = old_size[0] + add_right
        bottom = old_size[1] + add_bottom
        # By default, the added pixels are black
        im = im.crop((left, top, right, bottom))
Instead of the 4-tuple, you could use a 2-tuple to add the same number of pixels on the left/right and top/bottom, or a 1-tuple to add the same number of pixels to all sides.
It is important to consider old dimension, new dimension and their difference here. If the difference is odd (not even), you will need to specify slightly different values for left, top, right and bottom borders.
Assume the old dimension is ow,oh and new one is nw,nh.
So, this would be the answer:
from PIL import Image, ImageOps
img = Image.open('original-image.png')
deltaw = nw - ow
deltah = nh - oh
ltrb_border = (deltaw // 2, deltah // 2, deltaw - (deltaw // 2), deltah - (deltah // 2))
img_with_border = ImageOps.expand(img, border=ltrb_border, fill='black')
img_with_border.save('imaged-with-border.png')
You can load the image with scipy.misc.imread as a numpy array. Then create an array with the desired background with numpy.zeros((height, width, channels)) and paste the image at the desired location:
import numpy as np
import scipy.misc
im = scipy.misc.imread('foo.jpg', mode='RGB')
height, width, channels = im.shape
# make canvas
im_bg = np.zeros((height, width, channels))
im_bg = (im_bg + 1) * 255 # e.g., make it white
# Your work: Compute where it should be
pad_left = ...
pad_top = ...
im_bg[pad_top:pad_top + height,
pad_left:pad_left + width,
:] = im
# im_bg is now the image with the background.
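Note that the canvas has to be created at the target size for the centering to do anything; a minimal sketch, assuming an 800 x 800 target canvas (new_height and new_width are illustrative names):
new_height, new_width = 800, 800  # assumed target size; must be >= the image size
im_bg = np.ones((new_height, new_width, channels), dtype=im.dtype) * 255  # white canvas
pad_top = (new_height - height) // 2
pad_left = (new_width - width) // 2
im_bg[pad_top:pad_top + height, pad_left:pad_left + width, :] = im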
ximg = Image.open(qpath)
xwid,xhgt = func_ResizeImage(ximg)
qpanel_3 = tk.Frame(Body,width=xwid+10,height=xhgt+10,bg='white',bd=5)
ximg = ximg.resize((xwid,xhgt),Image.ANTIALIAS)
ximg = ImageTk.PhotoImage(ximg)
panel = tk.Label(qpanel_3,image=ximg)
panel.image = ximg
panel.grid(row = 2)
from PIL import Image
from PIL import ImageOps
img = Image.open("dem.jpg").convert("RGB")
This part will add black borders at the sides (10% of width)
img_side = ImageOps.expand(img, border=(int(0.1*img.size[0]),0,int(0.1*img.size[0]),0), fill=(0,0,0))
img_side.save("sunset-sides.jpg")
This part will add black borders at the bottom & top (10% of height)
img_updown = ImageOps.expand(img, border=(0,int(0.1*img.size[1]),0,int(0.1*img.size[1])), fill=(0,0,0))
img_updown.save("sunset-top_bottom.jpg")
This part will add black borders at the bottom,top & sides (10% of height-width)
img_updown_side = ImageOps.expand(img, border=(int(0.1*img.size[0]),int(0.1*img.size[1]),int(0.1*img.size[0]),int(0.1*img.size[1])), fill=(0,0,0))
img_updown_side.save("sunset-all_sides.jpg")
img.close()
img_side.close()
img_updown.close()
img_updown_side.close()
Currently I am using:
os.chdir(album.path)
images = glob.glob('*.*')
# thumbs size
size = 80,80
for image in images:
    # create thumb
    file, ext = os.path.splitext(image)
    im = Image.open(os.path.join(album.path, image))
    im.thumbnail(size, Image.ANTIALIAS)
    thumb_path = os.path.join(album.path, 'thumbs', file + ".thumb" + ".jpeg")
    im.save(thumb_path)
Although this works, I end up with differently sized images (some are portrait and some are landscape), but I want all of the images to have an exact size. Maybe a sensible crop?
UPDATE:
I don't mind cropping a small portion of the image. When I said sensible cropping I mean something like this algorithm (a short Pillow sketch follows the pseudocode):
if image is portrait:
    make width 80px
    crop the height (will be more than 80px)
else if image is landscape:
    make height 80px
    crop the width (will be more than 80px)
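A minimal Pillow sketch of that idea, assuming ImageOps.fit is acceptable (it scales and center-crops to an exact size; the padded-fit answer below uses the same call), with placeholder file names:
from PIL import Image, ImageOps

im = Image.open('photo.jpg')
thumb = ImageOps.fit(im, (80, 80), centering=(0.5, 0.5))
thumb.save('photo.thumb.jpeg')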
Here is my take on doing a padded fit for an image:
#!/usr/bin/env python
from PIL import Image, ImageChops
F_IN = "/path/to/image_in.jpg"
F_OUT = "/path/to/image_out.jpg"
size = (80,80)
image = Image.open(F_IN)
image.thumbnail(size, Image.ANTIALIAS)
image_size = image.size
thumb = image.crop( (0, 0, size[0], size[1]) )
offset_x = max((size[0] - image_size[0]) // 2, 0)
offset_y = max((size[1] - image_size[1]) // 2, 0)
thumb = ImageChops.offset(thumb, offset_x, offset_y)
thumb.save(F_OUT)
It first uses the thumbnail operation to bring the image down to within your original bounds and preserving the aspect. Then it crops it back out to actually fill the size of your bounds (since unless the original image was square, it will be smaller now), and we find the proper offset to center the image. The image is offset to the center, so you end up with black padding but no image cropping.
Unless you can make a really sensible guess at a proper center crop without losing possible important image data on the edges, a padded fit approach will work better.
Update
Here is a version that can do either center crop or pad fit.
#!/usr/bin/env python
from PIL import Image, ImageChops, ImageOps
def makeThumb(f_in, f_out, size=(80, 80), pad=False):
    image = Image.open(f_in)
    image.thumbnail(size, Image.ANTIALIAS)
    image_size = image.size

    if pad:
        thumb = image.crop((0, 0, size[0], size[1]))
        offset_x = max((size[0] - image_size[0]) // 2, 0)
        offset_y = max((size[1] - image_size[1]) // 2, 0)
        thumb = ImageChops.offset(thumb, offset_x, offset_y)
    else:
        thumb = ImageOps.fit(image, size, Image.ANTIALIAS, centering=(0.5, 0.5))

    thumb.save(f_out)
source = "/path/to/source/image.JPG"
makeThumb(source, "/path/to/source/image_padded.JPG", pad=True)
makeThumb(source, "/path/to/source/image_centerCropped.JPG", pad=False)
Obviously, you would need to crop or pad the images. You could do something like below to get a maximal centered crop according to the aspect ratio of the thumbnails (untested):
aspect = lambda size: float(size[0]) / float(size[1])
sa = aspect(size)
if aspect(im.size) > sa:
    width = int(sa * im.size[1])
    left = (im.size[0] - width) // 2
    im = im.crop((left, 0, left + width, im.size[1]))
else:
    height = int(im.size[0] / sa)
    top = (im.size[1] - height) // 2
    im = im.crop((0, top, im.size[0], top + height))
im.thumbnail(size, Image.ANTIALIAS)
If you use easy-thumbnails, you'll need to set crop to True and upscale to True so the thumbnail always fills the space (i.e. has the exact requested dimensions).
Example: make image_2 fit image_1's dimensions:
from easy_thumbnails.files import get_thumbnailer

thumbnailer = get_thumbnailer(image_2)
thumbnail = thumbnailer.generate_thumbnail(thumbnail_options={
    'crop': True,
    'upscale': True,
    'size': image_1.size
})
image_2 = thumbnail.image