Hello, I want to reflect an object in an image, as in this example: https://i.stack.imgur.com/N9J3I.jpg
How can I get this kind of result?
OpenCV may not have a ready-made solution for this; take a closer look at Pillow.
from PIL import Image, ImageFilter

def drop_shadow(image, iterations=3, border=8, offset=(5,5), background_colour=0xffffff, shadow_colour=0x444444):
    # Size of the canvas: image plus offset plus a border on each side
    shadow_width = image.size[0] + abs(offset[0]) + 2 * border
    shadow_height = image.size[1] + abs(offset[1]) + 2 * border
    shadow = Image.new(image.mode, (shadow_width, shadow_height), background_colour)

    # Paste the shadow rectangle, displaced by the offset
    shadow_left = border + max(offset[0], 0)
    shadow_top = border + max(offset[1], 0)
    shadow.paste(shadow_colour, [shadow_left, shadow_top, shadow_left + image.size[0], shadow_top + image.size[1]])

    # Blur repeatedly to soften the shadow edges
    for i in range(iterations):
        shadow = shadow.filter(ImageFilter.BLUR)

    # Paste the original image over the shadow
    img_left = border - min(offset[0], 0)
    img_top = border - min(offset[1], 0)
    shadow.paste(image, (img_left, img_top))
    return shadow

drop_shadow(Image.open('boobs.jpg')).save('shadowed_boobs.png')
Here is one way to do the reflection in Python/OpenCV.
Flip the image vertically, then make a vertical ramp (gradient) image and put it into the alpha channel of the flipped copy. Finally, concatenate the original and the flipped images.
Input:
import cv2
import numpy as np
# set top and bottom opacity percentages
top = 85
btm = 15
# load image
img = cv2.imread('bear2.png')
hh, ww = img.shape[:2]
# flip the input
flip = np.flip(img, axis=0)
# add opaque alpha channel to input
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# make vertical gradient that is bright at top and dark at bottom as alpha channel for the flipped image
gtop = 255*top//100
gbtm = 255*btm//100
grady = np.linspace(gbtm, gtop, hh, dtype=np.uint8)
gradx = np.linspace(1, 1, ww, dtype=np.uint8)
grad = np.outer(grady, gradx)
grad = np.flip(grad, axis=0)
# alternate method
#grad = np.linspace(0, 255, hh, dtype=np.uint8)
#grad = np.tile(grad, (ww,1))
#grad = np.transpose(grad)
#grad = np.flip(grad, axis=0)
# put the gradient into the alpha channel of the flipped image
flip = cv2.cvtColor(flip, cv2.COLOR_BGR2BGRA)
flip[:,:,3] = grad
# concatenate the original and the flipped versions
result = np.vstack((img, flip))
# save the gradient (for inspection) and the reflection result
cv2.imwrite('bear2_vertical_gradient.png', grad)
cv2.imwrite('bear2_reflection.png', result)
# Display various images to see the steps
cv2.imshow('flip',flip)
cv2.imshow('grad',grad)
cv2.imshow('result',result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Ramped (Gradient) Image:
Result:
The code below is intended to take an infrared image (B&W) and convert it to RGB. It does so successfully, but with significant noise. I have included a few lines for noise reduction, but they don't seem to help. I've included the starting/resulting photos below. Any advice/corrections are welcome, and thank you in advance!
from skimage import io
import numpy as np
import glob, os
from tkinter import Tk
from tkinter.filedialog import askdirectory
import cv2

path = askdirectory(title='Select PNG Folder') # shows dialog box and returns the path
outpath = askdirectory(title='Select SAVE Folder')

# wavelength in microns
MWIR = 4.5

R = .642
G = .532
B = .44

vector = [R, G, B]
vectorsum = np.sum(vector)
vector = vector / vectorsum # normalize
vector = vector * 255 / MWIR # changing this value changes the outcome significantly so I
                             # have been messing with it in the hopes of fixing it but no luck so far.
vector = np.power(vector, 4)

for file in os.listdir(path):
    if file.endswith(".png"):
        imIn = io.imread(os.path.join(path, file))
        imOut = imIn * vector
        ret, thresh = cv2.threshold(imOut, 64, 255, cv2.THRESH_BINARY)
        kernel = np.ones((5, 5), np.uint8)
        erode = cv2.erode(thresh, kernel, iterations=1)
        result = cv2.bitwise_or(imOut, erode)
        io.imsave(os.path.join(outpath, file) + '_RGB.png', imOut.astype(np.uint8))
Your noise looks like completely random values, so I suspect you have an error in your conversion from float to uint8. But instead of rolling everything yourself, why don't you just use:
imOut = cv2.cvtColor(imIn,cv2.COLOR_GRAY2BGR)
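If you do want to keep your own scaling, make sure you clip before you cast; here is a minimal sketch (the array is just a stand-in for whatever your per-channel scaling produces):

import numpy as np

# Hypothetical float image whose values exceed the 8-bit range
im_float = np.random.rand(4, 4) * 400.0

bad = im_float.astype(np.uint8)                    # wraps around: 300.0 becomes 44
good = np.clip(im_float, 0, 255).astype(np.uint8)  # saturates: 300.0 becomes 255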
Here is one way to do that in Python/OpenCV.
Your issue is likely that your channel values are exceeding the 8-bit range.
Sorry, I do not understand the relationship between your R,G,B weights and your MWIR. Dividing by MWIR will do nothing if your weights are properly normalized.
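To see why, here is a quick check (plain NumPy, using only the weights quoted in the question): dividing every weight by the same constant leaves the normalized ratios unchanged.

import numpy as np

v = np.array([0.642, 0.532, 0.44])
print(v / v.sum())   # normalized weights
w = v / 4.5          # divide every weight by MWIR
print(w / w.sum())   # exactly the same normalized weights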
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('car.jpg')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# make color channels
red = gray.copy()
green = gray.copy()
blue = gray.copy()
# set weights
R = .642
G = .532
B = .44
MWIR = 4.5
# raise the weights to the 4th power, then normalize them by their sum
R = R**4
G = G**4
B = B**4
total = R + G + B
R = R/total
G = G/total
B = B/total
print(R,G,B)
# combine channels with weights (cv2.merge and cv2.imwrite expect BGR order)
red = (R*red)
green = (G*green)
blue = (B*blue)
result = cv2.merge([blue,green,red])
# scale by the ratio 255/max to stretch to the full dynamic range
maxval = np.amax(result)
result = ((255/maxval)*result).clip(0,255).astype(np.uint8)
# write result to disk
cv2.imwrite("car_colored.png", result)
# display it
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Result:
If the noise is coming from the sensor itself, like grainy noise, you'll need to look into denoising algorithms. scikit-image and OpenCV both provide denoising functions you can try (for example, skimage.restoration and cv2.fastNlMeansDenoising).
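As a starting point, here is a minimal sketch using OpenCV's non-local means denoiser; the filename is a placeholder and the strength parameters will need tuning for your data:

import cv2

img = cv2.imread('noisy_ir.png')  # placeholder filename
# h / hColor control filter strength: higher removes more noise but blurs detail
denoised = cv2.fastNlMeansDenoisingColored(img, None, h=10, hColor=10,
                                           templateWindowSize=7, searchWindowSize=21)
cv2.imwrite('denoised.png', denoised)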
I recently learned about matplotlib.cm, which handles colormaps. I've been using those to artificially color IR images, and made a brief example using the same black & white car image used above. Basically, I create a colormap .csv file locally, then refer to it for RGB weights. You may have to pick and choose which colormap you prefer, but that's up to personal preference.
Input image:
Python:
import os
import numpy as np
import cv2
from matplotlib import cm
# Multiple colormap options are available - I've hardcoded viridis for this example.
colormaps = ["viridis", "plasma", "inferno", "magma", "cividis"]

def CreateColormap():
    if not os.path.exists("viridis_colormap.csv"):
        # Get 256 entries from "viridis" or any other Matplotlib colormap
        colormap = cm.get_cmap("viridis", 256)
        # Make a Numpy array of the 256 RGB values
        # Each line corresponds to an RGB colour for a greyscale level
        np.savetxt("viridis_colormap.csv", (colormap.colors[...,0:3]*255).astype(np.uint8), fmt='%d', delimiter=',')

def RecolorInfraredImageToRGB(ir_image):
    # Load RGB lookup table from CSV file
    lookup_table = np.loadtxt("viridis_colormap.csv", dtype=np.uint8, delimiter=",")
    # Make output image, same height and width as IR image, but 3-channel RGB
    result = np.zeros((*ir_image.shape, 3), dtype=np.uint8)
    # Take entries from RGB LUT according to greyscale values in image
    np.take(lookup_table, ir_image, axis=0, out=result)
    return result

if __name__ == "__main__":
    CreateColormap()
    img = cv2.imread("bwcar.jpg")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    recolored = RecolorInfraredImageToRGB(gray)
    cv2.imwrite("car_recolored.png", recolored)
    cv2.imshow("Viridis recolor", recolored)
    cv2.waitKey(0)
Output:
So in my coursework, I am supposed to change the different variations of colors and get an image that looks like the example provided. I have gotten halfway there, but for the life of me cannot figure out how to get the colors to change, even a little bit. Even when the code runs without errors, the colors do not change.
I've tried numpy arrays, editing the pixel colors, etc. I cannot get anything to work.
import PIL
from PIL import Image
from PIL import ImageEnhance
from PIL import ImageDraw
from PIL import ImageFont

fnt = ImageFont.truetype('readonly/fanwood-webfont.ttf', 75)

# read image and convert to RGB
image=Image.open("readonly/msi_recruitment.gif")
image=image.convert('RGB')
drawing_object = ImageDraw.Draw(image)

# build a list of 9 images which have different brightnesses
enhancer=ImageEnhance.Brightness(image)
images=[]
x = 1
for i in range(0, 10):
    pixels = img.load()
    print(image.size)
    x += 1
    z = x
    if x%3 == 1 :
        z = 9
        drawing_object.rectangle((0,450,800,325), fill='black')
        drawing_object.text((20,350),'channel intensity o 0.{}'.format(z), font=fnt, fill=(255,255,255))
    elif x%3 == 0:
        z = 5
        drawing_object.rectangle((0,450,800,325), fill='black')
        drawing_object.text((20,350),'channel intensity o 0.{}'.format(z), font=fnt, fill=(255,255,255))
    else:
        z = 1
        drawing_object.rectangle((0,450,800,325), fill='black')
        drawing_object.text((20,350),'channel intensity o 0.{}'.format(z), font=fnt, fill=(255,255,255))
    images.append(enhancer.enhance(10/10))

## create a contact sheet from different brightnesses
first_image=images[0]
contact_sheet=PIL.Image.new(first_image.mode, (first_image.width*3,first_image.height*3))
x=0
y=0

for img in images:
    # Lets paste the current image into the contact sheet
    contact_sheet.paste(img, (x, y) )
    # Now we update our X position. If it is going to be the width of the image, then we set it to 0
    # and update Y as well to point to the next "line" of the contact sheet.
    if x+first_image.width == contact_sheet.width:
        x=0
        y=y+first_image.height
    else:
        x=x+first_image.width

# resize and display the contact sheet
contact_sheet = contact_sheet.resize((int(contact_sheet.width/2),int(contact_sheet.height/2) ))
display(contact_sheet)
So in general you can use OpenCV's ColorMap to change the colors in your image:
import cv2
img = cv2.imread(r"<IMAGE PATH>")
img = cv2.applyColorMap(img, cv2.COLORMAP_JET) # Change colors of image with predefined colormap
# Display the new image
cv2.imshow("img", img)
cv2.waitKey()
Now, if you want to create your own colormap instead of using a predefined one in OpenCV, you can do it with cv2.LUT() (see the OpenCV documentation).
Example Input:
Example Output:
And here is a quick example of how to change the gamma with this approach:

import numpy as np

def adjust_gamma(img, gamma=1.0):
    # Build a lookup table mapping each pixel value [0, 255] to its gamma-adjusted value
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    # Apply the table to every pixel
    new_img = cv2.LUT(np.array(img, dtype=np.uint8), table)
    return new_img
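And if you want a fully custom colormap rather than a gamma curve, here is a minimal sketch (the ramps are arbitrary, purely to show the mechanics): build one 256-entry table per channel, apply each to the grayscale image with cv2.LUT, and merge the results in BGR order.

import cv2
import numpy as np

gray = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)

# Arbitrary example ramps: dark pixels tend toward blue, bright pixels toward red
lut_b = np.linspace(255, 0, 256).astype(np.uint8)
lut_g = np.zeros(256, dtype=np.uint8)
lut_r = np.linspace(0, 255, 256).astype(np.uint8)

colored = cv2.merge([cv2.LUT(gray, lut_b),
                     cv2.LUT(gray, lut_g),
                     cv2.LUT(gray, lut_r)])
cv2.imshow('custom colormap', colored)
cv2.waitKey()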
It looks like you have a typo in your loop:
for i in range(0, 10):
    # if-elses
    ...
    images.append(enhancer.enhance(10/10))
You're enhancing the image with a factor of 1.0 at each iteration, which according to the docs:
Adjust image brightness.
This class can be used to control the brightness of an image. An enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the original image.
Using z, which I suspect was your intention, will give you the tiers of brightness your comments describe:
for i in range(0, 10):
    # if-elses
    ...
    images.append(enhancer.enhance(z/10))
Edit: changed assumed value from i to z, based on the text you're writing.
I should also point out that you probably want to create a temp "watermark" image during the for-loop to apply the text/rectangle to. Since the ImageDraw objects modify the image in-place, you're applying the text on top of each other at each iteration, causing some weird text on the later images.
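A minimal sketch of that fix (reusing the paths and names from your snippet, so treat it as illustrative): brighten a fresh copy each iteration, then draw the label on that copy only.

from PIL import Image, ImageDraw, ImageEnhance, ImageFont

image = Image.open('readonly/msi_recruitment.gif').convert('RGB')
fnt = ImageFont.truetype('readonly/fanwood-webfont.ttf', 75)

images = []
for i in range(1, 10):
    # enhance() returns a new image, so the original stays untouched
    frame = ImageEnhance.Brightness(image).enhance(i / 10)
    draw = ImageDraw.Draw(frame)
    draw.rectangle((0, 325, 800, 450), fill='black')
    draw.text((20, 350), 'channel intensity 0.{}'.format(i), font=fnt, fill=(255, 255, 255))
    images.append(frame)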
I'm trying to resize a batch of grayscale images that are 256 x N pixels (N varies, but is always ≤256).
My intention is to downscale the images.
The resize would have to output a square (1:1) image, with:
resized image centered vertically
aspect ratio maintained
remaining pixels rendered black
Visually this would be the desired result:
I have tried creating a numpy zeros matrix with the target size (e.g. 200 x 200) but have not been able to paste the resized image into its vertical center.
Any suggestions using cv2, PIL or numpy are welcome.
You can use Pillow to accomplish that:
Code:
from PIL import Image

def make_square(im, min_size=256, fill_color=(0, 0, 0, 0)):
    x, y = im.size
    size = max(min_size, x, y)
    new_im = Image.new('RGBA', (size, size), fill_color)
    new_im.paste(im, (int((size - x) / 2), int((size - y) / 2)))
    return new_im
Test Code:
test_image = Image.open('hLarp.png')
new_image = make_square(test_image)
new_image.show()
For a white background you can do:
new_image = make_square(test_image, fill_color=(255, 255, 255, 0))
Result:
Here is code that solves your question with the OpenCV module (using NumPy too):
# Importing modules: OpenCV + NumPy
import cv2
import numpy as np

# Reading an image (you can use PNG or JPG)
img = cv2.imread("image.png")

# Getting the bigger side of the image
s = max(img.shape[0:2])

# Creating a dark square with NumPy
f = np.zeros((s,s,3),np.uint8)

# Getting the centering position
ax,ay = (s - img.shape[1])//2,(s - img.shape[0])//2

# Pasting the image at the centered position
f[ay:img.shape[0]+ay,ax:ax+img.shape[1]] = img

# Showing the result (just in case)
cv2.imshow("IMG",f)
# A pause, waiting for any key press
cv2.waitKey(0)

# Saving the image
cv2.imwrite("img2square.png",f)
cv2.destroyAllWindows()
PIL.ImageOps.pad:
from PIL import Image, ImageOps
with Image.open('hLARP.png') as im:
    im = ImageOps.pad(im, (200, 200), color='black')
im.save('result.png')
PIL has the thumbnail method which will scale keeping the aspect ratio. From there you just need to paste it centered onto your black background rectangle.
from PIL import Image

def black_background_thumbnail(path_to_image, thumbnail_size=(200,200)):
    background = Image.new('RGBA', thumbnail_size, "black")
    source_image = Image.open(path_to_image).convert("RGBA")
    source_image.thumbnail(thumbnail_size)
    (w, h) = source_image.size
    # Integer division: paste() requires integer coordinates in Python 3
    background.paste(source_image, ((thumbnail_size[0] - w) // 2, (thumbnail_size[1] - h) // 2))
    return background

if __name__ == '__main__':
    img = black_background_thumbnail('hLARP.png')
    # JPEG cannot store alpha, so convert to RGB before saving
    img.convert('RGB').save('tmp.jpg')
    img.show()
from PIL import Image
def reshape(image):
    '''
    Reshapes the non-square image by pasting
    it to the centre of a black canvas of size
    n*n where n is the biggest dimension of
    the non-square image.
    '''
    old_size = image.size
    max_dimension, min_dimension = max(old_size), min(old_size)
    desired_size = (max_dimension, max_dimension)
    position = int(max_dimension/2) - int(min_dimension/2)
    blank_image = Image.new("RGB", desired_size, color='black')
    if image.height < image.width:
        blank_image.paste(image, (0, position))
    else:
        blank_image.paste(image, (position, 0))
    return blank_image
Behold! A greatly over-engineered version of @Stephen Rauch's answer that contains an interactive element and accounts for odd-pixel padding.
Usage
# Note: PySide2 can also be replaced by PyQt5, PyQt6, PySide6
# Also note! Any of the above are >100MB
$ pip install utilitys pyside2 pillow
$ python <file>.py --help
usage: <file>.py [-h] [--folder FOLDER] [--ext EXT]

optional arguments:
  -h, --help       show this help message and exit
  --folder FOLDER  Folder of images allowed for viewing. Must have at least one image (default: .)
  --ext EXT        Image extension to look for (default: png)

$ python <file>.py --folder "./path/to/folder/of/your/image(s).png" --ext "jpg"
file.py contents
import argparse
from pathlib import Path
from typing import Tuple, Union, Any

import numpy as np
import pyqtgraph as pg
from PIL import Image
from utilitys import fns, widgets, RunOpts


def pad_to_size(
    image: Image.Image,
    size_wh: Union[int, Tuple[int, int]] = None,
    fill_color: Any = 0,
    **resize_kwargs,
) -> Image.Image:
    """
    Keeps an image's aspect ratio by resizing until the largest side is constrained
    by the specified output size. Then, the deficient dimension is padded until
    the image is the specified size.
    """
    if size_wh is None:
        size_wh = max(image.size)
    if isinstance(size_wh, int):
        size_wh = (size_wh, size_wh)

    im_size_wh = np.array(image.size)
    ratios = im_size_wh / size_wh

    # Resize until the largest side is constrained by the specified output size
    im_size_wh = np.ceil(im_size_wh / ratios.max()).astype(int)
    # Prefer 1-pixel difference in aspect ratio vs. odd padding
    pad_amt = np.array(size_wh) - im_size_wh
    use_ratio_idx = np.argmax(ratios)
    unused_ratio_idx = 1 - use_ratio_idx

    # Sanity check for floating point accuracy: At least one side must match
    # user-requested dimension
    if np.all(pad_amt != 0):
        # Adjust dimension that is supposed to match
        im_size_wh[use_ratio_idx] += pad_amt[use_ratio_idx]

    # Prefer skewing aspect ratio by 1 pixel instead of odd padding
    # If odd, 1 will be added. Otherwise, the dimension remains unchanged
    im_size_wh[unused_ratio_idx] += pad_amt[unused_ratio_idx] % 2
    image = image.resize(tuple(im_size_wh), **resize_kwargs)

    new_im = Image.new("RGB", size_wh, fill_color)
    width, height = image.size
    new_im.paste(image, (int((size_wh[0] - width) / 2), int((size_wh[1] - height) / 2)))
    return new_im


def main(folder=".", ext="png"):
    """
    Parameters
    ----------
    folder: str, Path
        Folder of images allowed for viewing. Must have at least one image
    ext: str, Path
        Image extension to look for
    """
    folder = Path(folder)
    files = fns.naturalSorted(folder.glob(f"*.{ext}"))
    err_msg = f"{folder} must have at least one image file with extension `{ext}`"
    assert len(files), err_msg

    pg.mkQApp()
    viewer = widgets.ImageViewer()

    def readim(file_index=0, try_pad=False, output_w=512, output_h=512):
        # Guard against out-of-range indices (the original `0 > file_index > len(files)`
        # could never be true)
        if not 0 <= file_index < len(files):
            return
        image = Image.open(files[file_index])
        if try_pad:
            image = pad_to_size(image, (output_w, output_h), fill_color=(255, 255, 255))
        viewer.setImage(np.array(image))

    viewer.toolsEditor.registerFunc(readim, runOpts=RunOpts.ON_CHANGED)
    wc = viewer.widgetContainer()
    readim()
    wc.show()
    pg.exec()


if __name__ == "__main__":
    # Print defaults in help signature
    fmt = dict(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli = fns.makeCli(main, parserKwargs=fmt)
    args = cli.parse_args()
    main(**vars(args))
Can anyone help me figure out what's happening in my image auto-cropping script? I have a PNG image with a large transparent area/space. I would like to automatically crop that space out and leave the essentials. The original image has a square canvas; optimally it would be rectangular, encapsulating just the molecule.
here's the original image:
Doing some googling, I came across PIL/Python code that was reported to work; however, in my hands, running the code below over-crops the image.
import Image
import sys
image=Image.open('L_2d.png')
image.load()
imageSize = image.size
imageBox = image.getbbox()
imageComponents = image.split()
rgbImage = Image.new("RGB", imageSize, (0,0,0))
rgbImage.paste(image, mask=imageComponents[3])
croppedBox = rgbImage.getbbox()
print imageBox
print croppedBox
if imageBox != croppedBox:
    cropped=image.crop(croppedBox)
    print 'L_2d.png:', "Size:", imageSize, "New Size:", croppedBox
    cropped.save('L_2d_cropped.png')
the output is this:
Can anyone more familiar with image processing/PIL help me figure out the issue?
Install Pillow
pip install Pillow
and use as
from PIL import Image
image=Image.open('L_2d.png')
imageBox = image.getbbox()
cropped = image.crop(imageBox)
cropped.save('L_2d_cropped.png')
When you search for boundaries with mask=imageComponents[3], you are masking with the alpha channel (the fourth band of the RGBA split), not a color channel.
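If you do want to crop strictly by transparency, here is a minimal sketch (assuming an RGBA input): take the bounding box of the alpha channel itself.

from PIL import Image

image = Image.open('L_2d.png').convert('RGBA')
bbox = image.getchannel('A').getbbox()  # bbox of all pixels with non-zero alpha
if bbox:
    image.crop(bbox).save('L_2d_cropped.png')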
You can use numpy: convert the image to an array, find all non-empty columns and rows, and then create an image from these:
from PIL import Image
import numpy as np

image=Image.open('L_2d.png')
image.load()

image_data = np.asarray(image)
image_data_bw = image_data.max(axis=2)
non_empty_columns = np.where(image_data_bw.max(axis=0)>0)[0]
non_empty_rows = np.where(image_data_bw.max(axis=1)>0)[0]
cropBox = (min(non_empty_rows), max(non_empty_rows), min(non_empty_columns), max(non_empty_columns))

image_data_new = image_data[cropBox[0]:cropBox[1]+1, cropBox[2]:cropBox[3]+1 , :]

new_image = Image.fromarray(image_data_new)
new_image.save('L_2d_cropped.png')
The result looks like this:
If anything is unclear, just ask.
I tested most of the answers in this post; however, I ended up writing my own. I used Anaconda Python 3.
from PIL import Image, ImageChops

def trim(im):
    bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    # Bounding box given as a 4-tuple defining the left, upper, right, and lower pixel coordinates.
    # If the image is completely empty, this method returns None.
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)

if __name__ == "__main__":
    bg = Image.open("test.jpg") # The image to be cropped
    new_im = trim(bg)
    new_im.show()
Here's another version using pyvips.
import sys
import pyvips
image = pyvips.Image.new_from_file(sys.argv[1])
left, top, width, height = image.find_trim(threshold=2, background=[255, 255, 255])
image = image.crop(left, top, width, height)
image.write_to_file(sys.argv[2])
The pyvips trimmer is useful for photographic images. It does a median filter, subtracts the background, finds pixels over the threshold, and removes up to the first and last row and column outside this set. The median and threshold mean it is not thrown off by things like JPEG compression, where noise or invisible compression artefacts can confuse other trimmers.
If you don't supply the background argument, it uses the pixel at (0, 0). threshold defaults to 10, which is about right for JPEG.
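So for a typical JPEG, the call can usually be shortened to the defaults; continuing from the snippet above:

# background sampled at pixel (0, 0); threshold defaults to 10
left, top, width, height = image.find_trim()
image = image.crop(left, top, width, height)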
Here it is running on an 8k x 8k pixel NASA earth image:
$ time ./trim.py /data/john/pics/city_lights_asia_night_8k.jpg x.jpg
real 0m1.868s
user 0m13.204s
sys 0m0.280s
peak memory: 100mb
Before:
After:
There's a blog post with some more discussion here.
This is an improvement over snew's reply, which works for a transparent background. With mathematical morphology, we can make it work on a white background (instead of transparent) with the following code:
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.morphology import convex_hull_image
from skimage.color import rgb2gray

im = imread('L_2d.jpg')
plt.imshow(im)
plt.title('input image')
plt.show()

# create a binary image
im1 = 1 - rgb2gray(im)
threshold = 0.5
im1[im1 <= threshold] = 0
im1[im1 > threshold] = 1

chull = convex_hull_image(im1)
plt.imshow(chull)
plt.title('convex hull in the binary image')
plt.show()

imageBox = Image.fromarray((chull*255).astype(np.uint8)).getbbox()
cropped = Image.fromarray(im).crop(imageBox)
cropped.save('L_2d_cropped.jpg')
plt.imshow(cropped)
plt.show()
pilkit already contains a processor for automatic cropping, TrimBorderColor. Something like this should work:
from pilkit.lib import Image
from pilkit.processors import TrimBorderColor
img = Image.open('/path/to/my/image.png')
processor = TrimBorderColor()
new_img = processor.process(img)
https://github.com/matthewwithanm/pilkit/blob/b24990167aacbaab3db6d8ec9a02f9ad42856898/pilkit/processors/crop.py#L33
Came across this post recently and noticed the PIL library has changed. I re-implemented this with OpenCV:
import cv2
import numpy as np

def crop_im(im, padding=0.1):
    """
    Takes a cv2 image, im, and padding % as a float, padding,
    and returns the cropped image.
    """
    bw = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    rows, cols = bw.shape
    non_empty_columns = np.where(bw.min(axis=0)<255)[0]
    non_empty_rows = np.where(bw.min(axis=1)<255)[0]
    cropBox = (int(min(non_empty_rows) * (1 - padding)),
               int(min(max(non_empty_rows) * (1 + padding), rows)),
               int(min(non_empty_columns) * (1 - padding)),
               int(min(max(non_empty_columns) * (1 + padding), cols)))
    cropped = im[cropBox[0]:cropBox[1]+1, cropBox[2]:cropBox[3]+1, :]
    return cropped

im = cv2.imread('testimage.png')
cropped = crop_im(im)
cv2.imshow('', cropped)
cv2.waitKey(0)
I know that this post is old but, for some reason, none of the suggested answers worked for me, so I hacked my own version from existing answers:
from PIL import Image
import numpy as np
import os

grey_tolerance = 0.7 # (0,1) = crop (more,less)

f = 'test_image.png'
file, ext = os.path.splitext(f)

def get_cropped_line(non_empty_elms, tolerance, S):
    if (sum(non_empty_elms) == 0):
        cropBox = ()
    else:
        non_empty_min = non_empty_elms.argmax()
        non_empty_max = S - non_empty_elms[::-1].argmax() + 1
        cropBox = (non_empty_min, non_empty_max)
    return cropBox

def get_cropped_area(image_bw, tol):
    max_val = image_bw.max()
    tolerance = max_val * tol
    non_empty_elms = (image_bw <= tolerance).astype(int)
    S = non_empty_elms.shape
    # Traverse rows
    cropBox = [get_cropped_line(non_empty_elms[k,:], tolerance, S[1]) for k in range(0, S[0])]
    cropBox = list(filter(None, cropBox))  # list() so it can be iterated twice in Python 3
    xmin = [k[0] for k in cropBox]
    xmax = [k[1] for k in cropBox]
    # Traverse cols
    cropBox = [get_cropped_line(non_empty_elms[:,k], tolerance, S[0]) for k in range(0, S[1])]
    cropBox = list(filter(None, cropBox))
    ymin = [k[0] for k in cropBox]
    ymax = [k[1] for k in cropBox]
    xmin = min(xmin)
    xmax = max(xmax)
    ymin = min(ymin)
    ymax = max(ymax)
    ymax = ymax - 1 # Not sure why this is necessary, but it seems to be.
    cropBox = (ymin, ymax, xmin, xmax)  # bounds, so the slice in auto_crop crops correctly
    return cropBox

def auto_crop(f, ext):
    image = Image.open(f)
    image.load()
    image_data = np.asarray(image)
    image_data_bw = image_data[:,:,0] + image_data[:,:,1] + image_data[:,:,2]
    cropBox = get_cropped_area(image_data_bw, grey_tolerance)
    image_data_new = image_data[cropBox[0]:cropBox[1]+1, cropBox[2]:cropBox[3]+1, :]
    new_image = Image.fromarray(image_data_new)
    f_new = f.replace(ext, '') + '_cropped' + ext
    new_image.save(f_new)