How to improve accuracy of OpenCV matching results in Python

I'm trying to configure OpenCV within Python 3.6 to match a character icon (pattern) (1) against a box of characters (2). Nevertheless, the match score is quite low, especially for shaded characters like (1).
I tried to solve it by using not only matchTemplate but also histogram comparison; nevertheless, the result is still poor.
I tried using grayscale, color, matching just the center of the picture (a cropped face), matching the whole picture, and resizing the pattern to the exact dimensions it would have in the box... all combinations... and the result is still VERY random (see the attached image of correlation results).
Thank you in advance for your help!
Here's the code:
import numpy as np
import cv2 as cv
from PIL import Image
import os
box = Image.open("/Users/user/Desktop/dbz/my_box.jpeg")
box.thumbnail((592,1053))
#conditions for each match step
character_threshold = 0.6 #checks in box
hist_threshold = 0.3
for root, dirs, files in os.walk("/Users/user/Desktop/dbz/img/Super/TEQ/"):
    for file in files:
        if not file.startswith("."):
            print("now " + file)
            char = os.path.join(root, file)
            # Open and resize the character's icon
            character = Image.open(char)
            character.thumbnail((153,139))
            # Crop the face from the character's icon and convert to a grayscale CV object
            face = character.crop((22,22,94,94)) # size 72x72 with centered face
            face_array = np.array(face).astype(np.uint8)
            face_array_gray = cv.cvtColor(face_array, cv.COLOR_RGB2GRAY)
            # Convert the character's icon to a grayscale CV object
            character_array = np.array(character).astype(np.uint8)
            character_array_gray = cv.cvtColor(character_array, cv.COLOR_RGB2GRAY)
            # Convert the box screenshot to a grayscale CV object
            box_array = np.array(box).astype(np.uint8)
            box_array_gray = cv.cvtColor(box_array, cv.COLOR_RGB2GRAY)
            # Check whether the face is in the box (matching on the blue channel)
            character_score = cv.matchTemplate(box_array[:,:,2], face_array[:,:,2], cv.TM_CCOEFF_NORMED)
            if character_score.max() > character_threshold:
                ij = np.unravel_index(np.argmax(character_score), character_score.shape)
                x, y = ij[::-1] # np returns (row, col), whilst PIL's crop takes (left, upper, right, lower)
                h, w = face_array_gray.shape
                face.show()
                found = box.crop((x, y, x+w, y+h)) # best with the border expanded to (x-20, y-5, x+w, y+h+20)
                #found.show()
                found_array = np.array(found).astype(np.uint8)
                found_array_gray = cv.cvtColor(found_array, cv.COLOR_RGB2GRAY)
                found_hist = cv.calcHist([found_array], [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
                found_hist = cv.normalize(found_hist, found_hist).flatten()
                found_hist_gray = cv.calcHist([found_array_gray], [0], None, [8], [0,256])
                found_hist_gray = cv.normalize(found_hist_gray, found_hist_gray).flatten()
                face_hist = cv.calcHist([face_array], [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
                face_hist = cv.normalize(face_hist, face_hist).flatten()
                face_hist_gray = cv.calcHist([face_array_gray], [0], None, [8], [0,256])
                face_hist_gray = cv.normalize(face_hist_gray, face_hist_gray).flatten()
                character_hist = cv.calcHist([character_array], [0,1,2], None, [8,8,8], [0,256,0,256,0,256])
                character_hist = cv.normalize(character_hist, character_hist).flatten()
                character_hist_gray = cv.calcHist([character_array_gray], [0], None, [8], [0,256])
                character_hist_gray = cv.normalize(character_hist_gray, character_hist_gray).flatten()
                hist_compare_result_CORREL = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_CORREL)
                #hist_compare_result_CHISQR = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_CHISQR)
                #hist_compare_result_INTERSECT = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_INTERSECT)
                #hist_compare_result_BHATTACHARYYA = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_BHATTACHARYYA)
                if (hist_compare_result_CORREL + character_score.max()) > 1:
                    print(f"Found {file} with a score:\n match:{character_score.max()}\n hist_correl: {hist_compare_result_CORREL}\n SUM:{hist_compare_result_CORREL+character_score.max()}", file=open("/Users/user/Desktop/dbz/out.log","a+"))
(1) character icon (pattern)
(2) box of characters

Here is a simple example of masked template matching in Python/OpenCV.
Image:
Transparent Template:
Template with alpha removed:
Template alpha channel extracted as mask image:
import cv2
import numpy as np
# read image
img = cv2.imread('logo.png')
# read template with alpha
tmplt = cv2.imread('hat_alpha.png', cv2.IMREAD_UNCHANGED)
hh, ww = tmplt.shape[:2]
# extract template mask as grayscale from alpha channel and make 3 channels
tmplt_mask = tmplt[:,:,3]
tmplt_mask = cv2.merge([tmplt_mask,tmplt_mask,tmplt_mask])
# extract templt2 without alpha channel from tmplt
tmplt2 = tmplt[:,:,0:3]
# do template matching
corrimg = cv2.matchTemplate(img,tmplt2,cv2.TM_CCORR_NORMED, mask=tmplt_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(corrimg)
max_val_ncc = '{:.3f}'.format(max_val)
print("correlation match score: " + max_val_ncc)
xx = max_loc[0]
yy = max_loc[1]
print('xmatch =',xx,'ymatch =',yy)
# draw red bounding box to define match location
result = img.copy()
pt1 = (xx,yy)
pt2 = (xx+ww, yy+hh)
cv2.rectangle(result, pt1, pt2, (0,0,255), 1)
cv2.imshow('image', img)
cv2.imshow('template2', tmplt2)
cv2.imshow('template_mask', tmplt_mask)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('logo_hat_match2.png', result)
Match location on input:
Match Information:
correlation match score: 1.000
xmatch = 417 ymatch = 44
Without the mask, the large green area in the template would mismatch in the input and lower the match score dramatically.
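To see the effect, a quick check (reusing img and tmplt2 from the snippet above) runs the same correlation without the mask; the printed score should come out noticeably lower:
corrimg_nomask = cv2.matchTemplate(img, tmplt2, cv2.TM_CCORR_NORMED)
# without the mask, the green pixels participate in the correlation
min_val2, max_val2, min_loc2, max_loc2 = cv2.minMaxLoc(corrimg_nomask)
print("unmasked match score: " + '{:.3f}'.format(max_val2))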

Related

How would I warp text around an image's edges?

I am trying to create an image with its edges replaced with text, similar to this YouTube video thumbnail, but from a source image. I've used OpenCV to get a version of the source image with edges, and Pillow to actually write the text, but I'm not sure where to start when it comes to actually manipulating the text automatically to fit the edges. The code I have so far is:
import cv2 as cv
from matplotlib import pyplot as plt
from PIL import Image, ImageFont, ImageDraw, ImageShow
font = ImageFont.truetype(r"C:\Users\X\Downloads\Montserrat\Montserrat-Light.ttf", 12)
text = ["text", "other text"]
img = cv.imread(r"C:\Users\X\Pictures\picture.jpg",0)
edges = cv.Canny(img,100,200)
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
im_pil = Image.fromarray(edges)
This code is just for the edge detection and moving the detected edges to Pillow.
Please help
I am not sure where the "edges" from the Canny edge detector come in.
However, the circular text wrap can be done very simply in Python/Wand, which uses ImageMagick. Or one can do it in Python/OpenCV using cv2.remap and custom transformation maps.
Input:
1. Python Wand
(output size determined automatically from input size)
from wand.image import Image
from wand.font import Font
from wand.display import display

with Image(filename='some_text.png') as img:
    img.background_color = 'white'
    img.virtual_pixel = 'white'
    # 360 degree arc, rotated 0 degrees
    img.distort('arc', (360,0))
    img.save(filename='some_text_arc.png')
    img.format = 'png'
    display(img)
Result:
2. Python/OpenCV
import numpy as np
import cv2
import math

# read input
img = cv2.imread("some_text.png")
hin, win = img.shape[:2]
win2 = win / 2

# specify desired square output dimensions and center
hout = 100
wout = 100
xcent = wout / 2
ycent = hout / 2
hwout = max(hout, wout)
hwout2 = hwout / 2

# set up the x and y maps as float32
map_x = np.zeros((hout, wout), np.float32)
map_y = np.zeros((hout, wout), np.float32)

# create map with the arc distortion formula --- angle and radius
for y in range(hout):
    Y = (y - ycent)
    for x in range(wout):
        X = (x - xcent)
        XX = (math.atan2(Y, X) + math.pi/2) / (2*math.pi)
        XX = XX - int(XX + 0.5)
        XX = XX * win + win2
        map_x[y, x] = XX
        map_y[y, x] = hwout2 - math.hypot(X, Y)

# do the remap --- this is where the magic happens
result = cv2.remap(img, map_x, map_y, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(255,255,255))

# save results
cv2.imwrite("some_text_arc.jpg", result)

# display images
cv2.imshow('img', img)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
How to warp an image to take shape of path with python?

Neither OpenCV nor PIL has a way to do that directly, but you can use ImageMagick.
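For example, ImageMagick's Shepards distortion, exposed through Wand's distort method, moves listed control-point pairs and interpolates the rest of the image smoothly between them, which can approximate bending an image along a path. A rough sketch, assuming a hypothetical input file some_text.png:
from wand.image import Image

with Image(filename='some_text.png') as img:
    img.virtual_pixel = 'white'
    # control points given as (src_x, src_y, dst_x, dst_y, ...):
    # pin the top corners, push the horizontal midline down
    img.distort('shepards', (0, 0, 0, 0,
                             img.width, 0, img.width, 0,
                             img.width / 2, img.height / 2,
                             img.width / 2, img.height * 0.8))
    img.save(filename='some_text_bent.png')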

Detecting the volume/overlap region in image registration for OCT data

I am working on image registration of OCT data. I would like to locate the regions/areas in my target registered image where image registration has actually occurred from the source images. I am working in Python. Can anyone please tell me what techniques are available?
Any suggestions on how to proceed with the problem are also welcome. I have done some trial image registration on two images initially. The goal is to do registration of a large dataset.
My code is given below:
#importing libraries
import cv2
import numpy as np
# from skimage.measure import structural_similarity as ssim
# from skimage.measure import compare_ssim
import skimage
from skimage import measure
import matplotlib.pyplot as plt
def imageRegistration():
    # open the image files
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    # converting to greyscale
    img1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    height, width = img2.shape
    # Create ORB detector with 5000 features.
    orb_detector = cv2.ORB_create(5000)
    # Find keypoints and descriptors.
    # The first arg is the image, second arg is the mask
    # (which is not required in this case).
    kp1, d1 = orb_detector.detectAndCompute(img1, None)
    kp2, d2 = orb_detector.detectAndCompute(img2, None)
    # Match features between the two images.
    # We create a Brute Force matcher with
    # Hamming distance as measurement mode.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match the two sets of descriptors.
    matches = matcher.match(d1, d2)
    # Sort matches on the basis of their Hamming distance.
    matches = sorted(matches, key=lambda x: x.distance)
    # Take the top 90 % matches forward.
    matches = matches[:int(len(matches) * 0.9)]
    no_of_matches = len(matches)
    # Define empty matrices of shape no_of_matches * 2.
    p1 = np.zeros((no_of_matches, 2))
    p2 = np.zeros((no_of_matches, 2))
    for i in range(len(matches)):
        p1[i, :] = kp1[matches[i].queryIdx].pt
        p2[i, :] = kp2[matches[i].trainIdx].pt
    # Find the homography matrix.
    homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)
    # Use this matrix to transform the
    # colored image wrt the reference image.
    transformed_img = cv2.warpPerspective(image1, homography, (width, height))
    # Save the output.
    cv2.imwrite('output.jpg', transformed_img)

# following functions figure out the difference between the source image, target image and the registered image
# 0 mse means perfect similarity, no difference
# mse > 1 means there is a difference, and as the value increases, the difference increases
def findingDifferenceMSE():
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
    err = np.sum((image1.astype("float") - image3.astype("float")) ** 2)
    err /= float(image1.shape[0] * image3.shape[1])
    print("MSE:")
    print('Image 104 and output image: ', + err)
    err1 = np.sum((image2.astype("float") - image3.astype("float")) ** 2)
    err1 /= float(image2.shape[0] * image3.shape[1])
    print('Image 0 and output image: ', + err1)

def findingDifferenceSSIM():
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
    result1 = measure.compare_ssim(image1, image3)
    print(result1)

# calling the functions
imageRegistration()
findingDifferenceMSE()
#findingDifferenceSSIM()
This is the registered image:
This image is the first reference image:
This is the second reference image:
Image differentiation technique can be used to identify the registered area in the images by comparing it with base images. In this way, the different areas will be recognized.
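A minimal sketch of that differencing idea, assuming the 0.png and output.jpg file names from the question (both images must share the same dimensions): cv2.absdiff highlights the pixels where the registered result departs from a base image, and a threshold turns that into a mask of the affected region.
import cv2

# absolute per-pixel difference between a base image and the registered output
base = cv2.imread('0.png', cv2.IMREAD_GRAYSCALE)
registered = cv2.imread('output.jpg', cv2.IMREAD_GRAYSCALE)
diff = cv2.absdiff(base, registered)
# pixels that changed noticeably approximate the registered/overlap region
_, changed = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
cv2.imwrite('registered_region.png', changed)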

OpenCV SURF for live streaming from webcam in Python

I am working on a SURF implementation in OpenCV using Python, which will detect a template in a given image. I have modified the code so that it takes video capture from the connected webcam, converts it into images, and then applies SURF to them. Following is the modified code:
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(True):
    ret, img = cap.read()
    # Convert them to grayscale
    imgg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # SURF extraction
    surf = cv2.SURF()
    kp, descritors = surf.detect(imgg, None, useProvidedKeypoints = False)
    # Setting up samples and responses for kNN
    samples = np.array(descritors)
    responses = np.arange(len(kp), dtype = np.float32)
    # kNN training
    knn = cv2.KNearest()
    knn.train(samples, responses)
    # Now loading a template image and searching for similar keypoints
    template = cv2.imread('template.png')
    templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    keys, desc = surf.detect(templateg, None, useProvidedKeypoints = False)
    for h, des in enumerate(desc):
        des = np.array(des, np.float32).reshape((1, 128))
        retval, results, neigh_resp, dists = knn.find_nearest(des, 1)
        res, dist = int(results[0][0]), dists[0][0]
        if dist < 0.1: # draw matched keypoints in red color
            color = (0, 0, 255)
        else: # draw unmatched in blue color
            print dist
            color = (255, 0, 0)
        # Draw matched key points on original image
        x, y = kp[res].pt
        center = (int(x), int(y))
        cv2.circle(img, center, 2, color, -1)
        # Draw matched key points on template image
        x, y = keys[h].pt
        center = (int(x), int(y))
        cv2.circle(template, center, 2, color, -1)
    cv2.imwrite('img', img)
    cv2.imwrite('tm', template)
    cv2.waitKey(0)
cap.release()
But the error that comes up is:
knn.train(samples,responses)
TypeError: data type = 17 is not supported
Does anybody have any idea on this?
OpenCV probably expects regular Python lists, but you are passing NumPy arrays instead. Try this:
knn.train(samples.tolist(),responses.tolist())
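If that does not help, another common cause of "data type = 17 is not supported" is the matrix dtype: OpenCV's ML module wants float32 input, so an explicit cast is worth trying (a guess from the error text, not a confirmed fix):
samples = np.asarray(descritors, dtype=np.float32)
responses = np.arange(len(kp), dtype=np.float32)
knn.train(samples, responses)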

Input samples must be floating-point matrix in function CvKNearest::find_nearest

I am trying to execute the code from this URL
However, I started getting this error:
error: ..\..\..\..\opencv\modules\ml\src\knearest.cpp:370: error: (-5) Input samples must be floating-point matrix (<num_samples>x<var_count>) in function CvKNearest::find_nearest
I have not made any major changes, though. I will paste what I did:
import scipy as sp
import numpy as np
import cv2
# Load the images
img =cv2.imread("image1.png")
# Convert them to grayscale
imgg =cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# SURF extraction
surf = cv2.FeatureDetector_create("SURF")
surfDescriptorExtractor = cv2.DescriptorExtractor_create("SURF")
kp = surf.detect(imgg)
kp, descritors = surfDescriptorExtractor.compute(imgg,kp)
# Setting up samples and responses for kNN
samples = np.array(descritors)
responses = np.arange(len(kp),dtype = np.float32)
# kNN training
knn = cv2.KNearest()
knn.train(samples,responses)
modelImages = ["image2.png"]
for modelImage in modelImages:
    # Now loading a template image and searching for similar keypoints
    template = cv2.imread(modelImage)
    templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    keys = surf.detect(templateg)
    keys, desc = surfDescriptorExtractor.compute(templateg, keys)
    for h, des in enumerate(desc):
        # debug
        print(des.shape)
        des = np.array(des, np.float32).reshape(64L)
        retval, results, neigh_resp, dists = knn.find_nearest(des, 1)
        res, dist = int(results[0][0]), dists[0][0]
        if dist < 0.1: # draw matched keypoints in red color
            color = (0, 0, 255)
        else: # draw unmatched in blue color
            #print dist
            color = (255, 0, 0)
        # Draw matched key points on original image
        x, y = kp[res].pt
        center = (int(x), int(y))
        cv2.circle(img, center, 2, color, -1)
        # Draw matched key points on template image
        x, y = keys[h].pt
        center = (int(x), int(y))
        cv2.circle(template, center, 2, color, -1)
cv2.imshow('img', img)
cv2.imshow('tm', template)
cv2.waitKey(0)
cv2.destroyAllWindows()
Any help is greatly appreciated.
Thanks in advance!
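No answer is shown here, but the error text itself points at a likely fix: find_nearest expects a 2-D float32 matrix of shape (num_samples, var_count), while reshape(64L) produces a 1-D vector. A sketch of the probable repair inside the loop:
# reshape each 64-element SURF descriptor into a 1 x 64 matrix
des = np.array(des, np.float32).reshape((1, 64))
retval, results, neigh_resp, dists = knn.find_nearest(des, 1)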

Automatically cropping an image with python/PIL

Can anyone help me figure out what's happening in my image auto-cropping script? I have a PNG image with a large transparent area/space. I would like to be able to automatically crop that space out and leave the essentials. The original image has a square canvas; optimally, it would be rectangular, encapsulating just the molecule.
Here's the original image:
Doing some googling, I came across PIL/Python code that was reported to work; however, in my hands, running the code below over-crops the image.
import Image
import sys
image=Image.open('L_2d.png')
image.load()
imageSize = image.size
imageBox = image.getbbox()
imageComponents = image.split()
rgbImage = Image.new("RGB", imageSize, (0,0,0))
rgbImage.paste(image, mask=imageComponents[3])
croppedBox = rgbImage.getbbox()
print imageBox
print croppedBox
if imageBox != croppedBox:
    cropped = image.crop(croppedBox)
    print 'L_2d.png:', "Size:", imageSize, "New Size:", croppedBox
    cropped.save('L_2d_cropped.png')
the output is this:
Can anyone more familiar with image processing/PIL help me figure out the issue?
Install Pillow
pip install Pillow
and use as
from PIL import Image
image=Image.open('L_2d.png')
imageBox = image.getbbox()
cropped = image.crop(imageBox)
cropped.save('L_2d_cropped.png')
When you search for boundaries by mask=imageComponents[3], you search only by the alpha channel.
You can use numpy, convert the image to array, find all non-empty columns and rows and then create an image from these:
import Image
import numpy as np
image=Image.open('L_2d.png')
image.load()
image_data = np.asarray(image)
image_data_bw = image_data.max(axis=2)
non_empty_columns = np.where(image_data_bw.max(axis=0)>0)[0]
non_empty_rows = np.where(image_data_bw.max(axis=1)>0)[0]
cropBox = (min(non_empty_rows), max(non_empty_rows), min(non_empty_columns), max(non_empty_columns))
image_data_new = image_data[cropBox[0]:cropBox[1]+1, cropBox[2]:cropBox[3]+1 , :]
new_image = Image.fromarray(image_data_new)
new_image.save('L_2d_cropped.png')
The result looks like
If anything is unclear, just ask.
I tested most of the answers in this post; however, I ended up writing my own. I used Anaconda Python 3.
from PIL import Image, ImageChops

def trim(im):
    bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    # Bounding box given as a 4-tuple defining the left, upper, right, and lower pixel coordinates.
    # If the image is completely empty, this method returns None.
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)

if __name__ == "__main__":
    bg = Image.open("test.jpg") # The image to be cropped
    new_im = trim(bg)
    new_im.show()
Here's another version using pyvips.
import sys
import pyvips
image = pyvips.Image.new_from_file(sys.argv[1])
left, top, width, height = image.find_trim(threshold=2, background=[255, 255, 255])
image = image.crop(left, top, width, height)
image.write_to_file(sys.argv[2])
The pyvips trimmer is useful for photographic images. It does a median filter, subtracts the background, finds pixels over the threshold, and removes up to the first and last row and column outside this set. The median and threshold mean it is not thrown off by things like JPEG compression, where noise or invisible compression artefacts can confuse other trimmers.
If you don't supply the background argument, it uses the pixel at (0, 0). threshold defaults to 10, which is about right for JPEG.
Here it is running on an 8k x 8k pixel NASA earth image:
$ time ./trim.py /data/john/pics/city_lights_asia_night_8k.jpg x.jpg
real 0m1.868s
user 0m13.204s
sys 0m0.280s
peak memory: 100mb
Before:
After:
There's a blog post with some more discussion here.
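For a quick start, find_trim can also be called with no arguments, in which case it trims against the (0, 0) pixel with the default threshold of 10; a minimal sketch, with photo.jpg standing in for any input file:
import pyvips

image = pyvips.Image.new_from_file('photo.jpg')
# background defaults to the pixel at (0, 0), threshold to 10
left, top, width, height = image.find_trim()
image.crop(left, top, width, height).write_to_file('photo_trimmed.jpg')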
This is an improvement over snew's reply, which works for a transparent background. With mathematical morphology we can make it work on a white background (instead of transparent) with the following code:
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from skimage.io import imread
from skimage.morphology import convex_hull_image
from skimage.color import rgb2gray

im = imread('L_2d.jpg')
plt.imshow(im)
plt.title('input image')
plt.show()

# create a binary image
im1 = 1 - rgb2gray(im)
threshold = 0.5
im1[im1 <= threshold] = 0
im1[im1 > threshold] = 1

chull = convex_hull_image(im1)
plt.imshow(chull)
plt.title('convex hull in the binary image')
plt.show()

imageBox = Image.fromarray((chull*255).astype(np.uint8)).getbbox()
cropped = Image.fromarray(im).crop(imageBox)
cropped.save('L_2d_cropped.jpg')
plt.imshow(cropped)
plt.show()
pilkit already contains a processor for automatic cropping, TrimBorderColor. Something like this should work:
from pilkit.lib import Image
from pilkit.processors import TrimBorderColor
img = Image.open('/path/to/my/image.png')
processor = TrimBorderColor()
new_img = processor.process(img)
https://github.com/matthewwithanm/pilkit/blob/b24990167aacbaab3db6d8ec9a02f9ad42856898/pilkit/processors/crop.py#L33
Came across this post recently and noticed the PIL library has changed. I re-implemented this with OpenCV:
import cv2
import numpy as np

def crop_im(im, padding=0.1):
    """
    Takes cv2 image, im, and padding % as a float, padding,
    and returns cropped image.
    """
    bw = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    rows, cols = bw.shape
    non_empty_columns = np.where(bw.min(axis=0) < 255)[0]
    non_empty_rows = np.where(bw.min(axis=1) < 255)[0]
    cropBox = (int(min(non_empty_rows) * (1 - padding)),
               int(min(max(non_empty_rows) * (1 + padding), rows)),
               int(min(non_empty_columns) * (1 - padding)),
               int(min(max(non_empty_columns) * (1 + padding), cols)))
    cropped = im[cropBox[0]:cropBox[1]+1, cropBox[2]:cropBox[3]+1, :]
    return cropped

im = cv2.imread('testimage.png')
cropped = crop_im(im)
cv2.imshow('', cropped)
cv2.waitKey(0)
I know that this post is old but, for some reason, none of the suggested answers worked for me. So I hacked together my own version from the existing answers:
import Image
import numpy as np
import glob
import shutil
import os

grey_tolerance = 0.7 # (0,1) = crop (more,less)
f = 'test_image.png'
file, ext = os.path.splitext(f)

def get_cropped_line(non_empty_elms, tolerance, S):
    if (sum(non_empty_elms) == 0):
        cropBox = ()
    else:
        non_empty_min = non_empty_elms.argmax()
        non_empty_max = S - non_empty_elms[::-1].argmax() + 1
        cropBox = (non_empty_min, non_empty_max)
    return cropBox

def get_cropped_area(image_bw, tol):
    max_val = image_bw.max()
    tolerance = max_val * tol
    non_empty_elms = (image_bw <= tolerance).astype(int)
    S = non_empty_elms.shape
    # Traverse rows
    cropBox = [get_cropped_line(non_empty_elms[k,:], tolerance, S[1]) for k in range(0, S[0])]
    cropBox = filter(None, cropBox)
    xmin = [k[0] for k in cropBox]
    xmax = [k[1] for k in cropBox]
    # Traverse cols
    cropBox = [get_cropped_line(non_empty_elms[:,k], tolerance, S[0]) for k in range(0, S[1])]
    cropBox = filter(None, cropBox)
    ymin = [k[0] for k in cropBox]
    ymax = [k[1] for k in cropBox]
    xmin = min(xmin)
    xmax = max(xmax)
    ymin = min(ymin)
    ymax = max(ymax)
    ymax = ymax - 1 # Not sure why this is necessary, but it seems to be.
    cropBox = (ymin, ymax-ymin, xmin, xmax-xmin)
    return cropBox

def auto_crop(f, ext):
    image = Image.open(f)
    image.load()
    image_data = np.asarray(image)
    image_data_bw = image_data[:,:,0] + image_data[:,:,1] + image_data[:,:,2]
    cropBox = get_cropped_area(image_data_bw, grey_tolerance)
    image_data_new = image_data[cropBox[0]:cropBox[1]+1, cropBox[2]:cropBox[3]+1, :]
    new_image = Image.fromarray(image_data_new)
    f_new = f.replace(ext,'') + '_cropped' + ext
    new_image.save(f_new)
