Image thresholding in OpenCV - python

I have the following image, which is a scanned A4 paper. What I need to do is to render the individual characters from the scanned image into 28*28 or 32*32 images. I am doing this to generate a dataset for Devanagari OCR. I applied simple thresholding, which didn't help, and when I applied adaptive thresholding the result was not as expected either.
After performing grayscaling and thresholding I get the following thresholded image.
My final rendered individual character images look like this,
but my expected (28*28) images were like this.
My source code is:
import cv2
import numpy as np
import time

# grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cv2.imshow('gray', gray)

# binary
# simple thresholding
# ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
# cv2.imshow('threshold', thresh)

# adaptive thresholding
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 6)
cv2.imshow('threshold', thresh)

# dilation
kernel = np.ones((10, 1), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)

# find contours
# cv2.findContours() changed from OpenCV 3 to OpenCV 4: it now returns two values instead of three
cv2MajorVersion = cv2.__version__.split(".")[0]
# check for contours on thresh
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# sort contours left to right by the x coordinate of their bounding boxes
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])

for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)

    # Getting ROI
    # roi = image[y:y + h, x:x + w]
    roi = thresh[y:y + h, x:x + w]

    # resizing
    resize = cv2.resize(roi, (28, 28))

    if w > 15 and h > 15:
        cv2.imwrite(str(i) + str(time.time()) + '{}.png'.format(i), resize)
Please suggest how to extract individual characters as black and white 28*28 images for dataset collection. If you have any ideas about dataset generation, they would be highly appreciated.

The problem is how you are finding contours.
cv.findContours() takes a contour retrieval mode as an input parameter; the available options are
cv.RETR_LIST, cv.RETR_TREE, cv.RETR_CCOMP, cv.RETR_EXTERNAL.
cv.RETR_EXTERNAL only finds the outermost members of the nested contour families. More can be found here.
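As a quick illustration of the difference, you can compare how many contours each mode returns (this snippet is my addition, assuming OpenCV 4 and the img_dilation image from the question):

ext, _ = cv2.findContours(img_dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
all_, _ = cv2.findContours(img_dilation, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
print(len(ext), len(all_))  # RETR_LIST typically reports many more contours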
So with your code at
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
drawing rectangles on the resulting contours shows:
On changing the contour retrieval mode to cv2.RETR_LIST,
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
drawing rectangles on the resulting contours shows:
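For reference, here is a minimal sketch of the visualization loop behind these screenshots (my addition, assuming image is the original BGR scan and ctrs is the contour list from above):

vis = image.copy()
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('boxes', vis)
cv2.waitKey(0)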
Later on, using your logic with the restricted size of contours,
if w > 15 and h > 15: (changing these dimensions as needed) will get you the images of the characters.
Edited:
if (w > 20 and h > 20) and (w < 130 and h < 120):
    cv2.imwrite(str(i) + str(time.time()) + '{}.png'.format(i), resize)

This works for me and gives nice black-and-white pictures:
#!/usr/bin/env python
import cv2

counter = 0
img = cv2.imread('Downloads/dewangari.jpg')
print(img.shape)  # (2338, 1700, 3)
img = img[77:1942, 91:1517]
print(img.shape)  # (1865, 1426, 3)
_, img = cv2.threshold(img[:, :, 2], 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

x_size = img.shape[0] // 15  # integer division keeps the slice indices ints
y_size = img.shape[1] // 8

for i in range(8):
    for j in range(15):
        cell = img[j*x_size+7:(j+1)*x_size-7, i*y_size+7:(i+1)*y_size-7]
        cv2.imshow('frame', cell)
        cell = 255 - cell  # this inverts the image
        cv2.imwrite('image_%03d.png' % counter, cell)  # this saves the cell
        counter += 1
        cv2.waitKey(0)

cv2.destroyAllWindows()
and so on ....
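The question asks for 28*28 images, and the cells above come out larger and rectangular. A minimal sketch of a padding step (my own addition, not part of the answer above) that squares each cell before shrinking it, so the characters are not distorted:

def to_28x28(cell, bg=0):
    # Pad the crop to a square filled with the background value,
    # then shrink to 28x28 so the character keeps its aspect ratio.
    h, w = cell.shape[:2]
    size = max(h, w)
    canvas = np.full((size, size), bg, np.uint8)
    y0, x0 = (size - h) // 2, (size - w) // 2
    canvas[y0:y0+h, x0:x0+w] = cell
    return cv2.resize(canvas, (28, 28), interpolation=cv2.INTER_AREA)

For the inverted cells above (white ink on black), bg=0 keeps the padding black, MNIST-style; this needs import numpy as np alongside cv2.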

Related

Unable to segment handwritten characters

I am trying to extract handwritten numbers and letters from an image; for that I followed this stackoverflow link. It works fine for most images where the letters are written with a marker, but when I use an image where the text is written with a pen it fails miserably. I need some help to fix this.
Below is my code:
import cv2
import imutils
from imutils import contours

# Load image, grayscale, Otsu's threshold
image = cv2.imread('xxx/pic_crop_7.png')
image = imutils.resize(image, width=350)
img = image.copy()

# Remove border
kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 50))
temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
temp3 = cv2.add(temp1, temp2)
result = cv2.add(temp3, image)

# Convert to grayscale and Otsu's threshold
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# thresh = cv2.dilate(thresh, None, iterations=1)

# Find contours and filter using contour area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # handle OpenCV 3/4 return values
MIN_AREA = 45
digit_contours = []
for c in cnts:
    if cv2.contourArea(c) > MIN_AREA:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (36, 255, 12), 2)
        digit_contours.append(c)
        # cv2.imwrite("C:/Samples/Dataset/ocr/segmented" + str(i) + ".png", image[y:y+h,x:x+w])

sorted_digit_contours = contours.sort_contours(digit_contours, method='left-to-right')[0]
contour_number = 0
for c in sorted_digit_contours:
    x, y, w, h = cv2.boundingRect(c)
    ROI = image[y:y+h, x:x+w]
    cv2.imwrite('xxx/segment_{}.png'.format(contour_number), ROI)
    contour_number += 1

cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey()
It is correctly able to extract the numbers when they are written with a marker.
Below is an example:
Original Image
Correctly extracted characters
Image where it fails to read:
Original Image
Incorrectly extracted
In this case, you only need to adjust your parameters.
Because there are no vertical lines in your handwritten characters' background, I decided to delete the vertical-kernel step and keep only the horizontal one:
# Remove border
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
result = cv2.add(temp2, image)
And it works.
The solution that CodingPeter has given is perfectly fine, except that it may not be generic enough for the two test images you have posted. So, here's my take on it that might work on both of your test images, albeit with slightly lower accuracy.
import numpy as np
import cv2
import matplotlib.pyplot as plt

plt.rcParams['figure.figsize'] = (20, 20)
plt.rcParams["image.cmap"] = 'gray'

img_rgb = cv2.imread('path/to/your/image.jpg')
img = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
th = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)

# Keep only long horizontal strokes, then erase the short ones
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 1))
horiz = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel, iterations=3)
ctrs, _ = cv2.findContours(horiz, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    if w < 20:
        cv2.drawContours(horiz, [ctr], 0, 0, cv2.FILLED)

# Same for vertical strokes
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 10))
vert = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel, iterations=3)
ctrs, _ = cv2.findContours(vert, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    if h < 25:
        cv2.drawContours(vert, [ctr], 0, 0, cv2.FILLED)

# Remove the detected lines from the thresholded image
th = th - (horiz | vert)

ctrs, _ = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
min_ctr_area = 400  # Min character bounding box area
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    # Filter contours based on size
    if w * h > min_ctr_area and w < 100 and h < 100:
        cv2.rectangle(img_rgb, (x, y), (x+w, y+h), (0, 255, 0), 1)
plt.imshow(img_rgb)
Of course, some of the parameters here are hard-coded for filtering; they compare the contour height and width to decide whether something is part of a line or a character. With different images you may have to change these values sensibly.
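One way to make those thresholds less brittle (my own suggestion, not part of the answer above) is to derive them from the image size instead of hard-coding absolute pixel values; assuming img is the grayscale image from the code above:

h_img, w_img = img.shape[:2]
min_ctr_area = (w_img * h_img) // 4000  # hypothetical divisor; tune per dataset
max_char_w = w_img // 15                # anything wider is probably a line fragment
max_char_h = h_img // 15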

opencv, python, how to read grouped text in boxes

I would like to extract the groups of text that are in the image.
I have managed to remove the first contour (as described below), but the issue is that when I try to read the text, some of it is missing. I expect this is because of other contours that have stayed in the image, but when I try to remove them, I lose the grouping or part of the text...
for i in range(len(contours)):
    if 800 < cv2.contourArea(contours[i]) < 2000:
        x, y, width, height = cv2.boundingRect(contours[i])
        roi = img[y:y + height, x:x + width]
        roi_h = roi.shape[0]
        roi_w = roi.shape[1]
        resize_roi = cv2.resize(roi, (int(roi_w*6), int(roi_h*6)), interpolation=cv2.INTER_LINEAR)
        afterd = cv2.cvtColor(resize_roi, cv2.COLOR_BGR2GRAY)
        retim, threshm = cv2.threshold(afterd, 210, 225, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        contoursm, hierarchym = cv2.findContours(threshm, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        mask = np.ones(resize_roi.shape[:2], dtype="uint8") * 255
        for m in range(len(contoursm)):
            if 10000 < cv2.contourArea(contoursm[m]) < 33000:
                cv2.drawContours(mask, contoursm, m, 0, 7)
        afterd = cv2.bitwise_not(afterd)
        afterd = cv2.bitwise_and(afterd, afterd, mask=mask)
        afterd = cv2.bitwise_not(afterd)
        print(pytesseract.image_to_string(afterd, lang='eng', config='--psm 3'))
Instead of dealing with all the boxes, I suggest deleting them by finding connected components and filling the large clusters with the background color.
You may use the following stages:
Convert the image to grayscale, apply a threshold, and invert the polarity.
Delete all clusters having more than 100 pixels (assuming the letters are smaller).
Dilate thresh to unite the text areas into single "blocks".
Find contours on the dilated thresh image.
Find bounding rectangles, and apply OCR to each rectangle.
Here is the complete code sample:
import numpy as np
import cv2
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'  # I am using Windows

img = cv2.imread('img.png')  # Read input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert to grayscale
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)  # Convert to binary and invert polarity

nlabel, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, connectivity=8)

thresh_size = 100

# Delete all lines by filling large clusters with zeros.
for i in range(1, nlabel):
    if stats[i, cv2.CC_STAT_AREA] > thresh_size:
        thresh[labels == i] = 0

# Dilate thresh for uniting text areas to single blocks.
dilated_thresh = cv2.dilate(thresh, np.ones((5, 5)))

# Find contours on dilated thresh
contours, hierarchy = cv2.findContours(dilated_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

# Iterate contours, find bounding rectangles
for c in contours:
    # Get bounding rectangle
    x, y, w, h = cv2.boundingRect(c)

    # Draw green rectangle for testing
    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), thickness=1)

    # Get the slice with the text (slice with margins).
    afterd = thresh[y-3:y+h+3, x-3:x+w+3]

    # Show afterd as image for testing
    # cv2.imshow('afterd', afterd)
    # cv2.waitKey(100)

    # The OCR works only when the image is enlarged and the text is black?
    resized_afterd = cv2.resize(afterd, (afterd.shape[1]*5, afterd.shape[0]*5), interpolation=cv2.INTER_LANCZOS4)
    print(pytesseract.image_to_string(255 - resized_afterd, lang='eng', config='--psm 3'))

cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result strings after OCR:
DF6DF645
RFFTW
2345
2277
AABBA
DF1267
ABCET5456
Input image with green boxes around the text:
Update:
Grouping contours:
For grouping contours you may use the hierarchy result of cv2.findContours with cv2.RETR_TREE.
See the Contours Hierarchy documentation.
You may use the parent-child relationship for grouping contours.
Here is an incomplete sample code for using the hierarchy:
img = cv2.imread('img.png')  # Read input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert to Grayscale.
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)  # Convert to binary and invert polarity

nlabel, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, connectivity=8)

thresh_boxes = np.zeros_like(thresh)
thresh_size = 100

# Delete all lines by filling large clusters with zeros.
# Make new image that contains only boxes - without text
for i in range(1, nlabel):
    if stats[i, cv2.CC_STAT_AREA] > thresh_size:
        thresh[labels == i] = 0
        thresh_boxes[labels == i] = 255

# Find contours on thresh_boxes, use cv2.RETR_TREE to build tree with hierarchy
contours, hierarchy = cv2.findContours(thresh_boxes, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# Iterate contours, and hierarchy
for c, i in zip(contours, range(len(contours))):
    h = hierarchy[0, i, :]
    h_child = h[2]
    # if contour has no child (last level)
    if h_child == -1:
        h_parent = h[3]
        x, y, w, h = cv2.boundingRect(c)
        cv2.putText(img, str(h_parent), (x+w//2-4, y+h//2+8),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255), thickness=2)

cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
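To actually group the boxes from here, one could collect the child rectangles under each parent index; a minimal sketch of that step (my addition, since the sample above stops at labeling; the variable names come from it):

from collections import defaultdict

groups = defaultdict(list)  # parent contour index -> bounding boxes of its children
for i, c in enumerate(contours):
    if hierarchy[0, i, 2] == -1:      # contour has no child (last level)
        parent = hierarchy[0, i, 3]
        groups[parent].append(cv2.boundingRect(c))

for parent, boxes in groups.items():
    print(parent, boxes)  # each list is one group of boxes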

How to separate images using watershed algorithm in Python

How do I separate individual images among multiple images after image segmentation using the watershed algorithm in Python?
The attached image consists of 4 images, from which we need to apply image segmentation and separate each individual image.
We will flood-fill it first.
import cv2
import numpy as np

# Read image
im_in = cv2.imread("2SNAT.jpg", cv2.IMREAD_GRAYSCALE)

# Threshold.
# Set values equal to or above 220 to 0.
# Set values below 220 to 255.
th, im_th = cv2.threshold(im_in, 220, 255, cv2.THRESH_BINARY_INV)

# Copy the thresholded image.
im_floodfill = im_th.copy()

# Mask used for flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)

# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0, 0), 255)

# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)

# Combine the two images to get the foreground.
im_out = im_th | im_floodfill_inv
Then find contours and crop them out:
# Note: the three-value return is the OpenCV 3 signature; in OpenCV 4, findContours returns only (contours, hierarchy)
im, contours, hierarchy = cv2.findContours(im_out.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
final_contours = []
for contour in contours:
    area = cv2.contourArea(contour)
    if area > 1000:
        final_contours.append(contour)
Crop-out step, also drawing rectangles on the original image:
counter = 0
for c in final_contours:
    counter = counter + 1
    # for c in [final_contours[0]]:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.01 * peri, True)
    x, y, w, h = cv2.boundingRect(approx)
    print(x, y, w, h)
    aspect_ratio = w / float(h)
    if 0.8 <= aspect_ratio <= 4:
        cv2.rectangle(im_in, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.imwrite('splitted_{}.jpg'.format(counter), im_in[y:y+h, x:x+w])
cv2.imwrite('rectangled_split.jpg', im_in)
Instead of using watershed, here's a simple approach using thresholding + morphological operations. The idea is to obtain a binary image, then perform a morphological close to merge each object into a single contour. We then find contours and extract/save each ROI using Numpy slicing.
Here's each individual object highlighted in green
Individual saved object
Code
import cv2

# Load image, grayscale, Otsu's threshold
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Morph close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7,7))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)

# Find contours and extract ROI
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
num = 0
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
    ROI = original[y:y+h, x:x+w]
    cv2.imwrite('ROI_{}.png'.format(num), ROI)
    num += 1

cv2.imshow('image', image)
cv2.waitKey()

How to extract multiple objects from an image using Python OpenCV?

I am trying to extract an object from an image by its color using OpenCV. I have tried inverse thresholding and grayscale combined with cv2.findContours(), but I am unable to use it recursively. Furthermore, I can't figure out how to "cut out" the match from the original image and save it to a single file.
EDIT
import cv2
import numpy as np

# load the images
empty = cv2.imread("empty.jpg")
full = cv2.imread("test.jpg")

# save color copy for visualization
full_c = full.copy()

# convert to grayscale
empty_g = cv2.cvtColor(empty, cv2.COLOR_BGR2GRAY)
full_g = cv2.cvtColor(full, cv2.COLOR_BGR2GRAY)
empty_g = cv2.GaussianBlur(empty_g, (51, 51), 0)
full_g = cv2.GaussianBlur(full_g, (51, 51), 0)
diff = full_g - empty_g

# thresholding
diff_th = cv2.adaptiveThreshold(full_g, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)

# combine the difference image and the inverse threshold
zone = cv2.bitwise_and(diff, diff_th, None)

# threshold to get the mask instead of gray pixels
_, bag = cv2.threshold(zone, 100, 255, 0)

# dilate to account for the blurring in the beginning
kernel = np.ones((15, 15), np.uint8)
bag = cv2.dilate(bag, kernel, iterations=1)

# find contours, sort and draw the biggest one
contours, _ = cv2.findContours(bag, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:3]

i = 0
while i < len(contours):
    x, y, width, height = cv2.boundingRect(contours[i])
    roi = full_c[y:y+height, x:x+width]
    cv2.imwrite("piece" + str(i) + ".png", roi)
    i += 1
Here, empty is just a white image of size 1500 * 1000 like the one above, and test is the image above.
This is what I came up with. The only downside is that I now have a third image, showing a shadow zone, instead of only the 2 expected...
Here's a simple approach:
Obtain binary image. Load the image, grayscale, Gaussian blur, Otsu's threshold, then dilate to obtain a binary black/white image.
Extract ROI. Find contours, obtain bounding boxes, extract ROI using Numpy slicing, and save each ROI
Binary image (Otsu's thresholding + dilation)
Detected ROIs highlighted in green
To extract each ROI, you can find the bounding box coordinates using cv2.boundingRect(), crop the desired region, then save the image
x,y,w,h = cv2.boundingRect(c)
ROI = original[y:y+h, x:x+w]
First object
Second object
import cv2

# Load image, grayscale, Gaussian blur, Otsu's threshold, dilate
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7,7))
dilate = cv2.dilate(thresh, kernel, iterations=1)

# Find contours, obtain bounding box coordinates, and extract ROI
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
image_number = 0
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
    ROI = original[y:y+h, x:x+w]
    cv2.imwrite("ROI_{}.png".format(image_number), ROI)
    image_number += 1

cv2.imshow('image', image)
cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.waitKey()

detect rectangle in image and crop

I have lots of scanned images of handwritten digits inside rectangles (small ones).
Please help me crop each region containing digits and save it, giving the same name to each row.
import cv2

img = cv2.imread(r'Data\Scan_20170612_4.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

i = 0
for c in contours:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.09 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
        cv2.imwrite('cropped\\' + str(i) + '_img.jpg', img)
        i += 1
Here is my version:
import cv2
import numpy as np

fileName = ['9', '8', '7', '6', '5', '4', '3', '2', '1', '0']
img = cv2.imread(r'Data\Scan_20170612_17.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)

kernel = np.ones((5, 5), np.uint8)
erosion = cv2.erode(gray, kernel, iterations=2)
kernel = np.ones((4, 4), np.uint8)
dilation = cv2.dilate(erosion, kernel, iterations=2)

edged = cv2.Canny(dilation, 30, 200)
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# sort bounding rectangles from bottom to top
rects = [cv2.boundingRect(cnt) for cnt in contours]
rects = sorted(rects, key=lambda x: x[1], reverse=True)

i = -1
j = 1
y_old = 5000
x_old = 5000
for rect in rects:
    x, y, w, h = rect
    area = w * h
    if 47000 < area < 70000:
        # a big jump in y means we moved up to a new row
        if (y_old - y) > 200:
            i += 1
            y_old = y
        if abs(x_old - x) > 300:
            x_old = x
            # crop the inside of the rectangle with a 10-pixel margin
            out = img[y+10:y+h-10, x+10:x+w-10]
            cv2.imwrite('cropped\\' + fileName[i] + '_' + str(j) + '.jpg', out)
            j += 1
That's an easy thing if you try. Here's my output (the image and one small piece of it).
What did I do?
Resized the image first because it was too big on my screen
Erode and dilate to remove small dots and thicken the lines
Threshold the image
Flood fill, beginning at the right point
Invert the flood fill
Find contours and draw them one at a time, keeping those whose area is approximately in the range of the rectangle's area. For my resized (500x500) image I put the contour area in the range 500 to 2500 (trial and error anyway).
Find the bounding rectangle and crop that mask from the main image.
Then save each piece with a proper name, which I didn't do.
Maybe there's a simpler way, but I liked this. I hadn't included the code at first because I made it all clumsy, but here it is since you still need it.
Here's how the mask looks when you find the contours one at a time
code:
import cv2
import numpy as np

# Run the code with the image name, keep pressing space bar
# Change the kernel, iterations, contour area, and seed position accordingly
# These values work for your present image
img = cv2.imread("your_image.jpg", 0)
h, w = img.shape[:2]
kernel = np.ones((15, 15), np.uint8)

e = cv2.erode(img, kernel, iterations=2)
d = cv2.dilate(e, kernel, iterations=1)
ret, th = cv2.threshold(d, 150, 255, cv2.THRESH_BINARY_INV)

mask = np.zeros((h+2, w+2), np.uint8)
cv2.floodFill(th, mask, (200, 200), 255)  # seed position = (200, 200)
out = cv2.bitwise_not(th)
out = cv2.dilate(out, kernel, iterations=3)

cnt, hier = cv2.findContours(out, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(cnt)):
    area = cv2.contourArea(cnt[i])
    if 10000 < area < 100000:
        mask = np.zeros_like(img)
        cv2.drawContours(mask, cnt, i, 255, -1)
        x, y, w, h = cv2.boundingRect(cnt[i])
        crop = img[y:h+y, x:w+x]
        cv2.imshow("snip", crop)
        if cv2.waitKey(0) == 27:
            break
cv2.destroyAllWindows()
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
You are using cv2.RETR_LIST to find contours in the image. For your image, cv2.RETR_EXTERNAL gives better output. Before using it, first remove the black border line from the image.
cv2.RETR_LIST gives you a list of all the contours in the image
cv2.RETR_EXTERNAL gives you only the external or outer contours, not the internal ones
Change the line to
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
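As for removing the black border first, a minimal sketch of one way to do it (trimming a fixed margin is my own assumption; the answer doesn't specify a method):

margin = 10  # pixels to trim on each side; tune to the scan's border width
edged = edged[margin:-margin, margin:-margin]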
Contours Hierarchy
