How can I access the ordering of contours in `opencv` - Python

import cv2
import Image
import numpy as np
#improve image..........................................................
im = cv2.imread('bw_image1.jpg')
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2)
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
i=0
for cnt in contours:
    [x,y,w,h] = cv2.boundingRect(cnt)
    if h>15:
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),1)
        im3=im[y:y+h,x:x+w]
        cv2.imwrite('objects/pix%i.png'%i,im3)
        i+=1
cv2.imshow('norm',im)
cv2.imwrite('objects/shhh.jpg',im)
key = cv2.waitKey(0)
#adding object............
im0 = cv2.imread('objects/pix0.png',0)
im1 = cv2.imread('objects/pix1.png',0)
im2 = cv2.imread('objects/pix2.png',0)
im3 = cv2.imread('objects/pix3.png',0)
im4 = cv2.imread('objects/pix4.png',0)
im5 = cv2.imread('objects/pix5.png',0)
h0, w0 = im0.shape[:2]
h1, w1 = im1.shape[:2]
h2, w2 = im2.shape[:2]
h3, w3 = im3.shape[:2]
h4, w4 = im4.shape[:2]
h5, w5 = im5.shape[:2]
maxh=max(h0,h1,h2,h3,h4,h5)
#add 5 px of space between the objects
new = np.zeros((maxh, w0+w1+w2+w3+w4+w5+5),np.uint8)
new=(255-new)
new[maxh-h0:, :w0] = im0
new[maxh-h1:, w0+1:w0+w1+1] = im1
new[maxh-h2:, w0+w1+2:w0+w1+w2+2] = im2
new[maxh-h3:, w0+w1+w2+3:w0+w1+w2+w3+3] = im3
new[maxh-h4:, w0+w1+w2+w3+4:w0+w1+w2+w3+w4+4] = im4
new[maxh-h5:, w0+w1+w2+w3+w4+5:] = im5
gray = cv2.cvtColor(new, cv2.COLOR_GRAY2BGR)
cv2.imshow('norm',gray)
cv2.imwrite('objects/new_image.jpg',gray)
key = cv2.waitKey(0)
# threshold ................................................
im_gray = cv2.imread('objects/new_image.jpg', cv2.CV_LOAD_IMAGE_GRAYSCALE)
(thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
thresh = 20
im_bw = cv2.threshold(im_gray, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite('bw_image1.jpg', im_bw)
im = Image.open('bw_image1.jpg')
im2 = im.resize((300, 175), Image.NEAREST)
im2.save('bw_image1.jpg')
I am using the above code to reorder an image.
The problem is that the final result image is not saved in the same sequence as the main image.
Can anyone tell me how to do it?
Main image:
Result image:
The word in the main image and in the result image should look the same. Thanks in advance.

OpenCV finds the contours starting from the bottom of the image, so when you find the contours of an image like this:
the first contour is for the 8 (it sits a bit lower than the 3), then 3, 7, 9, 4, e. There is no fixed rule for the order in which contours are returned, so we need to store the objects based on their x coordinate: x increases from left to right, so we can use the code below to save the detected objects in order after finding the contours:
import numpy as np
import cv2
im = cv2.imread('nnn.jpg')
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
thresh = cv2.adaptiveThreshold(blur,255,1,1,19,4)
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
h_list=[]
for cnt in contours:
    [x,y,w,h] = cv2.boundingRect(cnt)
    if w*h>250:
        h_list.append([x,y,w,h])
#print h_list
ziped_list=list(zip(*h_list))
x_list=list(ziped_list[0])
dic=dict(zip(x_list,h_list))
x_list.sort()
i=0
for x in x_list:
    [x,y,w,h]=dic[x]
    #cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),1)
    im3=im[y:y+h,x:x+w]
    cv2.imwrite('objects/pix%i.png'%i,im3)
    i+=1
cv2.imshow('norm',im)
cv2.imwrite('objects/shhh.jpg',im)
key = cv2.waitKey(0)
Note that the line #cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),1) has been commented out to avoid drawing extra rectangles in the result image.
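As an aside, the dictionary keyed on x is not strictly needed: the same left-to-right ordering can be obtained by sorting the bounding boxes directly. A minimal sketch, assuming the same contours and im as above:
sorted_boxes = [cv2.boundingRect(c) for c in contours]
sorted_boxes = [b for b in sorted_boxes if b[2] * b[3] > 250]   # same area filter as above
sorted_boxes.sort(key=lambda b: b[0])                           # sort by x, i.e. left to right
for i, (x, y, w, h) in enumerate(sorted_boxes):
    cv2.imwrite('objects/pix%i.png' % i, im[y:y+h, x:x+w])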
Then concatenate the saved objects with this code:
import numpy as np
import cv2
im0 = cv2.imread('objects/pix0.png',0)
im1 = cv2.imread('objects/pix1.png',0)
im2 = cv2.imread('objects/pix2.png',0)
im3 = cv2.imread('objects/pix3.png',0)
im4 = cv2.imread('objects/pix4.png',0)
im5 = cv2.imread('objects/pix5.png',0)
h0, w0 = im0.shape[:2]
h1, w1 = im1.shape[:2]
h2, w2 = im2.shape[:2]
h3, w3 = im3.shape[:2]
h4, w4 = im4.shape[:2]
h5, w5 = im5.shape[:2]
maxh=max(h0,h1,h2,h3,h4,h5)
#add 50 for space between the objects
new = np.zeros((maxh, w0+w1+w2+w3+w4+w5+50),np.uint8)
new=(255-new)
new[maxh-h0:, :w0] = im0
new[maxh-h1:, w0+10:w0+w1+10] = im1
new[maxh-h2:, w0+w1+20:w0+w1+w2+20] = im2
new[maxh-h3:, w0+w1+w2+30:w0+w1+w2+w3+30] = im3
new[maxh-h4:, w0+w1+w2+w3+40:w0+w1+w2+w3+w4+40] = im4
new[maxh-h5:, w0+w1+w2+w3+w4+50:] = im5
gray = cv2.cvtColor(new, cv2.COLOR_GRAY2BGR)
cv2.imshow('norm',gray)
cv2.imwrite('objects/new_image.jpg',gray)
key = cv2.waitKey(0)
result:
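The hard-coded six-image concatenation above can also be written generically for any number of saved crops. A hedged sketch, assuming the objects/pix%i.png files produced by the previous step exist (lexicographic sorting of the filenames is fine for single-digit indices):
import glob
import cv2
import numpy as np

# Pad every saved crop to a common height on white, then concatenate them
# left to right with a small white gap between objects.
paths = sorted(glob.glob('objects/pix*.png'))
crops = [cv2.imread(p, 0) for p in paths]
maxh = max(c.shape[0] for c in crops)
gap = np.full((maxh, 10), 255, np.uint8)          # 10 px white spacer

padded = []
for c in crops:
    h, w = c.shape[:2]
    block = np.full((maxh, w), 255, np.uint8)
    block[maxh - h:, :] = c                       # bottom-align, as in the code above
    padded.extend([block, gap])

new = cv2.hconcat(padded[:-1])                    # drop the trailing spacer
cv2.imwrite('objects/new_image.jpg', new)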

import cv2
import numpy as np
im = cv2.imread('0.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
i = 0
for cnt in contours:
    [x, y, w, h] = cv2.boundingRect(cnt)
    if h > 15:
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 1)
        im3 = im[y:y + h, x:x + w]
        cv2.imwrite('ob/pix%i.png' % i, im3)
        i += 1
cv2.imshow('norm', im)
key = cv2.waitKey(0)
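Note that this variant unpacks three return values from cv2.findContours, which matches OpenCV 3.x; OpenCV 2.4 and 4.x return only two. A small version-agnostic sketch:
# Works on OpenCV 2.4, 3.x and 4.x: findContours returns 2 or 3 values
# depending on the version, and the contours are always the second-to-last one.
items = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = items[-2], items[-1]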

Related

Is there a way to read text from such images using Selenium and Python?

https://imgur.com/a/zCmwUEf.jpg
This is the image from which I am trying to extract text, but I am unable to do so.
import contours
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Users\tan\tesseract\Tesseract-OCR\tesseract.exe'
# Opening the image & storing it in an image object
img = cv2.imread("C:/Users/tan/Desktop/my tppc bots/training challange - Copy/sample4.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))
dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)
contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
im2 = img.copy()
file = open("recognized.txt", "w+")
file.write("")
file.close()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    rect = cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cropped = im2[y:y + h, x:x + w]
    file = open("recognized.txt", "a")
    text = pytesseract.image_to_string(cropped)
    file.write(text)
    file.write("\n")
This is my script.
When I run it, it executes fine, but when I open the text file it doesn't show any text; it is just empty.
Am I doing something wrong?
If someone can help me, that would be great.
Thanks!
I have found the easyocr library promising here.
Import the libs
import numpy as np
import easyocr
import cv2
Read the image file
reader = easyocr.Reader(['en'],gpu = False) # load once only in memory.
image_file_name='capImage.png' # this is the screen snap of your image
image = cv2.imread(image_file_name)
Get the text from the image
image_text=(reader.readtext(image,detail=0)[0]) # output came as D F7BE1
print(image_text.replace(" ","")) # removed the space and output is : DF7BE1
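Note that readtext with detail=0 returns a list of all detected text fragments, and taking [0] keeps only the first one. If an image may contain several fragments, a hedged variant is to join them all:
# Join every fragment easyocr detects instead of keeping only the first one.
fragments = reader.readtext(image, detail=0)
image_text = "".join(fragments).replace(" ", "")
print(image_text)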
Clean-up options for the image:
image = cv2.imread(image_file_name)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
sharpen_kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
sharpen = cv2.filter2D(gray, -1, sharpen_kernel)
thresh = cv2.threshold(sharpen, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
height = 100
dim = (800, 800)
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
Now utilize the images.
image_text=(reader.readtext(thresh,detail=0)[0])
print(image_text.replace(" ",""))
image_text=(reader.readtext(sharpen,detail=0)[0])
print(image_text.replace(" ",""))
output:

Python opencv Defect detection

I am trying to detect foreign substances in a round circle using OpenCV.
However, if the threshold is increased, the area around the circle is distorted, and if the threshold is decreased, foreign matter cannot be detected. Is there a way to detect foreign matter well while maintaining the circle?
import cv2
import cv2 as cv
import numpy as np
def roiSetting():
    img = cv2.imread('img.jpg')
    x = 100; y = 10;
    w = 700; h = 600;
    img_roi = img[400:1250,600:1450] #[colStart:colEnd, rowStart:rowEnd]
    cv2.imwrite("3_roi_img.jpg", img_roi)
    return img_roi

def imgCvt():
    print("imgCvt in")
    roi_img = roiSetting()
    bgr_img = cv2.cvtColor(roi_img,cv2.COLOR_RGB2BGR) #RGB->BGR convert
    bila_img = cv2.bilateralFilter(bgr_img,9,100,100) #bilateral filter apply
    r,g,b=cv2.split(bila_img) #split
    merge_img=cv2.merge((r,g,b)) #merge
    gray_img = cv2.cvtColor(merge_img,cv2.COLOR_RGB2GRAY) #RGB->GRAY convert
    ret,thresh_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY) #threshold apply
    equalize_img = cv2.equalizeHist(thresh_img) #histogram equalization
    canny_img = cv2.Canny(equalize_img,250,255) #Canny edge
    kernel = np.ones((2,2), np.uint8)
    dilate_img = cv2.dilate(canny_img, kernel, iterations = 1) # img dilate
    new, contours, hierarchy = cv2.findContours(dilate_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours= sorted(contours, key = cv2.contourArea, reverse = True)[:10] #sorting
    print(contours)
    c = contours[0]
    mask = np.zeros(roi_img.shape,np.uint8) #mask create(roi_img.shape)
    cont_image = cv2.drawContours(mask, [c], -1, (255, 255, 255), -1) #background remove
    con_gray_img = cv2.cvtColor(cont_image, cv2.COLOR_BGR2GRAY)
    ret, thresh1 = cv2.threshold(con_gray_img, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY)
    last_img = cv2.bitwise_and(roi_img, roi_img, mask = thresh1)
    kernel = np.ones((3,3), np.uint8)
    src = cv2.erode(last_img, kernel, iterations = 2)
    gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
    rt1, dst1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY)
    rt1, dst2 = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
    _, contour1, _ = cv.findContours(dst2, cv2.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for cnt in contour1:
        cv2.drawContours(roi_img, [cnt], 0, (255, 0, 0), 2, cv2.LINE_8)
    cv2.imshow('cont_roi.jpg', roi_img)
    cv2.waitKey()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    imgCvt()
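One hedged way to sidestep the threshold trade-off described in the question is to locate the circle first (for example with cv2.HoughCircles), build a mask slightly inside its rim, and threshold only within that mask, so the circle's edge is never part of the defect search. A minimal sketch with guessed parameters and a hypothetical input file, not code from the original post:
import cv2
import numpy as np

# Locate the circular part, then look for defects only inside it,
# so the threshold never touches or distorts the rim itself.
roi_gray = cv2.imread('3_roi_img.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input
blur = cv2.medianBlur(roi_gray, 5)
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1, minDist=500,
                           param1=100, param2=30, minRadius=100, maxRadius=0)
if circles is not None:
    x, y, r = [int(v) for v in np.around(circles[0][0])]
    mask = np.zeros_like(roi_gray)
    cv2.circle(mask, (x, y), int(r * 0.95), 255, -1)          # stay slightly inside the rim
    inside = cv2.bitwise_and(blur, blur, mask=mask)
    # Threshold only the interior; the circle's own edge is excluded from the search.
    defects = cv2.adaptiveThreshold(inside, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV, 31, 10)
    defects = cv2.bitwise_and(defects, defects, mask=mask)
    cv2.imshow('defects', defects)
    cv2.waitKey(0)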

pytesseract not recognising image

I am building an image de-shredding program, and despite my preprocessing attempts I am still not getting any text output. The images I am putting into it seem as simple as possible: they are binarized and have little to no skew. What am I doing wrong? Is there a special neural network library that I am not importing?
Here is my code:
import cv2
import numpy as np
import os
import imutils
from pytesseract import Output
import pytesseract
a1 = 0
a2 = 1
diflist = []
areas = []
images = []
rotated_shreds = []
cropped_shreds = []
idx = 0
scan = cv2.imread('test3.jpeg')
height = scan.shape[0]
width = scan.shape[1]
DrawnContours = np.zeros(shape=[height, width, 3], dtype=np.uint8)
blank_image2 = np.zeros(shape=[height, width, 3], dtype=np.uint8)
#grayscales the image
gray = cv2.cvtColor(scan, cv2.COLOR_BGR2GRAY)
gaus = cv2.GaussianBlur(gray, (3,3),0)
canny_output = cv2.Canny(gaus,50,50)
StripConts, hierarchy= cv2.findContours(canny_output, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(DrawnContours, StripConts, -1, (255,255,255),12)
DrawnContoursgray = cv2.cvtColor(DrawnContours, cv2.COLOR_BGR2GRAY)
DrawConts, hierarchy= cv2.findContours(DrawnContoursgray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
sorted_contours= sorted(DrawConts, key=cv2.contourArea, reverse= True)
for contour in sorted_contours:
    measure = cv2.contourArea(contour)
    areas.append(measure)
    print(measure)
for area in areas:
    try:
        dif = areas[a1]-areas[a2]
        diflist.append(dif)
        a1 += 1
        a2 += 1
    except:
        break
maximum = max(diflist)
del diflist[0]
number = (diflist.index(maximum)+2)
print(number)
for c in sorted_contours:
    if idx >= number:
        break
    else:
        x,y,w,h = cv2.boundingRect(c)
        new_img=scan[y:y+h,x:x+w]
        images.append(new_img)
        idx+=1
for image in images:
    try:
        shred_gaus = cv2.GaussianBlur(image, (3,3),0)
        shred_canny_output = cv2.Canny(shred_gaus, 130, 130)
        ShredCont, hierarchy = cv2.findContours(shred_canny_output, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        sorted_contours= sorted(ShredCont, key=cv2.contourArea, reverse= True)
        cnt = sorted_contours[1]
        rect = cv2.minAreaRect(cnt)
        angle = rect[2]
        if angle < 0:
            rotguide = -90 +abs(angle)
            print(rotguide)
        else:
            rotguide = 90 - angle
            print(rotguide)
        rotated_shred = imutils.rotate_bound(image, rotguide)
        rotated_shreds.append(rotated_shred)
    except:
        counter = 100
for rotated_shred in rotated_shreds:
    print("r")
    ret, thresh2 = cv2.threshold(rotated_shred, 120, 255, cv2.THRESH_BINARY)
    grayfinal = cv2.cvtColor(thresh2, cv2.COLOR_BGR2GRAY)
    strait_shred, hierarchy= cv2.findContours(grayfinal, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    sorted_contours= sorted(strait_shred, key=cv2.contourArea, reverse= True)
    x,y,w,h = cv2.boundingRect(sorted_contours[0])
    new_img=thresh2[y:y+h,x:x+w]
    gaus_img = cv2.GaussianBlur(new_img, (3,3),0)
    cropped_shreds.append(gaus_img)
cv2.imwrite('rotate.jpeg',cropped_shreds[0])
print(pytesseract.image_to_string(cropped_shreds[1]))
I tried playing with the binarization values; by blurring the image and then binarizing it with a low threshold, I fixed it.
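A minimal sketch of that fix, assuming one of the rotated shreds from the loop above as input (the exact blur kernel and threshold value are guesses, not values from the original post):
# Hedged sketch of the described fix: blur first, then binarize with a low threshold.
shred_gray = cv2.cvtColor(rotated_shred, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(shred_gray, (5, 5), 0)
_, binarized = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)  # low threshold
print(pytesseract.image_to_string(binarized))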

Tesseract not detecting any text on RGB images on Python

Hey, I started working with Tesseract OCR, but I'm having problems getting the text from really simple RGB images.
It works just fine with text2image images.
Here is my code:
from PIL import Image
import pytesseract
import argparse
import cv2
import os
import sys
class wordExtractor():
    def __init__(self, image_path):
        self.image_path = image_path
        pytesseract.pytesseract.tesseract_cmd = r'/home/yarin/tesseract/bin/debug/tesseract'
        #self.resize_image()

    def resize_image(self):
        basewidth = 800
        img = Image.open(self.image_path)
        wpercent = (basewidth/float(img.size[0]))
        hsize = int((float(img.size[1])*float(wpercent)))
        img = img.resize((basewidth,hsize), Image.ANTIALIAS)
        os.remove(self.image_path)
        img.save(self.image_path[:-4] + '.png')
        self.image_path = self.image_path[:-4] + '.png'

    def get_text(self, lang):
        # load the example image and convert it to grayscale
        image = cv2.imread(self.image_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # check to see if we should apply thresholding to preprocess the
        # image
        #if args["preprocess"] == "thresh":
        gray = cv2.threshold(gray, 0, 255,
            cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        # make a check to see if median blurring should be done to remove
        # noise
        #elif args["preprocess"] == "blur":
        #    gray = cv2.medianBlur(gray, 3)
        # write the grayscale image to disk as a temporary file so we can
        # apply OCR to it
        filename = "{}.png".format(os.getpid())
        cv2.imwrite(filename, gray)
        # load the image as a PIL/Pillow image, apply OCR, and then delete
        # the temporary file
        text = pytesseract.image_to_string(Image.open(filename), lang='eng')
        os.remove(filename)
        return text
        # show the output images
        #cv2.imshow("Image", image)
        #cv2.imshow("Output", gray)
        #cv2.waitKey(0)
w = wordExtractor('6.png')
print(w.get_text('eng'))
Tesseract returns an empty string for the following images:
Please show me how I can solve this. Thanks in advance!
After thresholding, you can use findContours to find a contour for each shape. Then you can filter the contours and put every contour you are interested in onto a blank white image. By then, you will have the letters ready to process with Tesseract. You can see the details in the code below.
import cv2
import numpy as np
import pytesseract
# img = cv2.imread("dwLFQ.png", cv2.IMREAD_COLOR)
img = cv2.imread("NfwY4.png", cv2.IMREAD_COLOR)
# img = cv2.imread("xTH6s.png", cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
items = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = items[0] if len(items) == 2 else items[1]
base = np.zeros(thresh.shape, dtype=np.uint8)
base = cv2.bitwise_not(base)
max_area = 0
for i in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[i])
    ratio = h / w
    area = cv2.contourArea(contours[i])
    cv2.drawContours(img, [contours[i]], 0, (255, 0, 0), 2)
    if 1 < ratio < 3:
        max_area = max(area, max_area)
    print("area: " + str(area) + ", max area: " + str(max_area) + ", ratio: " + str(ratio))
    # if 1000 < area < max_area / 2:
    if 1000 < area < 40000:
        mask = np.zeros(thresh.shape, dtype=np.uint8)
        cv2.drawContours(mask, [contours[i]], -1, color=255, thickness=-1)
        mean = cv2.mean(thresh, mask=mask)
        segment = np.zeros((h, w), dtype=np.uint8)
        segment[:h, :w] = thresh[y:y + h, x:x + w]
        if mean[0] > 150:
            # white, invert
            segment = cv2.bitwise_not(segment)
        base[y:y + h, x:x + w] = segment[:h, :w]
        cv2.imshow("base", base)
        cv2.drawContours(img, [contours[i]], 0, (255, 0, 0), 2)
        cv2.waitKey(0)
custom_config = r'-l eng --oem 3 --psm 6 -c tessedit_char_whitelist="ABCDEFGHIJKLMNOPQRSTUVWXYZ " '
text = pytesseract.image_to_string(base, config=custom_config)
print("detected: " + text)
cv2.imshow("img", img)
cv2.imshow("base", base)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result
detected: NO
ENTRY

OpenCV: How to correctly apply morphologyEx operation?

I am having a problem regarding the kernel size for morphologyEx. I have some captcha images and I want to do the same operation on them and get the same final result.
Code:
image = cv2.imread("Image.jpg")
gray = cv2.cvtColor(image , cv2.COLOR_BGR2GRAY)
ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
k1 = np.ones((3,3))
k2 = np.ones((5,5))
bottom_image = cv2.morphologyEx(thresh1, cv2.MORPH_CLOSE, k1)
bottom_image = 255-bottom_image
bottom_image = remove_boxes(bottom_image , True)
ret,thresh2 = cv2.threshold(bottom_image,127,255,cv2.THRESH_BINARY_INV)
opening = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, k1)
#closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, k)
# cv2.imshow('opening', opening)
dilate = cv2.morphologyEx(opening, cv2.MORPH_DILATE, k2)
dilate = cv2.bitwise_not(dilate)
# cv2.imshow('dilation', dilate)
bottom_image = cv2.morphologyEx(bottom_image, cv2.MORPH_CLOSE, k1)
The perfect result would be
Input:
Output:
But the problem appears when I apply it to other images with the same structure: the output is distorted.
Example 1 :
Input:
Output:
Example 2 :
Input:
Output:
Example 3:
Input:
Output:
This answer was inspired by this excellent post.
import numpy as np
import cv2
if __name__ == '__main__':
    image = cv2.imread('image.png',cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret,binary = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
    binary = cv2.bitwise_not(binary)

    H = cv2.Sobel(binary, cv2.CV_8U, 0, 2)
    V = cv2.Sobel(binary, cv2.CV_8U, 2, 0)
    rows,cols = image.shape[:2]

    _,contours,_ = cv2.findContours(V, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        (x,y,w,h) = cv2.boundingRect(cnt)
        # rows/3 is the threshold for length of line
        if h > rows/3:
            cv2.drawContours(V, [cnt], -1, 255, -1)
            cv2.drawContours(binary, [cnt], -1, 255, -1)
        else:
            cv2.drawContours(V, [cnt], -1, 0, -1)

    _,contours,_ = cv2.findContours(H, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        (x,y,w,h) = cv2.boundingRect(cnt)
        # cols/3 is the threshold for length of line
        if w > cols/3:
            cv2.drawContours(H, [cnt], -1, 255, -1)
            cv2.drawContours(binary, [cnt], -1, 255, -1)
        else:
            cv2.drawContours(H, [cnt], -1, 0, -1)

    kernel = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(3,3))
    H = cv2.morphologyEx(H, cv2.MORPH_DILATE, kernel, iterations = 3)
    V = cv2.morphologyEx(V, cv2.MORPH_DILATE, kernel, iterations = 3)
    cross = cv2.bitwise_and(H, V)

    _,contours,_ = cv2.findContours(cross,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    centroids = []
    for cnt in contours:
        mom = cv2.moments(cnt)
        (x,y) = mom['m10']/mom['m00'], mom['m01']/mom['m00']
        cv2.circle(image,(int(x),int(y)),4,(0,255,0),-1)
        centroids.append((x,y))

    centroids.sort(key = lambda x: x[0], reverse = False)
    centroids.sort(key = lambda x: x[1], reverse = False)

    dx = int(centroids[1][0] - centroids[0][0])
    centroids = np.array(centroids, dtype = np.float32)
    (x,y,w,h) = cv2.boundingRect(centroids)

    if x-dx > -5: x = max(x-dx,0)
    if h+dx <= rows+5: h = min(h+dx,rows)
    if w+dx <= cols+5: w = min(w+dx,cols)

    cv2.rectangle(image, (x,y), (x+w,y+h), (0,255,0))
    roi = binary[y:y+h,x:x+w]
    roi = cv2.morphologyEx(roi, cv2.MORPH_OPEN, kernel, iterations = 1)

    cv2.imshow('image', image)
    cv2.imshow('roi', roi)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
