How to run a loop on max contour - python

How do I run a loop to get the contour and pixel counts for each of 8 objects in an image, rather than just finding the max contour and pixels of a single object in an image?
import cv2
import numpy as np
img = cv2.imread('C:\\Users\\marnes\\Downloads\\25%.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# get shape of largest contour and count pixels
edges = cv2.Canny(image=img, threshold1=100, threshold2=200)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_contour = max(contours, key=lambda c: cv2.contourArea(c))
mask = np.zeros_like(gray)
cv2.drawContours(mask, [max_contour], 0, 255, -1)
pct_100 = cv2.countNonZero(mask)
cv2.imshow("mask", mask)
# get dark area and count pixels
ret, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.bitwise_and(thresh, mask)
pct_dark = cv2.countNonZero(thresh)
cv2.imshow("dark", thresh)
print(f"mask = {pct_100}, dark = {pct_dark}, %dark = {pct_dark / pct_100 * 100}")
first_operator = 100
second_operator = pct_dark / pct_100 * 100
output1 = first_operator - second_operator
parameter = 'starch breakdown'
print([parameter] + [output1])
cv2.waitKey(0)
cv2.destroyAllWindows()
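One possible way to extend this to several objects (a minimal sketch, under the assumption that the eight objects are separate and that switching the retrieval mode to cv2.RETR_EXTERNAL yields one outer contour per object) is to keep the eight largest contours by area and repeat the mask-and-count steps for each one:
import cv2
import numpy as np
img = cv2.imread('C:\\Users\\marnes\\Downloads\\25%.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(image=img, threshold1=100, threshold2=200)
# RETR_EXTERNAL (an assumption) keeps only the outer contour of each object
contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# keep the 8 largest contours by area (8 objects assumed from the question)
largest = sorted(contours, key=cv2.contourArea, reverse=True)[:8]
for i, contour in enumerate(largest):
    # mask one object and count its pixels
    mask = np.zeros_like(gray)
    cv2.drawContours(mask, [contour], 0, 255, -1)
    pct_100 = cv2.countNonZero(mask)
    # count dark pixels inside this object only
    ret, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
    thresh = cv2.bitwise_and(thresh, mask)
    pct_dark = cv2.countNonZero(thresh)
    pct = pct_dark / pct_100 * 100 if pct_100 else 0
    print(f"object {i}: mask = {pct_100}, dark = {pct_dark}, %dark = {pct}, starch breakdown = {100 - pct}")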

Related

How to remove text from a color image using Python

I'm trying to remove the background and the white text from a photo like the one below, but so far I can only remove part of them; the results still have white text inside the circle.
I've used the following code.
Any help is greatly appreciated.
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('sample.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#Crop image
croped_img = img[51:403,102:454]
#plt.imshow(croped_img)
radius = 176
cx, cy = radius, radius # The center of circle
x,y = np.ogrid[-radius: radius, -radius: radius]
index = x**2 + y**2 > radius**2
croped_img[cy-radius:cy+radius, cx-radius:cx+radius][index] = 0
plt.imshow(croped_img)
croped_img=cv2.cvtColor(croped_img, cv2.COLOR_BGR2RGB)
cv2.imwrite('croped_circle_2.jpg', croped_img)
One approach is to create a mask of the text and use that to do inpainting. In Python/OpenCV, there are two forms of inpainting: Telea and Navier-Stokes. Both produce about the same results.
Input:
import cv2
import numpy as np
# read input
img = cv2.imread('circle_text.png')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold so the bright text becomes white
thresh = cv2.threshold(gray, 155, 255, cv2.THRESH_BINARY)[1]
# apply morphology dilate to thicken the text regions
kernel = np.ones((3,3), np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, kernel)
# get contours and filter to keep only small regions
mask = np.zeros_like(gray, dtype=np.uint8)
cntrs = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
for c in cntrs:
    area = cv2.contourArea(c)
    if area < 1000:
        cv2.drawContours(mask,[c],0,255,-1)
# do inpainting
result1 = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA)
result2 = cv2.inpaint(img,mask,3,cv2.INPAINT_NS)
# save results
cv2.imwrite('circle_text_threshold.png', thresh)
cv2.imwrite('circle_text_mask.png', mask)
cv2.imwrite('circle_text_inpainted_telea.png', result1)
cv2.imwrite('circle_text_inpainted_ns.png', result2)
# show results
cv2.imshow('thresh',thresh)
cv2.imshow('mask',mask)
cv2.imshow('result1',result1)
cv2.imshow('result2',result2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Mask image:
Telea Inpainting:
Navier-Stokes Inpainting:

Python & OpenCV: How to add lines to gridless table

I have the following table:
I want to write a script that creates lines based on the natural breakages on the table text. The result would look like this:
Is there an OpenCV implementation that makes drawing these lines possible? I looked at the answers to the questions here and here, but neither worked. What is the best approach to solving this problem?
Here is one way to get the horizontal lines in Python/OpenCV by counting the number of white pixels in each row of the image to find their center y values. The vertical lines can be added by a similar process.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("table.png")
hh, ww = img.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold gray image
thresh = cv2.threshold(gray, 254, 255, cv2.THRESH_BINARY)[1]
# count number of non-zero pixels in each row
count = np.count_nonzero(thresh, axis=1)
# threshold count at ww (width of image)
count_thresh = count.copy()
count_thresh[count==ww] = 255
count_thresh[count<ww] = 0
count_thresh = count_thresh.astype(np.uint8)
# get contours
contours = cv2.findContours(count_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# loop over contours and get bounding boxes and ycenter and draw horizontal line at ycenter
result = img.copy()
for cntr in contours:
    x,y,w,h = cv2.boundingRect(cntr)
    ycenter = y+h//2
    cv2.line(result, (0,ycenter), (ww-1,ycenter), (0, 0, 0), 2)
# write results
cv2.imwrite("table_thresh.png", thresh)
cv2.imwrite("table_lines.png", result)
# display results
cv2.imshow("THRESHOLD", thresh)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Threshold Image:
Result with lines:
ADDITION
Here is an alternate method that is slightly simpler. It averages the image down to one column rather than counting white pixels.
import cv2
import numpy as np
# read image
img = cv2.imread("table.png")
hh, ww = img.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# average gray image to one column
column = cv2.resize(gray, (1,hh), interpolation = cv2.INTER_AREA)
# threshold on white
thresh = cv2.threshold(column, 254, 255, cv2.THRESH_BINARY)[1]
# get contours
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# loop over contours and get bounding boxes and ycenter and draw horizontal line at ycenter
result = img.copy()
for cntr in contours:
    x,y,w,h = cv2.boundingRect(cntr)
    ycenter = y+h//2
    cv2.line(result, (0,ycenter), (ww-1,ycenter), (0, 0, 0), 2)
# write results
cv2.imwrite("table_lines2.png", result)
# display results
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Result:

Calculate multiple areas with opencv

I'm trying to calculate different areas within a picture using OpenCV's contourArea without much success. The picture I'm using as an example is the following one:
My objective is to calculate the table's free area (greyish) and the occupied area (orange objects), and so far managed to print the contours with the following code:
import cv2
img = cv2.imread('table.jpg', 1)
b,g,r = cv2.split(img)
imgRGB = cv2.merge([r,g,b])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv_channels = cv2.split(hsv)
rows = img.shape[0]
cols = img.shape[1]
for i in range(0, rows):
    for j in range(0, cols):
        h = hsv_channels[1][i][j]
        if h > 90 and h < 120:
            hsv_channels[2][i][j] = 255
        else:
            hsv_channels[2][i][j] = 0
image, contours, hierarchy = cv2.findContours(hsv_channels[2],cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
img1 = cv2.drawContours(imgRGB, contours, -1, (0,255,0), 3)
However, I'm facing two issues:
1- The code detects contours inside the circle.
2- Given the multiple contours, I don't know if the area returned is the table's, the objects', or both.
Any suggestions?
Thanks a million.
Since you're transforming to the HSV colorspace, have you thought about cv2.inRange()? After that you can find contours with cv2.findContours() and draw them out of the image, leaving only the gray area.
Example:
import cv2
import numpy as np
img = cv2.imread('tablest.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([0,0,50])
upper = np.array([160,255,255])
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(hsv,hsv, mask= mask)
gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for i in contours:
    cnt = cv2.contourArea(i)
    if cnt > 1000:
        cv2.drawContours(img, [i], 0, (0,0,0), -1)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
area = cv2.contourArea(cnt)
cv2.putText(img,'Gray area ='+str(area),(60,90), cv2.FONT_HERSHEY_COMPLEX, 0.5,(0,255,0),1,cv2.LINE_AA)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
EDIT
For calculating percentage:
import cv2
import numpy as np
img = cv2.imread('tablest.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([0,0,50])
upper = np.array([160,255,255])
# Calculate whole area
h,w = img.shape[:2]
whole_area_mask = np.ones((h, w), np.uint8)
ret, thresh = cv2.threshold(whole_area_mask,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
whole_area = cv2.contourArea(cnt)
# Threshold the HSV image using the lower/upper bounds defined above
mask = cv2.inRange(hsv, lower, upper)
# Bitwise-AND the mask with the HSV image
res = cv2.bitwise_and(hsv,hsv, mask= mask)
gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
other_area = []
table_area = []
for i in contours:
    cnt = cv2.contourArea(i)
    M = cv2.moments(i)
    cx = int(M['m10']/M['m00'])
    if cnt > 1000:
        cv2.drawContours(img, [i], 0, (0,0,0), -1)
        if w-100 > cx > 100:
            other_area.append(cnt)
        else:
            table_area.append(cnt)
# Percentage table/napkin/object 1/object 2
table_per = (100*(table_area[0]+table_area[1]))/whole_area
print('Table percentage: ', table_per)
napkin_per = (100*(whole_area-other_area[0]-other_area[1]-table_area[0]-table_area[1]))/whole_area
print('Napkin percentage: ', napkin_per)
first_object_per = (100*other_area[0])/whole_area
print('First object percentage: ', first_object_per)
second_object_per = (100*other_area[1])/whole_area
print('Second object percentage: ', second_object_per)
print('SUM: ', table_per+napkin_per+first_object_per+second_object_per)
cv2.imshow('img', img)
cv2.imwrite('tablest_res.png', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
Table percentage: 9.875440996472028
Napkin percentage: 58.93872849017208
First object percentage: 28.05565555475556
Second object percentage: 3.1301749586003313
SUM: 100.0

Detecting vertical lines using Hough transforms in opencv

I'm trying to remove the square boxes (vertical and horizontal lines) using the Hough transform in OpenCV (Python). The problem is that none of the vertical lines are being detected. I've tried looking through contours and hierarchy, but there are too many contours in this image and I'm confused about how to use them.
After looking through related posts, I've played with the threshold and rho parameters but that didn't help.
I've attached the code for more details. Why does the Hough transform not find the vertical lines in the image? Any suggestions for solving this task are welcome. Thanks.
Input Image:
Hough transformed Image:
Drawing contours:
import cv2
import numpy as np
import pdb
img = cv2.imread('/home/user/Downloads/cropped/robust_blaze_cpp-300-0000046A-02-HW.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 140, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0,0,255), 2)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 5
maxLineGap = 100
lines = cv2.HoughLinesP(edges,rho=1,theta=np.pi/180,threshold=100,minLineLength=minLineLength,maxLineGap=maxLineGap)
for x1,y1,x2,y2 in lines[0]:
    cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.imwrite('probHough.jpg',img)
To be honest, rather than looking for the lines, I'd instead look for the white boxes.
Preparation
import cv2
import numpy as np
Load the image
img = cv2.imread("digitbox.jpg", 0)
Binarize it, so that both the boxes and the digits are black and the rest is white.
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
cv2.imwrite('digitbox_step1.png', thresh)
Find contours. In this example image, it's fine to just look for external contours.
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Process the contours, filtering out any with too small an area. Find the convex hull of each contour and create a mask of all areas outside the contours. Store the bounding boxes of the found contours, sorted by x coordinate.
mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x,y,w,h = cv2.boundingRect(contour)
        boxes.append((x,y,w,h))
boxes = sorted(boxes, key=lambda box: box[0])
cv2.imwrite('digitbox_step2.png', mask)
Dilate the mask (to shrink the black parts), to clip off any remains of the gray frames.
mask = cv2.dilate(mask, np.ones((5,5),np.uint8))
cv2.imwrite('digitbox_step3.png', mask)
Fill all the masked pixels with white, to erase the frames.
img[mask != 0] = 255
cv2.imwrite('digitbox_step4.png', img)
Process the digits as you desire -- I'll just draw the bounding boxes.
result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n,box in enumerate(boxes):
    x,y,w,h = box
    cv2.rectangle(result,(x,y),(x+w,y+h),(255,0,0),2)
    cv2.putText(result, str(n),(x+5,y+17), cv2.FONT_HERSHEY_SIMPLEX, 0.6,(255,0,0),2,cv2.LINE_AA)
cv2.imwrite('digitbox_step5.png', result)
The whole script in one piece:
import cv2
import numpy as np
img = cv2.imread("digitbox.jpg", 0)
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x,y,w,h = cv2.boundingRect(contour)
        boxes.append((x,y,w,h))
boxes = sorted(boxes, key=lambda box: box[0])
mask = cv2.dilate(mask, np.ones((5,5),np.uint8))
img[mask != 0] = 255
result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n,box in enumerate(boxes):
    x,y,w,h = box
    cv2.rectangle(result,(x,y),(x+w,y+h),(255,0,0),2)
    cv2.putText(result, str(n),(x+5,y+17), cv2.FONT_HERSHEY_SIMPLEX, 0.6,(255,0,0),2,cv2.LINE_AA)
cv2.imwrite('digitbox_result.png', result)

Removing the largest contour from an image

I have an image such as this
I am trying to detect and remove the arrow from this image so that I end up with an image that just has the text.
I tried the approach below, but it isn't working:
image_src = cv2.imread("roi.png")
gray = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
canny=cv2.Canny(gray,50,200,3)
ret, gray = cv2.threshold(canny, 10, 255, 0)
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
largest_area = sorted(contours, key=cv2.contourArea)[-1]
mask = np.ones(image_src.shape[:2], dtype="uint8") * 255
cv2.drawContours(mask, [largest_area], -1, 0, -1)
image = cv2.bitwise_and(image_src, image_src, mask=mask)
The above code seems to give me back the same image WITH the arrow.
How can I remove the arrow?
The following will remove the largest contour:
import numpy as np
import cv2
image_src = cv2.imread("roi.png")
gray = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
ret, gray = cv2.threshold(gray, 250, 255,0)
image, contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
mask = np.zeros(image_src.shape, np.uint8)
largest_areas = sorted(contours, key=cv2.contourArea)
cv2.drawContours(mask, [largest_areas[-2]], 0, (255,255,255,255), -1)
removed = cv2.add(image_src, mask)
cv2.imwrite("removed.png", removed)
Note that the largest contour in this case is the whole image, so the arrow is actually the second largest contour.
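If indexing the second largest contour feels fragile, a minimal alternative sketch (an assumption, not part of the original answer) is to discard any contour whose bounding box spans essentially the whole image and then mask out the largest remaining contour:
import cv2
import numpy as np
image_src = cv2.imread("roi.png")
gray = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
ret, gray = cv2.threshold(gray, 250, 255, 0)
# take the last two return values so this works on both OpenCV 3 and OpenCV 4
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
hh, ww = image_src.shape[:2]
# discard contours whose bounding box covers (almost) the full image
candidates = [c for c in contours
              if cv2.boundingRect(c)[2] < 0.95 * ww or cv2.boundingRect(c)[3] < 0.95 * hh]
arrow = max(candidates, key=cv2.contourArea)
# paint the arrow region white, as in the answer above
mask = np.zeros(image_src.shape, np.uint8)
cv2.drawContours(mask, [arrow], 0, (255, 255, 255), -1)
removed = cv2.add(image_src, mask)
cv2.imwrite("removed_filtered.png", removed)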
