How to remove a contoured area from an image? - python

I have an image with a white background and some colored blocks. I have created multiple filters to find the colored blocks and get the contours for another purpose and I am able to draw the contours around the colored blocks.
These blocks are connected by some black lines whose contours I would also like to get. I was using the original contours to pick up the lines as well, but I was advised to instead remove the colored blocks from the image so that only the black lines remain, which makes them easier to contour.
I have created a mask that would draw over the contoured block but when I display the final image the contoured blocks are black instead of white.
Is there any way to make the blocks white similar to the background so that I could remain with only the black lines?
From the images, you can see that the mask covers the image and the black line remains. However, I can't figure out how to make the blocks white instead of black so that only the line remains.
from ctypes import sizeof
from doctest import master
from cv2 import approxPolyDP, contourArea, cvtColor, inRange
import numpy as np
import cv2
kernel = (5, 5)
srcImg = cv2.imread('BluePurpleConnected.png', cv2.IMREAD_COLOR)
blur = cv2.GaussianBlur(srcImg, kernel, 0)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
grayImg = cv2.cvtColor(srcImg, cv2.COLOR_BGR2GRAY)
rows = int(grayImg.shape[0]/2)
cols = int(grayImg.shape[1]/2)
ker = np.ones((0, 0), 'uint8')
graySize = grayImg.shape
sig = 0.33
cPix = 200
bPix = 255
lPurp = np.array([130, 20, 10])
uPurp = np.array([145, 255, 255])
lBlue = np.array([94, 80, 2])
uBlue = np.array([126, 255, 255])
colArray = np.array([lPurp, uPurp, lBlue, uBlue])
masterStruc = []
maskArray = []
mask = np.ones(srcImg.shape[:2], dtype="uint8") * 255
x = 0
while x <= 1:
    imgMask = cv2.inRange(hsv, colArray[2*x], colArray[2*x+1])
    maskArray.append(imgMask)
    v = np.median(imgMask)
    lower = int(max(0, (1.0 - sig) * v))
    upper = int(min(255, (1.0 + sig) * v))
    bwImg = cv2.Canny(imgMask, lower, upper)
    nbwImg = cv2.dilate(bwImg, kernel, iterations=1)
    cImg = nbwImg
    ret, threshold = cv2.threshold(cImg, cPix, bPix, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = 0
    for cnt in contours:
        curve = approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        vert = len(curve)
        area = cv2.contourArea(curve)
        if(area > 1000):
            if(vert == 4):
                masterStruc.append([x, curve, area])
                cv2.drawContours(mask, [curve], -1, 0, -1)
    x = x + 1
srcImg = cv2.bitwise_and(srcImg, srcImg, mask=mask)
cv2.imshow('Mask', mask)
cv2.imshow('Image', srcImg)
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
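For reference, a minimal sketch of one way to make the masked blocks white instead of black (an illustration of the idea, not necessarily the only approach): since mask starts at 255 and the detected blocks are drawn into it as 0, the block pixels can simply be painted white rather than ANDed to black.
# paint the masked-out (block) regions white instead of using bitwise_and
cleaned = srcImg.copy()
cleaned[mask == 0] = (255, 255, 255)  # mask is 0 where the colored blocks were drawn
cv2.imshow('Blocks removed', cleaned)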

Related

Change colors that do not match specific color(s) to white using opencv

I'm trying to use opencv to grab specific elements out of the image. So far what I have done is reduced the number of colors in the image, and gotten a dictionary of colors with their counts. What I want to do now is replace all colors which do not match this specific color to white, so that I can iterate over the image and create different images where each image only has one object of that particular color.
This is what I've done so far:
def showOnlyOneColor(img, rgb_key):
print("Getting only one color")
rgb_key = rgb_key.split("-")
r = int(rgb_key[0])
g = int(rgb_key[1])
b = int(rgb_key[2])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
rgb_low = np.array([r,g,b])
rgb_high = np.array([r,g,b])
mask = cv2.inRange(hsv, rgb_low, rgb_high)
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
cv2.fillPoly(mask, contours, (255, 255, 255))
result = cv2.bitwise_and(img,img,mask=mask)
return result
Second attempt:
def showOnlyOneColor(img, rgb_key):
print("Getting only one color")
rgb_key = rgb_key.split("-")
r = int(rgb_key[0])
g = int(rgb_key[1])
b = int(rgb_key[2])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
rgb_low = np.array([r,g,b])
rgb_high = np.array([r,g,b])
mask = cv2.inRange(hsv, rgb_low, rgb_high)
img[mask!=255] = (255, 255, 255)
return img
This results in a white image for all iterations
This is what I'm using to reduce the number of colors in the image:
def reduceNumberOfColors(img):
    div = 128
    return img // div * div + div // 2
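As a quick check of what this reduction does (a sketch of the arithmetic only): with div = 128, every channel value collapses to either 64 or 192, so the image is left with at most 8 distinct colors.
import numpy as np
div = 128
sample = np.array([0, 50, 127, 128, 200, 255], dtype=np.uint8)
print(sample // div * div + div // 2)  # -> [ 64  64  64 192 192 192]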
Latest attempt:
def showOnlyOneColor(img, rgb_key):
print("Getting only one color")
rgb_key = rgb_key.split("-")
r = int(rgb_key[0])
g = int(rgb_key[1])
b = int(rgb_key[2])
hsv_color = colorsys.rgb_to_hsv(r,g,b)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
rgb_low = np.array(hsv_color)
rgb_high = np.array(hsv_color)
mask = cv2.inRange(hsv, rgb_low, rgb_high)
img[mask!=255] = (255, 255, 255)
return img
One of the resulting images shows something, the rest of the images are white
How I organize rgb:
def getColorCount(img):
    color_dict = {}
    print("Getting color count")
    for i in tqdm(range(img.shape[0])):
        for j in range(img.shape[1]):
            color = img[i,j]
            r = color[0]
            g = color[1]
            b = color[2]
            rgb_askey = str(r)+"-"+str(g)+"-"+str(b)
            if rgb_askey not in color_dict.keys():
                color_dict[rgb_askey] = 1
            else:
                color_dict[rgb_askey] += 1
    return color_dict
Here is one way to do what you want in Python/OpenCV. Note colors are in the order B,G,R.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('corn.jpg')
# do simple color reduction
imgcopy = img.copy()
div = 128
imgcopy = div * ( imgcopy // div ) + div // 2
# get list of unique colors
list_bgr_colors = np.unique(imgcopy.reshape(-1, imgcopy.shape[2]), axis=0)
print(list_bgr_colors)
print(list_bgr_colors[1])
# save reduced color image
cv2.imwrite("corn_reduced_colors.png", imgcopy)
# display reduced color image
cv2.imshow("reduced_colors", imgcopy)
cv2.waitKey(0)
# loop over colors in list and change all non-specified colors to white
i = 1
for color in list_bgr_colors:
    # threshold on the specified color
    lower = np.array((color))
    upper = np.array((color))
    mask = cv2.inRange(imgcopy, lower, upper)
    # change all non-specified color to white
    result = imgcopy.copy()
    result[mask!=255] = (255, 255, 255)
    # save results
    cv2.imwrite("corn_color_{0}.png".format(i), result)
    # display result
    cv2.imshow("result", result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # increment
    i += 1
List of Reduced Colors:
[[ 64 64 64]
[ 64 64 192]
[ 64 192 64]
[ 64 192 192]
[192 192 192]]
Reduced Color Image:
Individual Color Images:
This works for me in Python/OpenCV. Note that OpenCV colors are in the order B,G,R.
import cv2
import numpy as np
# read image
img = cv2.imread('corn.jpg')
# threshold on yellow color
lower=(0,170,215)
upper=(70,255,255)
mask = cv2.inRange(img, lower, upper)
# change all non-yellow to white
result = img.copy()
result[mask!=255] = (255, 255, 255)
# save results
cv2.imwrite('corn_yellow.jpg',result)
# display result
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Colors in OpenCV are in the order B,G,R.
So try this in Python/OpenCV.
def showOnlyOneColor(img, bgr_key):
print("Getting only one color")
bgr_key = bgr_key.split("-")
b = int(bgr_key[0])
g = int(bgr_key[1])
r = int(bgr_key[2])
bgr_low = np.array((b,g,r))
bgr_high = np.array((b,g,r))
mask = cv2.inRange(img, bgr_low, bgr_high)
img[mask!=255] = (255, 255, 255)
return img
ADDITION
I think your issue is the assumption that you have RGB colors when you use colorsys to convert from RGB to HSV. Your colors are actually B, G, R. So to use colorsys you need to convert your image to RGB first, or better, reverse the order of the channel values you extract from B,G,R to R,G,B before passing them to colorsys.
Your dictionary colors are actually B, G, R not R, G, B. So reverse them or reverse them later when using colorsys.
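To illustrate the point, here is a minimal sketch (an assumption about intent, not part of the original answer): reverse the extracted channel order before calling colorsys, and rescale its 0..1 output to OpenCV's HSV ranges (H: 0..179, S and V: 0..255).
# the dictionary key was built from an OpenCV pixel, so the values are B, G, R
b, g, r = (int(v) for v in rgb_key.split("-"))
# colorsys expects R, G, B scaled to 0..1 and returns H, S, V in 0..1
h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
# rescale to OpenCV's HSV convention before comparing against cv2.cvtColor output
hsv_color = np.array([h * 179, s * 255, v * 255])
Even then, an exact single-value match may miss pixels because of rounding in the HSV conversion, which is why the simpler BGR inRange shown above is usually the easier route.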

I'm getting an error while detecting bottle fill level with OpenCV

I am trying to detect the fill levels of bottles moving on a conveyor belt with OpenCV. Since the bottles are colored, I shine a white light from the back, find the liquid contours and measure them. I'm getting an error in a certain part of the code. The error:
(contours, areas) = zip(*sorted(zip(contours, areas), key = lambda a:a[1]))
ValueError: not enough values to unpack (expected 2, got 0)
An example photograph is shown below.
When I increase the threshold value in the code, the program runs, but I want this value to remain constant because it affects the quality of the contours. Thanks in advance.
Source Code:
_, frame = cap.read()
# hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
bottle_gray = cv2.split(frame)[0]
bottle_gray = cv2.GaussianBlur(bottle_gray, (7,7), 0)
(T, bottle_threshold) = cv2.threshold(bottle_gray, 49.5, 250, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
bottle_open = cv2.morphologyEx(bottle_threshold, cv2.MORPH_CLOSE, kernel)
contours = cv2.findContours(bottle_open.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
bottle_clone = frame.copy()
cv2.drawContours(bottle_clone, contours, 0, (255,0,0), 2)
areas = [cv2.contourArea(contour) for contour in contours]
(contours, areas) = zip(*sorted(zip(contours, areas), key=lambda a: a[1]))
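One common cause of this ValueError is that cv2.findContours finds no contours at the chosen threshold, so zip(*...) receives an empty sequence. A minimal guard (just a sketch, not part of the original code) would be:
# only sort when at least one contour was found; otherwise skip this frame
if contours:
    (contours, areas) = zip(*sorted(zip(contours, areas), key=lambda a: a[1]))
else:
    print("no contours found in this frame")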
import sys
import cv2
import numpy as np
# Fix HSV color range
def fixHSVRange(h, s, v):
    return (180 * h / 360, 255 * s / 100, 255 * v / 100)
# Load image
dir = sys.path[0]
im = cv2.imread(dir+'/im.png')
# Convert image to HSV and find empty bottle range
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
msk = cv2.inRange(hsv, fixHSVRange(0, 50, 60), fixHSVRange(5, 100, 100))
# Smooth noise
msk = cv2.medianBlur(msk, 21)
# Invert colors
msk = ~msk
# Find empty space boundaries
cnts, _ = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(cnts[1])
# Draw level line
msk = cv2.cvtColor(msk, cv2.COLOR_GRAY2BGR)
cv2.line(msk, (0, y+h), (im.shape[1], y+h), (0, 255, 0), 2, cv2.LINE_AA)
# Save output
cv2.imwrite(dir+'/im_msk.png', np.hstack((im, msk)))

Remove shadow from image with OpenCV

I am trying to remove the shadow of the pipe in the following image.
I use the following code to isolate the pipe with the shadow but I cannot find a way to remove the shadow.
img = cv2.imread("myimage.png",cv2.IMREAD_UNCHANGED)
blurred = cv2.GaussianBlur(img, (5, 5), 0)
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY_INV)[1]
cnts, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img_X = img.shape[1] / 2
img_Y = img.shape[0]
img_cnts = None
img_distance = None
for c in cnts:
    area = cv2.contourArea(c)
    if area < 500:
        continue
    M = cv2.moments(c)
    cnt_X = int(M["m10"] / M["m00"])
    cnt_Y = int(M["m01"] / M["m00"])
    cnt_distance = math.sqrt(sum((px - qx) ** 2.0 for px, qx in zip([cnt_X, cnt_Y], [img_X, img_Y])))
    if img_distance == None or img_distance > cnt_distance:
        img_cnts = c
        img_distance = cnt_distance
mask = np.zeros_like(img) # Create mask where white is what we want, black otherwise
cv2.drawContours(mask, [img_cnts], -1, (255,255,255), -1) # Draw filled contour in mask
out = np.zeros_like(img) # Extract out the object and place into output image
out[mask == 255] = img[mask == 255]
This is the result of the previous code that is pretty close to what I need.
I tried to use cv2.adaptiveThreshold and cv2.createBackgroundSubtractorMOG without luck.
Well, if you apply adaptive-threshold along with a bitwise-not operation, the result will be:
Of course, with different blockSize and C parameters you will get different results.
Where: (source)
blockSize: determines the size of the neighbourhood area
C: constant that is subtracted from the mean or weighted sum of the neighbourhood pixels.
Code:
import cv2
img = cv2.imread("pdUqL.png")
gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thr = cv2.adaptiveThreshold(gry, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY_INV, blockSize=33, C=31)
bnt = cv2.bitwise_not(thr)
cv2.imshow("out", bnt)
cv2.waitKey(0)
There is no good way to remove the shadow; you need to fix your lighting. Use "softboxes" (indirect lighting) and multiple lights from multiple angles, and whenever you have mirror-like surfaces, arrange the lights so they don't reflect off them into the camera.

Difficulty in detecting the outer circle with cv2.HoughCircles

I am trying to detect the outer boundary of the circular object in the images below:
I tried OpenCV's Hough Circle transform, but the code does not work for every image, even after adjusting parameters such as minRadius and maxRadius.
The aim is to detect the object from the image and crop it.
Expected output:
Source code:
import imutils
import cv2
import numpy as np
from matplotlib import pyplot as plt
image = cv2.imread("path to the image i have provided")
r = 600.0 / image.shape[1]
dim = (600, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
cv2.imwrite("path to were we want to save downscaled image", resized)
image = cv2.imread('path of downscaled image')
image1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image2 = cv2.GaussianBlur(image1, (5, 5), 0)
edged = cv2.Canny(image2, 30, 150)
img = cv2.medianBlur(image2,5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(edged, cv2.HOUGH_GRADIENT, 1, 20,
                           param1=50, param2=30, minRadius=200, maxRadius=280)
circles = np.uint16(np.around(circles))
max_circle = max(circles[0,:], key=lambda x:x[2])
# print(max_circle)
# # Create mask
height,width = image1.shape
mask = np.zeros((height,width), np.uint8)
for i in [max_circle]:
    cv2.circle(mask, (i[0], i[1]), i[2], (255, 255, 255), thickness=-1)
masked_data = cv2.bitwise_and(image, image, mask=mask)
_,thresh = cv2.threshold(mask,1,255,cv2.THRESH_BINARY)
# Find Contour
contours = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[0]
x,y,w,h = cv2.boundingRect(contours[0])
# Crop masked_data
crop = masked_data[y:y+h,x:x+w]
#Code to close Window
cv2.imshow('OG',image)
cv2.imshow('Cropped ROI',crop)
cv2.imwrite("path to save roi image", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
Second Answer: an approach based on color segmentation.
While I was editing the question to improve its readability and was inserting and resizing all the images from the link you shared to make it easier for everyone to visualize what you are trying to do, it occurred to me that this problem might be a better candidate for an approach based on segmentation by color:
This simpler (but clever) approach assumes that the reel appears pretty much in the same location and has more or less the same dimensions every time:
To discover the approximate color of the reel in the image, define a list of Regions of Interest (ROIs) to sample pixels from and determine the min and max color of that area in the HSV color space. The location and size of each ROI are derived from the size of the image. In the images below, you can see the ROIs drawn as blue-ish rectangles:
Once the min and max HSV colors have been found, a threshold operation with cv2.inRange() can be executed to segment the reel:
Then, iterate through all the contours in the binary image and assume that the largest one represents the reel. Use this contour and draw it in a separate mask to be able to extract the pixels from the original image:
At this stage, it is also possible to compute a bounding box for the contour and extract its precise location to be able to perform a crop operation later and completely isolate the reel in the image:
This approach works for EVERY image shared on the question.
Source code:
import cv2
import numpy as np
import sys
# initialize global H, S, V values
min_global_h = 179
min_global_s = 255
min_global_v = 255
max_global_h = 0
max_global_s = 0
max_global_v = 0
# load input image from the cmd-line
filename = sys.argv[1]
img = cv2.imread(sys.argv[1])
if (img is None):
print('!!! Failed imread')
sys.exit(-1)
# create an auxiliary image for debugging purposes
dbg_img = img.copy()
# initialize a list of Regions of Interest that need to be scanned to identify good HSV values to threshold by color
w = img.shape[1]
h = img.shape[0]
roi_w = int(w * 0.10)
roi_h = int(h * 0.10)
roi_list = []
roi_list.append( (int(w*0.25), int(h*0.15), roi_w, roi_h) )
roi_list.append( (int(w*0.25), int(h*0.60), roi_w, roi_h) )
# convert image to HSV color space
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# iterate through the ROIs to determine the min/max HSV color of the reel
for rect in roi_list:
    x, y, w, h = rect
    x2 = x + w
    y2 = y + h
    print('ROI rect=', rect)
    cropped_hsv_img = hsv_img[y:y+h, x:x+w]
    h, s, v = cv2.split(cropped_hsv_img)
    min_h = np.min(h)
    min_s = np.min(s)
    min_v = np.min(v)
    if (min_h < min_global_h):
        min_global_h = min_h
    if (min_s < min_global_s):
        min_global_s = min_s
    if (min_v < min_global_v):
        min_global_v = min_v
    max_h = np.max(h)
    max_s = np.max(s)
    max_v = np.max(v)
    if (max_h > max_global_h):
        max_global_h = max_h
    if (max_s > max_global_s):
        max_global_s = max_s
    if (max_v > max_global_v):
        max_global_v = max_v
    # debug: draw ROI in original image
    cv2.rectangle(dbg_img, (x, y), (x2, y2), (255,165,0), 4) # blue-ish (BGR)
cv2.imshow('ROIs', cv2.resize(dbg_img, dsize=(0, 0), fx=0.5, fy=0.5))
#cv2.waitKey(0)
cv2.imwrite(filename[:-4] + '_rois.png', dbg_img)
# define min/max color for threshold
low_hsv = np.array([min_global_h, min_global_s, min_global_v])
max_hsv = np.array([max_global_h, max_global_s, max_global_v])
#print('low_hsv=', low_hsv)
#print('max_hsv=', max_hsv)
# threshold image by color
img_bin = cv2.inRange(hsv_img, low_hsv, max_hsv)
cv2.imshow('binary', cv2.resize(img_bin, dsize=(0, 0), fx=0.5, fy=0.5))
cv2.imwrite(filename[:-4] + '_binary.png', img_bin)
#cv2.imshow('img_bin', cv2.resize(img_bin, dsize=(0, 0), fx=0.5, fy=0.5))
#cv2.waitKey(0)
# create a mask to store the contour of the reel (hopefully)
mask = np.zeros((img_bin.shape[0], img_bin.shape[1]), np.uint8)
crop_x, crop_y, crop_w, crop_h = (0, 0, 0, 0)
# iterate through all the contours in the binary image:
# assume that the first contour with an area larger than 100k belongs to the reel
contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    area = cv2.contourArea(contours[contourIdx])
    print('contourIdx=', contourIdx, 'area=', area)
    # draw potential reel blob on the mask (in white)
    if (area > 100000):
        crop_x, crop_y, crop_w, crop_h = cv2.boundingRect(cnt)
        centers, radius = cv2.minEnclosingCircle(cnt)
        cv2.circle(mask, (int(centers[0]), int(centers[1])), int(radius), (255), -1) # fill with white
        break
cv2.imshow('mask', cv2.resize(mask, dsize=(0, 0), fx=0.5, fy=0.5))
cv2.imwrite(filename[:-4] + '_mask.png', mask)
# copy just the reel area into its own image
reel_img = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('reel_img', cv2.resize(reel_img, dsize=(0, 0), fx=0.5, fy=0.5))
cv2.imwrite(filename[:-4] + '_reel.png', reel_img)
# crop the reel to a smaller image
if (crop_w != 0 and crop_h != 0):
    cropped_reel_img = reel_img[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]
    cv2.imshow('cropped_reel_img', cv2.resize(cropped_reel_img, dsize=(0, 0), fx=0.5, fy=0.5))
    output_filename = filename[:-4] + '_crop.png'
    cv2.imwrite(output_filename, cropped_reel_img)
cv2.waitKey(0)
First answer: an approach based on pre-processing the image and executing an adaptiveThreshold operation.
There might be other ways of solving this problem that are not based on Hough Circles. Here is the result of an approach that is not:
Preprocess the image! Decreasing the size of the image and executing a blur helps with segmentation:
The segmentation method uses cv2.adaptiveThreshold() to create a binary image that preserves the most important objects: the center of the reel and the external edge of the reel. This is an important step since we are only interested in what exists between these two objects. However, life is not perfect and neither is this segmentation. The shadow of the reel on the table became part of the binary objects detected. Also, the outer edge is not fully connected, as you can see in the resulting image on the right (look at the top left of the circumference):
To join broken segments, a morphological operation can be executed:
Finally, the entire reel area can be exposed by iterating through the contours of the image above and discarding those whose area is larger than what is expected for a reel. The resulting binary image (on the left) can then be used as a mask to identify the reel location on the original image:
Keep in mind that I'm not trying to find a universal solution for your problem. I'm merely showing that there might be other solutions that don't depend on Hough Circles.
Also, this code might need some adjustments to work on a larger number of cases.
Source code:
import cv2
import numpy as np
import sys
img = cv2.imread("test_images/reel.jpg")
if (img is None):
print('!!! Failed imread')
sys.exit(-1)
# create output image
output_img = img.copy()
# 1. Preprocess the image: downscale to speed up processing and execute a blur
SCALE_FACTOR = 0.5
smaller_img = cv2.resize(img, dsize=(0, 0), fx=SCALE_FACTOR, fy=SCALE_FACTOR)
blur_img = cv2.medianBlur(smaller_img, 9)
cv2.imwrite('reel1_blur_img.png', blur_img)
# 2. Segment the image to identify the 2 most important contours: the center of the reel and the outer edge
gray_img = cv2.cvtColor(blur_img, cv2.COLOR_BGR2GRAY)
img_bin = cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 19, 4)
cv2.imwrite('reel2_img_bin.png', img_bin)
green_mask = np.zeros((img_bin.shape[0], img_bin.shape[1]), np.uint8)
#green_mask = cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB) # debug
contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    area = cv2.contourArea(contours[contourIdx])
    #print('contourIdx=', contourIdx, 'w=', w, 'h=', h, 'area=', area)
    # filter out tiny segments
    if (area < 5000):
        #cv2.fillPoly(green_mask, pts=[cnt], color=(0, 0, 255)) # red
        continue
    # draw green contour (filled)
    #cv2.fillPoly(green_mask, pts=[cnt], color=(0, 255, 0)) # green
    cv2.fillPoly(green_mask, pts=[cnt], color=(255)) # white
    # debug:
    #cv2.imshow('green_mask', green_mask)
    #cv2.waitKey(0)
cv2.imshow('green_mask', green_mask)
cv2.imwrite('reel2_green_mask.png', green_mask)
# 3. Fix mask: join segments nearby
kernel = np.ones((3,3), np.uint8)
img_dilation = cv2.dilate(green_mask, kernel, iterations=1)
green_mask = cv2.erode(img_dilation, kernel, iterations=1)
cv2.imshow('fixed green_mask', green_mask)
cv2.imwrite('reel3_img.png', green_mask)
# 4. Extract the reel area from the green mask
reel_mask = np.zeros((green_mask.shape[0], green_mask.shape[1]), np.uint8)
#reel_mask = cv2.cvtColor(green_mask, cv2.COLOR_GRAY2RGB) # debug
contours, hierarchy = cv2.findContours(green_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    area = cv2.contourArea(contours[contourIdx])
    print('contourIdx=', contourIdx, 'w=', w, 'h=', h, 'area=', area)
    # filter out larger segments
    if (area > 110000):
        #cv2.fillPoly(reel_mask, pts=[cnt], color=(0, 0, 255)) # red
        continue
    # draw green contour (filled)
    #cv2.fillPoly(reel_mask, pts=[cnt], color=(0, 255, 0)) # green
    cv2.fillPoly(reel_mask, pts=[cnt], color=(255)) # white
    # debug:
    #cv2.imshow('reel_mask', reel_mask)
    #cv2.waitKey(0)
cv2.imshow('reel_mask', reel_mask)
cv2.imwrite('reel4_reel_mask.png', reel_mask)
# 5. Draw the reel area on the original image
contours, hierarchy = cv2.findContours(reel_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    centers, radius = cv2.minEnclosingCircle(cnt)
    # rescale these values back to the original image size
    centers_orig = (centers[0] // SCALE_FACTOR, centers[1] // SCALE_FACTOR)
    radius_orig = radius // SCALE_FACTOR
    print('centers=', centers_orig, 'radius=', radius_orig)
    cv2.circle(output_img, (int(centers_orig[0]), int(centers_orig[1])), int(radius_orig), (128,0,255), 5) # magenta
cv2.imshow('output_img', output_img)
cv2.imwrite('reel5_output.png', output_img)
# display just the pixels from the original image
larger_reel_mask = cv2.resize(reel_mask, (int(img.shape[1]), int(img.shape[0])))
output_reel_img = cv2.bitwise_and(img, img, mask=larger_reel_mask)
cv2.imshow('output_reel_img', output_reel_img)
cv2.imwrite('reel5_output_reel.png', output_reel_img)
cv2.waitKey(0)
At this point, it's possible to use larger_reel_mask and compute a minimal enclosing circle, draw it over this mask to make it a little bit more round and allow us to retrieve the area of the reel more accurately:
But the 4 lines of code that achieve this improvement I leave as an exercise for the reader.
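For completeness, one possible version of those lines (my sketch of what the author may have had in mind, assuming larger_reel_mask contains a single filled blob):
# fit a minimal enclosing circle to the reel blob in the full-size mask
cnts, _ = cv2.findContours(larger_reel_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
(cx, cy), radius = cv2.minEnclosingCircle(max(cnts, key=cv2.contourArea))
# redraw the mask as a filled circle to round it out, then re-extract the reel pixels
cv2.circle(larger_reel_mask, (int(cx), int(cy)), int(radius), 255, -1)
output_reel_img = cv2.bitwise_and(img, img, mask=larger_reel_mask)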

detect rectangle in image and crop

I have lots of scanned images of handwritten digits, each inside a small rectangle.
Please help me crop each region containing a digit and save the crops, giving the same name to each row.
import cv2
img = cv2.imread('Data\Scan_20170612_4.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
i = 0
for c in contours:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.09 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
        cv2.imwrite('cropped\\' + str(i) + '_img.jpg', img)
        i += 1
Here is My Version
import cv2
import numpy as np
fileName = ['9','8','7','6','5','4','3','2','1','0']
img = cv2.imread('Data\Scan_20170612_17.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(gray,kernel,iterations = 2)
kernel = np.ones((4,4),np.uint8)
dilation = cv2.dilate(erosion,kernel,iterations = 2)
edged = cv2.Canny(dilation, 30, 200)
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
rects = [cv2.boundingRect(cnt) for cnt in contours]
rects = sorted(rects,key=lambda x:x[1],reverse=True)
i = -1
j = 1
y_old = 5000
x_old = 5000
for rect in rects:
    x,y,w,h = rect
    area = w * h
    if area > 47000 and area < 70000:
        if (y_old - y) > 200:
            i += 1
            y_old = y
        if abs(x_old - x) > 300:
            x_old = x
            x,y,w,h = rect
            out = img[y+10:y+h-10, x+10:x+w-10]
            cv2.imwrite('cropped\\' + fileName[i] + '_' + str(j) + '.jpg', out)
            j += 1
That's an easy thing if you try. Here's my output (the image and one small piece of it).
What I did:
Resized the image first because it was too big for my screen
Erode, dilate to remove small dots and thicken the lines
Threshold the image
Flood fill, beginning at the right point
Invert the flood fill
Find contours and draw them one at a time, keeping those whose area is approximately that of the rectangle. For my resized (500x500) image I put the contour area in the range 500 to 2500 (trial and error, anyway).
Find the bounding rectangle and crop that mask from the main image.
Then save that piece with a proper name, which I didn't do.
Maybe there's a simpler way, but I liked this. The code is a little clumsy, but I've put it below since you may still need it.
Here's how the mask looks when you find contours each at a time
code:
import cv2;
import numpy as np;
# Run the code with the image name, keep pressing space bar
# Change the kernel, iterations, Contour Area, position accordingly
# These values work for your present image
img = cv2.imread("your_image.jpg", 0);
h, w = img.shape[:2]
kernel = np.ones((15,15),np.uint8)
e = cv2.erode(img,kernel,iterations = 2)
d = cv2.dilate(e,kernel,iterations = 1)
ret, th = cv2.threshold(d, 150, 255, cv2.THRESH_BINARY_INV)
mask = np.zeros((h+2, w+2), np.uint8)
cv2.floodFill(th, mask, (200,200), 255); # position = (200,200)
out = cv2.bitwise_not(th)
out= cv2.dilate(out,kernel,iterations = 3)
cnt, h = cv2.findContours(out,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(cnt)):
    area = cv2.contourArea(cnt[i])
    if(area > 10000 and area < 100000):
        mask = np.zeros_like(img)
        cv2.drawContours(mask, cnt, i, 255, -1)
        x,y,w,h = cv2.boundingRect(cnt[i])
        crop = img[y:h+y, x:w+x]
        cv2.imshow("snip", crop)
        if(cv2.waitKey(0)) == 27: break
cv2.destroyAllWindows()
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
You are using cv2.RETR_LIST to find contours in the image. For your image, you will get better output with cv2.RETR_EXTERNAL. Before using it, first remove the black border line from the image.
cv2.RETR_LIST gives you list of all contours for image
cv2.RETR_EXTERNAL gives you only external or outer contours, not internal contours
Change the line to:
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Contours Hierarchy
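A rough sketch of what removing the border could look like (the 10-pixel margin is only an assumption for illustration; it crops away a scanner border strip before finding the external contours, keeping the OpenCV 3.x return signature used in the question):
# crop a small margin so a black scanner border does not become the outermost contour
margin = 10
edged = edged[margin:-margin, margin:-margin]
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)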
