I have cropped the original photo down to the surrounding area, but that area depends on the mouse position and will be different every time, so a static crop with fixed dimensions will not work.
Contour detection does not find the rectangle I want because the rectangle is incomplete: the cursor sits on top of part of its border.
Currently, my code detects the contours, but it only grabs the outer rectangle and makes a small crop.
I would like it to detect the inner rectangle instead. Do I have to somehow complete the rectangle first?
Source image
Contours detected
What I would LIKE to crop
What it is currently cropping
import cv2
import imutils # https://pypi.org/project/imutils/
import numpy as np
...
def crop_hover_list(cursor_coord_x, cursor_coord_y):
    img_test = cv2.imread("source.jpg")
    grayimage = cv2.cvtColor(img_test, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(grayimage, 235, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(img_test, contours, -1, (0, 255, 0), 3)
    cv2.imshow("test", img_test)
    cv2.waitKey(0)
    for contour in contours:
        if cv2.contourArea(contour) < 250:
            continue
        print(list(contour))
        ext_left = tuple(contour[contour[:, :, 0].argmin()][0])
        ext_right = tuple(contour[contour[:, :, 0].argmax()][0])
        ext_top = tuple(contour[contour[:, :, 1].argmin()][0])
        ext_bot = tuple(contour[contour[:, :, 1].argmax()][0])
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(img_test, (x, y), (x + w, y + h), (0, 0, 255), 3)
        cv2.imshow("test", img_test)
        cv2.waitKey(0)
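Would something like a morphological close on the thresholded mask be the right way to "complete" the rectangle first? A rough sketch of what I mean, reusing the mask from the code above and with a guessed kernel size:

kernel = np.ones((15, 15), np.uint8)  # kernel size is a guess; it should be larger than the gap the cursor leaves
closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)  # close small gaps in the white border
contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)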
I have your test image in the images/ folder; change the path as needed to test the script.
img_test = cv2.imread("images/MCeLz.png")
Please pay attention to this line:
ret, contours, hierarchy = cv2.findContours ...
I need the extra 'ret' return value for my OpenCV version 3.2; on your version you may not.
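As a side note, if the script has to run on several OpenCV versions, a small unpacking shim avoids the problem; this is only a sketch, and the dummy mask is just a stand-in for the real one.

import cv2
import numpy as np

mask = np.zeros((100, 100), dtype=np.uint8)  # stand-in for the real threshold mask
cv2.rectangle(mask, (20, 20), (80, 80), 255, -1)

# OpenCV 3.x returns (image, contours, hierarchy); 2.4.x and 4.x return (contours, hierarchy)
ret = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = ret[0] if len(ret) == 2 else ret[1]
print(len(contours))  # 1 contour for the filled rectangle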
My solution uses two criteria: a minimum area, as you did, and a color range. It then checks that the cursor lies inside the contour's bounding box.
import cv2
import imutils # https://pypi.org/project/imutils/
import numpy as np
def crop_hover_list(cursor_coord_x, cursor_coord_y):
    img_test = cv2.imread("images/MCeLz.png")
    cimg = img_test.copy()
    mask = cv2.inRange(img_test, (200, 200, 200), (235, 235, 235))
    cv2.imshow("test", img_test)
    cv2.imshow('mask', mask)
    ret, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        if cv2.contourArea(contour) < 2000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        is_cursor_inside = x <= cursor_coord_x <= (x + w) and y <= cursor_coord_y <= (y + h)
        if is_cursor_inside:
            cv2.rectangle(cimg, (x, y), (x + w, y + h), (0, 0, 255), 4)
            cv2.drawContours(cimg, [contour], 0, (0, 255, 0), 3)
            crop = img_test[y:y+h, x:x+w]
            cv2.imshow("crop", crop)
    cv2.imshow("cimg", cimg)
    cv2.imshow("test", img_test)
    cv2.waitKey(0)

crop_hover_list(300, 50)
I am new to OpenCV. I have two questions to ask.
I am trying to print the number of contours that remain after applying the area filter. I get the correct output in imshow but not in the print statement. I understand that print(len(contours)) gives the total number of contours, but I need the number of contours whose area is > 400. You can check the Python code below for more details. Please help me with this.
Is it possible to set the threshold value above 255? Whenever I raise it above 255 I get a black image, even if I also increase the max value.
Thank you!
import cv2 as cv
import numpy as np
im_color = cv.imread("D:\python_project\Focus_detection_1/_00005_cells.png", cv.IMREAD_COLOR)
im_gray = cv.cvtColor(im_color, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(im_gray, thresh=254, maxval=255, type=cv.THRESH_BINARY)
mask = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)
im_thresh_color = cv.bitwise_and(im_color,mask)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)
cv.drawContours(im_thresh_color,contours,-1,(0,0,255),2)
for c in contours:
    area = cv.contourArea(c)
    if area > 400:
        x, y, w, h = cv.boundingRect(c)
        im = cv.rectangle(im_thresh_color, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)  # -1 draws all contours, 0 only the 1st contour, and so on
        text = cv.putText(im, 'Focused', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 2)
no_of_images = len(contours)
print("images:", no_of_images)
while True:
    cv.imshow("original image", im_color)
    cv.imshow("Thresh color with contour", im_thresh_color)
    # print("n:", len(im_thresh_color))
    if cv.waitKey(1) == ord("n"):
        break
cv.destroyAllWindows()
Here is the updated code for your first question: it counts how many contours have an area greater than 400 and then prints that count.
import cv2 as cv
import numpy as np
im_color = cv.imread("D:\python_project\Focus_detection_1/_00005_cells.png", cv.IMREAD_COLOR)
im_gray = cv.cvtColor(im_color, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(im_gray, thresh=254, maxval=255, type=cv.THRESH_BINARY)
mask = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)
im_thresh_color = cv.bitwise_and(im_color, mask)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)
counter = 0
for c in contours:
    area = cv.contourArea(c)
    if area > 400:
        x, y, w, h = cv.boundingRect(c)
        im = cv.rectangle(im_thresh_color, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # -1 draws all contours, 0 only the 1st contour, and so on
        cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)
        text = cv.putText(im, 'Focused', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 2)
        counter += 1
print("images:", counter)
while True:
    cv.imshow("original image", im_color)
    cv.imshow("Thresh color with contour", im_thresh_color)
    # print("n:", len(im_thresh_color))
    if cv.waitKey(1) == ord("n"):
        break
cv.destroyAllWindows()
Answer to question 2: no, it is not possible to use a threshold above 255, because 8-bit R, G, B (and grayscale) values range from 0 to 255.
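A quick way to convince yourself of this, as a small sketch: in an 8-bit image no pixel can exceed 255, so any threshold above that leaves every pixel black.

import cv2 as cv
import numpy as np

im_gray = np.full((4, 4), 255, dtype=np.uint8)  # the brightest possible 8-bit pixels
_, t254 = cv.threshold(im_gray, thresh=254, maxval=255, type=cv.THRESH_BINARY)
_, t300 = cv.threshold(im_gray, thresh=300, maxval=255, type=cv.THRESH_BINARY)
print(t254.max())  # 255 -> pixels above 254 become white
print(t300.max())  # 0   -> no 8-bit pixel can exceed 300, so everything stays black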
I have been working with OpenCV in order to detect a square obstacle. So far this is the image I get after applying filters and Canny.
The obstacle I am trying to identify is the horizontal one; the three vertical rectangles are guide lines on the floor. My goal is to keep only the horizontal rectangle and separate it from the others, but after applying findContours I get a single object that includes all the shapes. This is the code I have been using to find only the biggest rectangle by area:
# find the biggest contour (c) by area
if contours != 0:
    if not contours:
        print("Empty")
    else:
        bigone = max(contours, key=cv2.contourArea) if max else None
        area = cv2.contourArea(bigone)
        if area > 10000:
            x, y, w, h = cv2.boundingRect(bigone)
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
            cv2.putText(img, "Obstacle", (x+w/2, y-20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            pts = np.array(
                [[[x, y], [x+w, y], [x+w, y+h], [x, y+h]]], dtype=np.int32)
            cv2.fillPoly(mask, pts, (255, 255, 255))
            # values = img[np.where((mask == (255, 255, 255)).all(axis=2))]
            res = cv2.bitwise_and(img, mask)  # view only the obstacle
            obs_area = w*h
            print(obs_area)
            if obs_area <= 168000:
                command_publisher.publish("GO")
                cv2.putText(
                    img, "GO", (380, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
            else:
                command_publisher.publish("STOP")
                cv2.putText(img, "STOP", (380, 400),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
# show the output image
cv2.imshow("Image", img)
cv2.waitKey(1)
And this is the result I am getting:
Is there a way of separating my obstacle from the lines on the floor with some kind of filter or algorithm?
Here is an example image to work with:
Here is one way to do that using Python/OpenCV.
- Read the input
- Convert to HSV and extract only the saturation channel (black/white/gray have zero saturation)
- Threshold
- Apply morphology open and close to remove the extraneous white regions
- Get the contour and approximate to simple polygon
- Draw the polygon on the input
- Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('board.png')
# convert to HSV and extract saturation channel
sat = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:,:,1]
# threshold
thresh = cv2.threshold(sat, 90, 255, 0)[1]
# apply morphology close to fill interior regions in mask
kernel = np.ones((7,7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((13,13), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours (presumably only 1) and fit to simple polygon (quadrilateral)
cntrs = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
c = cntrs[0]
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.05 * peri, True)
# draw polygon on input
result = img.copy()
cv2.polylines(result, [np.int32(approx)], True, (0,0,255), 1, cv2.LINE_AA)
# write result to disk
cv2.imwrite("board_saturation.png", sat)
cv2.imwrite("board_thresh.png", thresh)
cv2.imwrite("board_morph.png", morph)
cv2.imwrite("board_contour.png", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("THRESH", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Saturation channel image:
Thresholded image:
Morphology cleaned image:
Contour on input:
In your image the problem seems to be the white rectangles. My approach is to check each row: if a row contains many pixels that are close to white (255, 255, 255), make the whole row black.
Here is my code:
import cv2
import numpy as np
import random as rng
img = cv2.imread("/ur/image/directory/obstacle.png")
height, width, channels = img.shape
cv2.imshow('Source', img)
# Check each row and eliminate white rectangles (if the row contains more near-white pixels than the limit)
for x in range(0, height):
    white_counter = 0
    for y in range(0, width):
        if img[x, y, 0] >= 180 and img[x, y, 1] >= 180 and img[x, y, 2] >= 180:
            white_counter = white_counter + 1
    if white_counter > 10:
        for y in range(0, width):
            img[x, y, 0] = 0
            img[x, y, 1] = 0
            img[x, y, 2] = 0
cv2.imshow('Elimination White Rectangles', img)
# Find contours and draw a rectangle for each
src_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = 300
canny_output = cv2.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_poly = [None]*len(contours)
boundRect = [None]*len(contours)
for i, c in enumerate(contours):
    contours_poly[i] = cv2.approxPolyDP(c, 3, True)
    boundRect[i] = cv2.boundingRect(contours_poly[i])
rng.seed(12345)
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i in range(len(contours)):
    color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
    cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])),
                  (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)
cv2.imshow('Output', drawing)
cv2.waitKey(0)
cv2.destroyAllWindows()
Eliminate White Rectangles:
Result:
I am importing the attached image. After importing it, I want to remove the horizontal lines, detect the signature, extract it, draw a rectangle around it, crop the rectangle and save it. I am struggling to identify the entire region of the signature as one contour or a group of contours.
I have already tried findContours and then various ways to detect the signature region. Please refer to the code below.
Python Script:
imagePath
#read image
image = cv2.imread(imagePath,cv2.COLOR_BGR2RGB)
#Convert to greyscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) # grayscale
#Apply threshold
ret,thresh1 = cv2.threshold(gray, 0, 255,cv2.THRESH_OTSU|cv2.THRESH_BINARY_INV)
plt.imshow(thresh1,cmap = 'gray')
#preprocessing
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15,15))
dilation = cv2.dilate(thresh1, rect_kernel, iterations = 1)
plt.imshow(dilation,cmap = 'gray')
#Detect contours
contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours[0]
height, width, _ = image.shape
min_x, min_y = width, height
max_x = max_y = 0
for contour, hier in zip(contours, hierarchy):
    (x, y, w, h) = cv2.boundingRect(contour)
    min_x, max_x = min(x, min_x), max(x+w, max_x)
    min_y, max_y = min(y, min_y), max(y+h, max_y)
    if w > 80 and h > 80:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
if max_x - min_x > 0 and max_y - min_y > 0:
    fin = cv2.rectangle(image, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)
    plt.imshow(fin)
final = cv2.drawContours(image, contours, -1, (0, 0, 255), 6)
plt.imshow(final, cmap='gray')
The final objective is to create a rectangle around the entire signature.
Trying to generalize on the other image:
Instead of removing the horizontal lines, it may be easier to perform HSV color thresholding. The idea is to isolate the signature onto a mask and then extract it. We convert the image to HSV format then use a lower/upper color threshold to generate a mask
lower = np.array([90, 38, 0])
upper = np.array([145, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
Mask
To detect the signature, we can get the combined bounding box for all of the contours with np.concatenate() then use cv2.boundingRect() to obtain the coordinates
Now that we have the bounding box coordinates, we can use Numpy slicing to crop and extract the ROI
import numpy as np
import cv2
# Load image and HSV color threshold
image = cv2.imread('1.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([90, 38, 0])
upper = np.array([145, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(image, image, mask=mask)
result[mask==0] = (255, 255, 255)
# Find contours on extracted mask, combine boxes, and extract ROI
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = np.concatenate(cnts)
x,y,w,h = cv2.boundingRect(cnts)
cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
ROI = result[y:y+h, x:x+w]
cv2.imshow('result', result)
cv2.imshow('mask', mask)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
Note: the lower/upper color ranges were obtained from the thread "Choosing the correct upper and lower HSV boundaries for color detection with cv::inRange (OpenCV)".
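If you need to find such a range for a different image, a small trackbar tool is one way to do it; this is only a sketch (the window and trackbar names are arbitrary, and '1.jpg' is the same input as above).

import cv2
import numpy as np

image = cv2.imread('1.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

cv2.namedWindow('mask')
defaults = {'H low': 0, 'S low': 0, 'V low': 0, 'H high': 179, 'S high': 255, 'V high': 255}
for name, value in defaults.items():
    maximum = 179 if name.startswith('H') else 255  # OpenCV hue runs 0-179, S and V run 0-255
    cv2.createTrackbar(name, 'mask', value, maximum, lambda v: None)

while True:
    lower = np.array([cv2.getTrackbarPos(n, 'mask') for n in ('H low', 'S low', 'V low')])
    upper = np.array([cv2.getTrackbarPos(n, 'mask') for n in ('H high', 'S high', 'V high')])
    cv2.imshow('mask', cv2.inRange(hsv, lower, upper))
    if cv2.waitKey(30) & 0xFF == 27:  # press Esc to quit and note the values
        break
cv2.destroyAllWindows()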
Objective: Copy the bigger blobs in another mask image
I have a threshold image with blobs as shown:
How could I copy the bigger blobs into a mask image and leave out the one-pixel blobs?
My code (but I am not getting the desired result):
import numpy as np
import cv2
ref_img = cv2.imread('threshold.jpg', 0)
thresh = np.copy(ref_img)
cnts,_ = cv2.findContours(ref_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
mask = np.zeros(ref_img.shape, dtype="uint8")
for c in cnts:
    (x, y), radius = cv2.minEnclosingCircle(c)
    area = cv2.contourArea(c)
    if int(area) < 1:
        cv2.circle(mask, (int(x), int(y)), int(radius), (255, 255, 255), -1)
cv2.imshow('img', mask)
cv2.waitKey(0)
Note: Using OpenCV 2.4.x
Here is one method you can use to achieve your goal. Explanations are provided in the code comments.
import numpy as np
import cv2
ref_img = cv2.imread('threshold.jpg', 0)
img, cnts,_ = cv2.findContours(ref_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
mask = np.zeros(ref_img.shape, dtype="uint8")
for c in cnts:
    # Get the bounding rect surrounding the blob
    x, y, w, h = cv2.boundingRect(c)
    # The area of the rect approximates the area of the blob, minus its complexity
    area = w*h
    # For a box area bigger than one, copy the information from the source image to the mask.
    # Since the bounding box contains all the relevant information, just copy the entire box to the mask.
    if int(area) > 1:
        mask[y:y+h, x:x+w] = ref_img[y:y+h, x:x+w]
cv2.imshow('img', mask)
cv2.waitKey(0)
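An alternative sketch, closer to your original idea of drawing the blobs yourself, is to fill each qualifying contour directly on the mask with cv2.drawContours (thickness -1 means fill); the area cut-off of 1 is kept from the code above.

import cv2
import numpy as np

ref_img = cv2.imread('threshold.jpg', 0)
ret = cv2.findContours(ref_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = ret[0] if len(ret) == 2 else ret[1]  # handles 2.4.x, 3.x and 4.x return values

mask = np.zeros(ref_img.shape, dtype="uint8")
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    if w * h > 1:  # skip one-pixel blobs
        cv2.drawContours(mask, [c], -1, 255, -1)  # fill the blob on the mask

cv2.imshow('img', mask)
cv2.waitKey(0)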
I have this type of image, from which I only want to extract the characters.
After binarization, I am getting this image
img = cv2.imread('the_image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 9)
Then find contours on this image.
(im2, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for contour in cnts[:2000]:
    x, y, w, h = cv2.boundingRect(contour)
    aspect_ratio = h/w
    area = cv2.contourArea(contour)
    cv2.drawContours(img, [contour], -1, (0, 255, 0), 2)
I am getting
I need a way to filter the contours so that only the characters are selected; then I can find the bounding boxes and extract the ROIs.
I can find contours and filter them based on their areas, but the resolution of the source images is not consistent. These images are taken with mobile cameras.
Also, as the borders of the boxes are disconnected, I can't accurately detect the boxes.
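One idea to make the area filter less resolution-dependent would be to compare each contour's area to the whole image area instead of using an absolute pixel count. Would something like this rough sketch (reusing img, gray and cnts from my code above, with an arbitrary 0.01% cut-off) be reliable?

h_img, w_img = gray.shape
min_rel_area = 0.0001  # 0.01% of the image; would need tuning
for contour in cnts[:2000]:
    if cv2.contourArea(contour) / (h_img * w_img) < min_rel_area:
        continue
    cv2.drawContours(img, [contour], -1, (0, 255, 0), 2)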
Edit:
If I discard boxes with an aspect ratio of less than 0.4, it works to some extent, but I don't know whether it will hold up for images of different resolutions.
for contour in cnts[:2000]:
    x, y, w, h = cv2.boundingRect(contour)
    aspect_ratio = h/w
    area = cv2.contourArea(contour)
    if aspect_ratio < 0.4:
        continue
    print(aspect_ratio)
    cv2.drawContours(img, [contour], -1, (0, 255, 0), 2)
Not so difficult...
import cv2
img = cv2.imread('img.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
cv2.imshow('thresh', thresh)
im2, ctrs, hier = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
for i, ctr in enumerate(sorted_ctrs):
    x, y, w, h = cv2.boundingRect(ctr)
    roi = img[y:y + h, x:x + w]
    area = w*h
    if 250 < area < 900:
        rect = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('rect', rect)
cv2.waitKey(0)
Result
You can tweak the code as you want (here the ROIs are taken from the original image; for eventual OCR you would want to save them in binary format instead, and better filters than area alone are available).
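For example, to keep binary crops for later OCR you could cut the ROIs out of thresh instead of img and write them to disk; a minimal sketch based on the loop above (the file names are arbitrary).

for i, ctr in enumerate(sorted_ctrs):
    x, y, w, h = cv2.boundingRect(ctr)
    if 250 < w * h < 900:
        # crop from the binary image rather than the color one and save it
        cv2.imwrite('roi_{}.png'.format(i), thresh[y:y + h, x:x + w])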
Source: Extract ROI from image with Python and OpenCV and some of my knowledge.
Just kidding, take a look at my questions/answers.