OpenCV HoughLinesP bad lines - Python

I'm a beginner and I'm trying to do some line detection in-game.
This is the photo in which I'm trying to detect lanes
This is the result
The HoughLinesP code:
```
lines = cv2.HoughLinesP(cropped_image, 2, np.pi / 180, 100, np.array([]), minLineLength=50, maxLineGap=5)

# The displaying function:
def displayLines(image, lines):
    line_image = np.zeros_like(image)
    if lines is not None:
        for line in lines:
            x1, x2, y1, y2 = line.reshape(4)
            cv2.line(line_image, (x1, x2), (x2, y2), (0, 255, 0), 10)
    return line_image
```
Here is the cropping function:
```
def region(image):
    height = image.shape[0]
    polygons = np.array([[
        (570, 640), (1600, 700), (863, 520)
    ]])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, polygons, 255)
    masked_image = cv2.bitwise_and(canny, mask)
    return masked_image
```
As input I'm giving an image with the edges already detected. The function:
```
def canny(image):
    gray = cv2.cvtColor(lane_image, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 50, 150)
    return canny
```
I don't know what the problem is.

I recommend you mask out the noise before attempting to detect lines:
import cv2
import numpy as np
img = cv2.imread("driving_game.jpg")
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([18, 120, 200])
upper = np.array([30, 255, 255])
mask = cv2.inRange(img_hsv, lower, upper)
img_masked = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow("Mask", mask)
cv2.imshow("Masked Image", img_masked)
cv2.waitKey(0)
Output:
Where the
lower = np.array([18, 120, 200])
upper = np.array([30, 255, 255])
are the lower and upper values for the HSV color mask. With the above mask, you won't even need the cv2.HoughLinesP method; you can simply detect the contours of the non-masked object and approximate the results:
import cv2
import numpy as np
def process(img):
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([18, 120, 200])
upper = np.array([30, 255, 255])
mask = cv2.inRange(img_hsv, lower, upper)
mask_canny = cv2.Canny(mask, 50, 50)
kernel = np.ones((2, 2))
img_dilate = cv2.dilate(mask_canny, kernel, iterations=7)
return cv2.erode(img_dilate, kernel, iterations=7)
def draw_lines(img):
contours, _ = cv2.findContours(process(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.13 * peri, True)
cv2.drawContours(img, [approx], -1, (0, 0, 255), 5)
img = cv2.imread("driving_game.jpg")
draw_lines(img)
cv2.imshow("Lines Detected", img)
cv2.waitKey(0)
Output:


Python - Draw Bounding Box Around A Group Of Contours

Hi, I have code that finds and draws contours around objects that are yellow.
Here is the code:
import cv2
import numpy as np
from PIL import ImageGrab

lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([30, 255, 255])

def test():
    while True:
        imgDef = ImageGrab.grab()
        image = np.array(imgDef)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        mask = cv2.erode(mask, kernel, iterations=1)
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
        cv2.imshow('test', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    test()
Right now the output is as follows:
I wish to group contours that are in close proximity to one another and draw a bounding box around them like so:
How can I achieve this? Am I right to be looking into the scikit KMeans function to group them?
You can use cv2.kmeans for that. I suggest changing cv2.RETR_EXTERNAL to cv2.RETR_TREE. I share a possible solution here; the only problem is that you need to know the number of clusters before using cv2.kmeans.
import cv2
import numpy as np
from PIL import ImageGrab
import random

lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([30, 255, 255])

def get_contours_by_zones(contours, number_of_zones):
    center_points = []
    for cnt in contours:
        cx = cnt[:, 0, 0].mean()
        cy = cnt[:, 0, 1].mean()
        center_points.append((cx, cy))
    center_points = np.array(center_points).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, _, cluster_centers = cv2.kmeans(center_points, number_of_zones, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    contour_zones = [[] for _ in range(number_of_zones)]
    for i, (cnt_x, cnt_y) in enumerate(center_points):
        tmp = cluster_centers - (cnt_x, cnt_y)
        distances = np.sqrt((tmp**2).sum(axis=1))
        cluster_index = np.argmin(distances)
        contour_zones[cluster_index].append(contours[i])
    return contour_zones

def test():
    while True:
        imgDef = ImageGrab.grab()
        image = np.array(imgDef)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        mask = cv2.erode(mask, kernel, iterations=1)
        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # ignore outer contour
        contour_per_zone = get_contours_by_zones(contours=contours[1:], number_of_zones=3)
        canvas = np.empty_like(image)
        canvas[...] = 255
        for zone in contour_per_zone:
            # add the corresponding code to get xmin, xmax, ymin, ymax from zone,
            # so you can draw the corresponding rectangle
            # draw each zone with a different color
            cv2.drawContours(canvas, zone, -1, (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)), 3)
        cv2.imshow('test', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == "__main__":
    test()
The get_contours_by_zones() function uses cv2.kmeans to assign each contour to a group.
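To get the bounding box the placeholder comment refers to, one option is to stack all the contour points of a zone and take their common bounding rectangle. A minimal sketch of that step, using the contour_per_zone and canvas variables from the code above (the box colour is an arbitrary choice):
```
# Sketch: one bounding box per zone, drawn on the same canvas as above.
for zone in contour_per_zone:
    if not zone:
        continue                                   # a cluster may end up empty
    all_points = np.concatenate(zone)              # stack every contour point in the zone
    x, y, w, h = cv2.boundingRect(all_points)      # bounding box of the whole group
    cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 0, 255), 2)
```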

Removing blood vessels - denoising the vessels into the background colour

Problem statement:
Given a fundus image, I need to suppress the blood vessels so that they do not appear on the image for the classifier being used.
There were a few thoughts, but the easiest approach was to segment out a rough outline of the vasculature. This is achieved.
Next was to get the colour of the surrounding area and use it to blend over the white (vessel) areas found, then merge the result back into the original image.
Any suggestions for identifying the colour, blending it in, and then merging it back into the original image so that the vessels are not visible? One thought was to take the contour of each white area, find the corresponding colour just outside it, and use that to blend it back.
Any alternative approach is also welcome.
eyepac image - the vessels are visible; I want to make them the colour of their surroundings so that they are not visible.
Here is the code:
import cv2
import numpy as np

def apply_threshold_with_denoising(image):
    image = cv2.adaptiveThreshold(image, 250, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
    image = cv2.fastNlMeansDenoising(image, 1.5, 5, 5)
    return image

def delete_small_components(image, size):
    _, blackAndWhite = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(blackAndWhite, None, None, None, 8, cv2.CV_32S)
    sizes = stats[1:, -1]  # get CC_STAT_AREA component
    image = np.zeros(labels.shape, np.uint8)
    for i in range(0, nlabels - 1):
        if sizes[i] >= 150:  # filter small dotted regions
            image[labels == i + 1] = 255
    return cv2.bitwise_not(image)

def kernel(num1, num2):
    return np.ones((num1, num2), np.uint8)

def resize(img):
    scale_percent = 50  # percent of original size
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

def get_large_vessels(image):
    struct_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 4))
    opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, struct_kernel, iterations=1)
    cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        area = cv2.contourArea(c)
        if area < 250:
            cv2.drawContours(opening, [c], -1, (0, 0, 0), -1)
    return opening

def get_small_vessels(both, large):
    large = cv2.dilate(large, kernel(3, 3), iterations=5)
    subtract = cv2.subtract(both, large)
    return subtract

def remove_background(image, mask):
    image = cv2.bitwise_and(cv2.bitwise_not(image), cv2.bitwise_not(image), mask=mask)
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel(2, 2))
    return image

img = cv2.imread('2001_left.jpeg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_b = np.array([0, 0, 30])
u_b = np.array([255, 255, 255])
mask = cv2.inRange(hsv, l_b, u_b)
mask = cv2.erode(mask, kernel(2, 2), iterations=5)
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = apply_threshold_with_denoising(grayscale)
kernel22 = cv2.dilate(threshold, kernel(2, 2), iterations=2)
remove_small1kernel22 = delete_small_components(kernel22, 5)
dilation = cv2.dilate(threshold, kernel(2, 1), iterations=2)
remove_small1 = delete_small_components(dilation, 150)
dilation = cv2.dilate(threshold, kernel(1, 2), iterations=2)
remove_small2 = delete_small_components(dilation, 150)
merge = cv2.addWeighted(remove_small1, 0.5, remove_small2, 0.5, 0)
threshold_merge = apply_threshold_with_denoising(merge)
remove_small3 = delete_small_components(threshold_merge, 150)
large_vessels = get_large_vessels(remove_background(remove_small3, mask))
cv2.imshow('Large blood vasculature', large_vessels)
small_vessels = get_small_vessels(remove_background(remove_small3, mask), large_vessels)
cv2.imshow('small vasculature', small_vessels)
I tried the above code.
It produces the vessel maps, both small and large.
I need a way to use this map to recolour the vessels with a mean colour value taken from just outside each contour, so that they blend in when put back.
Is there a way to just identify each segment, colour it, and then merge it back?
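For the blending step described above, one standard alternative to averaging the surrounding colour per contour is inpainting, which fills the masked pixels from their neighbourhood. A minimal sketch, assuming img, small_vessels and large_vessels from the code above (the dilation amount and inpaint radius are guesses you would need to tune):
```
# Sketch only: blend the detected vessels into the surrounding colour via inpainting.
vessel_mask = cv2.bitwise_or(small_vessels, large_vessels)          # combined vessel map
vessel_mask = cv2.dilate(vessel_mask, kernel(3, 3), iterations=1)   # also cover vessel borders
blended = cv2.inpaint(img, vessel_mask, 5, cv2.INPAINT_TELEA)       # fill from surrounding pixels
cv2.imshow('Vessels blended into background', blended)
cv2.waitKey(0)
```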

OpenCV rectangle detection on a noisy image

One question: is it possible to detect a rectangle in an image when it touches noise lines and other shapes?
This is my function to detect contoures on image:
def findContours(img_in):
    w, h, c = img_in.shape  # img_in is the input image
    resize_coeff = 0.25
    img_in = cv2.resize(img_in, (int(resize_coeff * h), int(resize_coeff * w)))
    img_in = ip.findObjects(img_in)
    blr = cv2.GaussianBlur(img_in, (9, 9), 0)
    img = cv2.Canny(blr, 50, 250, L2gradient=False)
    kernel = np.ones((5, 5), np.uint8)
    img_dilate = cv2.dilate(img, kernel, iterations=1)
    img = cv2.erode(img_dilate, kernel, iterations=1)
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    max_index, max_area = max(enumerate([cv2.contourArea(x) for x in contours]), key=lambda x: x[1])
    max_contour = contours[max_index]
    img_out = cv2.resize(img, (int(resize_coeff * h), int(resize_coeff * w)))
    cv2.drawContours(img_in, [max_contour], 0, (0, 0, 255), 2)
    re.rectangle(img, [max_contour])
    cv2.imshow("test", img_in)
    cv2.imshow("test1", img)
    cv2.waitKey()
    return img
I got this result:
The result I want:
When I use shape detection I get a result with 15 corners instead of four. The function:
def rectangle(img, contours):
    for contour in contours:
        approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
        print(len(approx))
        x = approx.ravel()[0]
        y = approx.ravel()[1] - 5
        if len(approx) == 4:
            print("Rect")
            x, y, w, h = cv2.boundingRect(approx)
            aspectRatio = float(w) / h
            print(aspectRatio)
            cv2.putText(img, "rectangle", (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0))
EDIT:
Original image:
What if you remove the noise around that shape? I think your mask is good enough for further processing:
import numpy as np
import sys
import cv2
# Load the mask
dir = sys.path[0]
im = cv2.imread(dir+'/img.png')
H, W = im.shape[:2]
# Make gray scale image
gry = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# Make binary image
bw = cv2.threshold(gry, 127, 255, cv2.THRESH_BINARY)[1]
bw = ~bw
# Focus on edges
bw = cv2.erode(bw, np.ones((5, 5)))
# Use flood fill to remove noise
cv2.floodFill(bw, np.zeros((H+2, W+2), np.uint8), (0, 0), 0)
bw = cv2.medianBlur(bw, 7)
# Remove remaining noise with another flood fill
nonRectArea = bw.copy()
cv2.floodFill(nonRectArea, np.zeros((H+2, W+2), np.uint8), (W//2, H//2), 0)
bw[np.where(nonRectArea == 255)] = 0
# Find contours and sort them by width
cnts, _ = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts.sort(key=lambda p: cv2.boundingRect(p)[2], reverse=True)
# Find biggest blob
x, y, w, h = cv2.boundingRect(cnts[0])
cv2.rectangle(im, (x, y), (x+w, y+h), 127, 1)
# Save output
cv2.imwrite(dir+'/img_1.png', im)
cv2.imwrite(dir+'/img_2.png', bw)
cv2.imwrite(dir+'/img_3.png', nonRectArea)

How to detect the largest rectangle in a grid using OpenCV?

I have some images which are in the form of a grid. I have code that finds the largest rectangle in the grid. However, it works on some images and completely fails on others, and I need help fine-tuning the code to work in all cases. Ideally I'd like the contours exactly on the border.
The code:
import cv2
import numpy as np

img = cv2.imread('3.jpg')
frame = cv2.resize(img, (1000, 500))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 10, 120])
upper_red = np.array([15, 255, 255])
mask = cv2.inRange(hsv, lower_red, upper_red)
cv2.imshow("a", mask)
cv2.waitKey(0)
contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 0:
    red_area = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(red_area)
    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.waitKey(0)
Image in which it works correctly:
Image in which the code does not work:
Your problem of finding the max contour after thresholding can be solved simply by identifying the extreme corners of the mask:
import cv2
import numpy as np
img = cv2.imread('2.png')
frame = cv2.resize(img,(1000,500))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 10, 120])
upper_red = np.array([15, 255, 255])
mask = cv2.inRange (hsv, lower_red, upper_red)
y0 = np.min(np.where(mask>0)[0])
x0 = np.min(np.where(mask>0)[1])
y1 = np.max(np.where(mask>0)[0])
x1 = np.max(np.where(mask>0)[1])
cv2.rectangle(frame,(x0, y0),(x1, y1),(0, 0, 255), 2)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.waitKey(0)

Separate object contours with OpenCV

I have been working with OpenCV in order to detect a square obstacle. So far this is the image I get after applying filters and Canny.
The obstacle I am trying to identify is the horizontal one; the three vertical rectangles are guide lines on the floor. My goal is to keep only the horizontal rectangle, separating it from the others, but after applying findContours I only get a single object that includes all the shapes. This is the code I have been using in order to find only the biggest rectangle by its area:
# find the biggest contour (c) by the area
if contours != 0:
    if not contours:
        print("Empty")
    else:
        bigone = max(contours, key=cv2.contourArea) if max else None
        area = cv2.contourArea(bigone)
        if area > 10000:
            x, y, w, h = cv2.boundingRect(bigone)
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
            cv2.putText(img, "Obstacle", (x+w/2, y-20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            pts = np.array(
                [[[x, y], [x+w, y], [x+w, y+h], [x, y+h]]], dtype=np.int32)
            cv2.fillPoly(mask, pts, (255, 255, 255))
            #values = img[np.where((mask == (255, 255, 255)).all(axis=2))]
            res = cv2.bitwise_and(img, mask)  # View only the obstacle
            obs_area = w*h
            print(obs_area)
            if obs_area <= 168000:
                command_publisher.publish("GO")
                cv2.putText(
                    img, "GO", (380, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
            else:
                command_publisher.publish("STOP")
                cv2.putText(img, "STOP", (380, 400),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
# show the output image
cv2.imshow("Image", img)
cv2.waitKey(1)
And this is the result I am getting:
Is there a way of separating my obstacle from the lines on the floor with some kind of filter or algorithm?
Here is an example image to work with:
Here is one way to do that using Python/OpenCV.
- Read the input
- Convert to HSV and extract only the saturation channel (black/white/gray have zero saturation)
- Threshold
- Apply morphology open and close to remove the extraneous white regions
- Get the contour and approximate to simple polygon
- Draw the polygon on the input
- Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('board.png')
# convert to HSV and extract saturation channel
sat = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:,:,1]
# threshold
thresh = cv2.threshold(sat, 90, 255, 0)[1]
# apply morphology close to fill interior regions in mask
kernel = np.ones((7,7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((13,13), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours (presumably only 1) and fit to simple polygon (quadrilateral)
cntrs = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
c = cntrs[0]
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.05 * peri, True)
# draw polygon on input
result = img.copy()
cv2.polylines(result, [np.int32(approx)], True, (0,0,255), 1, cv2.LINE_AA)
# write result to disk
cv2.imwrite("board_saturation.png", sat)
cv2.imwrite("board_thresh.png", thresh)
cv2.imwrite("board_morph.png", morph)
cv2.imwrite("board_contour.png", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("THRESH", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Saturation channel image:
Thresholded image:
Morphology cleaned image:
Contour on input:
In your image the problem seems to be the white rectangles. My approach is to check each row and, if the row contains many pixels that are close to white (255, 255, 255), make that whole row black.
Here is my code:
import cv2
import numpy as np
import random as rng

img = cv2.imread("/ur/image/directory/obstacle.png")
height, width, channels = img.shape
cv2.imshow('Source', img)

# Check each row and eliminate white rectangles (if the row contains more near-white pixels than the limit)
for x in range(0, height):
    white_counter = 0
    for y in range(0, width):
        if img[x, y, 0] >= 180 and img[x, y, 1] >= 180 and img[x, y, 2] >= 180:
            white_counter = white_counter + 1
    if white_counter > 10:
        for y in range(0, width):
            img[x, y, 0] = 0
            img[x, y, 1] = 0
            img[x, y, 2] = 0

cv2.imshow('Elimination White Rectangles', img)

# Find contours and draw a rectangle for each
src_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = 300
canny_output = cv2.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_poly = [None]*len(contours)
boundRect = [None]*len(contours)
for i, c in enumerate(contours):
    contours_poly[i] = cv2.approxPolyDP(c, 3, True)
    boundRect[i] = cv2.boundingRect(contours_poly[i])
rng.seed(12345)
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i in range(len(contours)):
    color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
    cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])),
                  (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)
cv2.imshow('Output', drawing)
cv2.waitKey(0)
cv2.destroyAllWindows()
Eliminate White Rectangles:
Result:
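As a side note on the row-blanking step above: the nested Python loops can be slow on large images. The same step can be done in a vectorized way with NumPy; a minimal sketch, assuming img is the BGR image loaded as in the code above:
```
# Sketch: vectorized equivalent of the row-blanking loops above.
near_white = np.all(img >= 180, axis=2)        # True where B, G and R are all >= 180
rows_to_clear = near_white.sum(axis=1) > 10    # rows with more than 10 near-white pixels
img[rows_to_clear] = 0                         # make those rows black
```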
