Related
I want to implement a simple traffic light detection algorithm in Python with the help of OpenCV. Of course, for high accuracy we should use a pre-trained deep learning model, but for now I just want the simplest, non-comprehensive approach. Namely, I know that traffic lights are green, red, or yellow, so I found the following link, which contains code for detecting three colors.
I know, once again, that this is not an accurate method; it is just for self-learning. I have tested this code on one video. Here is a cropped frame from that video:
After running my code, I got the following image:
As you can see, the lowest part of the image is ignored and only the upper part is considered.
How can I adapt or change my code so that it checks the whole image and detects the actual traffic lights as well?
Should I resize the image to a lower resolution?
Should I use some other approach?
import numpy as np
import cv2
import warnings
warnings.filterwarnings("ignore")

# Open the video file
live_video = cv2.VideoCapture("traffic_light.mp4")

# Start a while loop
while True:
    # Read the video frame by frame; stop when no frame is returned
    ret, imageFrame = live_video.read()
    if not ret:
        break

    # Convert the frame from BGR to HSV (hue-saturation-value) color space
    hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)
    # Set range for red color and define mask
    red_lower = np.array([136, 87, 111], np.uint8)
    red_upper = np.array([180, 255, 255], np.uint8)
    red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)

    # Set range for green color and define mask
    green_lower = np.array([25, 52, 72], np.uint8)
    green_upper = np.array([102, 255, 255], np.uint8)
    green_mask = cv2.inRange(hsvFrame, green_lower, green_upper)

    # Set range for blue color and define mask
    blue_lower = np.array([94, 80, 2], np.uint8)
    blue_upper = np.array([120, 255, 255], np.uint8)
    blue_mask = cv2.inRange(hsvFrame, blue_lower, blue_upper)
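    # (Not in the original code: since the question mentions yellow lights, a similar
    # range could be added here. These HSV bounds are an assumption and may need tuning;
    # a matching dilate and contour loop would also be needed further below.)
    yellow_lower = np.array([20, 100, 100], np.uint8)
    yellow_upper = np.array([35, 255, 255], np.uint8)
    yellow_mask = cv2.inRange(hsvFrame, yellow_lower, yellow_upper)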
    # Morphological transform (dilation) for each color; the bitwise_and between
    # imageFrame and each mask keeps only that particular color
    kernal = np.ones((5, 5), "uint8")

    # For red color
    red_mask = cv2.dilate(red_mask, kernal)
    res_red = cv2.bitwise_and(imageFrame, imageFrame, mask=red_mask)

    # For green color
    green_mask = cv2.dilate(green_mask, kernal)
    res_green = cv2.bitwise_and(imageFrame, imageFrame, mask=green_mask)

    # For blue color
    blue_mask = cv2.dilate(blue_mask, kernal)
    res_blue = cv2.bitwise_and(imageFrame, imageFrame, mask=blue_mask)
    # Creating contour to track red color
    contours, hierarchy = cv2.findContours(red_mask,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            imageFrame = cv2.rectangle(imageFrame, (x, y),
                                       (x + w, y + h),
                                       (0, 0, 255), 2)
            cv2.putText(imageFrame, "Red Colour", (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                        (0, 0, 255))

    # Creating contour to track green color
    contours, hierarchy = cv2.findContours(green_mask,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            imageFrame = cv2.rectangle(imageFrame, (x, y),
                                       (x + w, y + h),
                                       (0, 255, 0), 2)
            cv2.putText(imageFrame, "Green Colour", (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (0, 255, 0))

    # Creating contour to track blue color
    contours, hierarchy = cv2.findContours(blue_mask,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            imageFrame = cv2.rectangle(imageFrame, (x, y),
                                       (x + w, y + h),
                                       (255, 0, 0), 2)
            cv2.putText(imageFrame, "Blue Colour", (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (255, 0, 0))
    # Program termination
    cv2.imshow("Multiple Color Detection in Real-Time", imageFrame)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        live_video.release()
        cv2.destroyAllWindows()
        break
I am thinking about resizing the frames, but first I would like to hear your opinions. The main tricky part, I think, is the contour section above, since it only marks the colours inside specific contours determined by their bounding coordinates. How can I change it?
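For example, here is the kind of resizing I have in mind, as a minimal sketch (the 0.5 scale factor is just an assumption); it would go right after reading each frame:

# Sketch: shrink each frame before the HSV conversion (scale factor is an assumption)
imageFrame = cv2.resize(imageFrame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)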
I have two connected rectangles with a diamond in between them. In an Entity Relationship Diagram (ERD) we have the same scenario. My aim is to detect both rectangles and the diamond between them.
import cv2
import numpy as np
from matplotlib import pyplot as plt

# reading image
img = cv2.imread('pic4.jpeg')

# converting image into grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# setting threshold of gray image
_, threshold = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

# using the findContours() function
contours, _ = cv2.findContours(
    threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

i = 0

# loop over the contours and label each shape
for contour in contours:

    # ignore the first contour because findContours
    # detects the whole image as a shape
    if i == 0:
        i = 1
        continue

    # cv2.approxPolyDP() function to approximate the shape
    approx = cv2.approxPolyDP(
        contour, 0.01 * cv2.arcLength(contour, True), True)

    # using drawContours() function
    cv2.drawContours(img, [contour], 0, (0, 0, 0), 5)

    # finding the center point of the shape
    M = cv2.moments(contour)
    if M['m00'] != 0.0:
        x = int(M['m10']/M['m00'])
        y = int(M['m01']/M['m00'])

    # putting the shape name at the center of each shape
    if len(approx) == 3:
        cv2.putText(img, 'Triangle', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    elif len(approx) == 4:
        cv2.putText(img, 'Rectangle', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    elif len(approx) == 5:
        cv2.putText(img, 'Pentagon', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    elif len(approx) == 6:
        cv2.putText(img, 'Hexagon', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    else:
        cv2.putText(img, 'circle', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)

# displaying the image after drawing contours
cv2.imshow('pic4', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Input image
Output image after shapes detection
The output image does not label the shapes correctly. It should write 'Rectangle' once on each of the two rectangle shapes.
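One idea I am considering, as a rough sketch (the 0.8 extent threshold is just a guess), is to separate the upright rectangles from the diamond by how much of its bounding box each 4-vertex contour fills; this would replace the len(approx) == 4 branch above:

# Sketch: among 4-vertex contours, an upright rectangle nearly fills its bounding
# box, while a diamond fills only about half of it (the 0.8 cut-off is an assumption)
x, y, w, h = cv2.boundingRect(contour)
extent = cv2.contourArea(contour) / float(w * h)
if len(approx) == 4:
    label = 'Rectangle' if extent > 0.8 else 'Diamond'
    cv2.putText(img, label, (x, y),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)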
I am new to OpenCV. I have two questions to ask.
I am trying to print the number of contours that remain after applying the area filter. I get the correct output in imshow but not in the print statement. I understand that print(len(contours)) gives the total number of contours, but I need the number of contours with area > 400. You can check the Python code below for more details. Please help me with this.
Is it possible to set the threshold value above 255? Whenever I change it to above 255, I get a black image, even if I also increase the max value.
Thank you!
import cv2 as cv
import numpy as np

im_color = cv.imread("D:\python_project\Focus_detection_1/_00005_cells.png", cv.IMREAD_COLOR)
im_gray = cv.cvtColor(im_color, cv.COLOR_BGR2GRAY)

_, thresh = cv.threshold(im_gray, thresh=254, maxval=255, type=cv.THRESH_BINARY)
mask = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)
im_thresh_color = cv.bitwise_and(im_color, mask)

contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)

for c in contours:
    area = cv.contourArea(c)
    if area > 400:
        x, y, w, h = cv.boundingRect(c)
        im = cv.rectangle(im_thresh_color, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # -1 draws all the contours, 0 is the 1st contour and so on
        cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)
        text = cv.putText(im, 'Focused', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 2)

no_of_images = len(contours)
print("images:", no_of_images)

while True:
    cv.imshow("original image", im_color)
    cv.imshow("Thresh color with contour", im_thresh_color)
    # print("n:", len(im_thresh_color))
    if cv.waitKey(1) == ord("n"):
        break

cv.destroyAllWindows()
Here is the updated code for your first question. This code counts how many contours have an area greater than 400 and then prints that count.
import cv2 as cv
import numpy as np

im_color = cv.imread("D:\python_project\Focus_detection_1/_00005_cells.png", cv.IMREAD_COLOR)
im_gray = cv.cvtColor(im_color, cv.COLOR_BGR2GRAY)

_, thresh = cv.threshold(im_gray, thresh=254, maxval=255, type=cv.THRESH_BINARY)
mask = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)
im_thresh_color = cv.bitwise_and(im_color, mask)

contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)

counter = 0
for c in contours:
    area = cv.contourArea(c)
    if area > 400:
        x, y, w, h = cv.boundingRect(c)
        im = cv.rectangle(im_thresh_color, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # -1 draws all the contours, 0 is the 1st contour and so on
        cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)
        text = cv.putText(im, 'Focused', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 2)
        counter += 1

print("images:", counter)

while True:
    cv.imshow("original image", im_color)
    cv.imshow("Thresh color with contour", im_thresh_color)
    # print("n:", len(im_thresh_color))
    if cv.waitKey(1) == ord("n"):
        break

cv.destroyAllWindows()
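As a shorter alternative with the same result, the count can be computed in a single expression (a sketch that reuses the contours list from above):

# Count contours whose area exceeds 400 without an explicit loop counter
counter = sum(1 for c in contours if cv.contourArea(c) > 400)
print("images:", counter)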
Answer to question 2: no, it is not possible to set the threshold above 255, because 8-bit image channels only store values in the range 0 to 255.
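A quick way to confirm this, as a sketch against the variables from the code above:

# 8-bit grayscale images use uint8, so pixel values can never exceed 255;
# a threshold above 255 therefore selects nothing and the result looks black
print(im_gray.dtype)   # uint8
print(im_gray.max())   # at most 255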
I have cropped the original photo to the surrounding area, but the crop depends on the mouse position and will be different every time, so I do not want a static crop with fixed dimensions.
The contours do not pick up the rectangle I want because it is an incomplete rectangle: the cursor is in the way.
Currently, my code is able to detect the contours, but it only grabs the outer rectangle and makes a small crop.
I would like it to detect the inner rectangle. Do I have to somehow complete the rectangle first?
Source image
Contours detected
What I would LIKE to crop
What it is currently cropping
import cv2
import imutils  # https://pypi.org/project/imutils/
import numpy as np

...

def crop_hover_list(cursor_coord_x, cursor_coord_y):
    img_test = cv2.imread("source.jpg")
    grayimage = cv2.cvtColor(img_test, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(grayimage, 235, 255, cv2.THRESH_BINARY)

    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(img_test, contours, -1, (0, 255, 0), 3)
    cv2.imshow("test", img_test)
    cv2.waitKey(0)

    for contour in contours:
        if cv2.contourArea(contour) < 250:
            continue
        print(list(contour))
        ext_left = tuple(contour[contour[:, :, 0].argmin()][0])
        ext_right = tuple(contour[contour[:, :, 0].argmax()][0])
        ext_top = tuple(contour[contour[:, :, 1].argmin()][0])
        ext_bot = tuple(contour[contour[:, :, 1].argmax()][0])
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(img_test, (x, y), (x + w, y + h), (0, 0, 255), 3)
        cv2.imshow("test", img_test)
        cv2.waitKey(0)
I have your test image in an images/ folder. Change the path as you need to test the script.
img_test = cv2.imread("images/MCeLz.png")
Please pay attention here:
ret, contours, hierarchy = cv2.findContours ...
I need the extra 'ret' return value for my OpenCV version 3.2; you may not.
My solution uses two criteria: a minimum area, as you did, and a colour within a given range. Then I check that the cursor is inside the contour's bounding box.
import cv2
import imutils  # https://pypi.org/project/imutils/
import numpy as np


def crop_hover_list(cursor_coord_x, cursor_coord_y):
    img_test = cv2.imread("images/MCeLz.png")
    cimg = img_test.copy()
    mask = cv2.inRange(img_test, (200, 200, 200), (235, 235, 235))
    cv2.imshow("test", img_test)
    cv2.imshow('mask', mask)

    ret, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        if cv2.contourArea(contour) < 2000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        is_cursor_inside = x <= cursor_coord_x <= (x + w) and y <= cursor_coord_y <= (y + h)
        if is_cursor_inside:
            cv2.rectangle(cimg, (x, y), (x + w, y + h), (0, 0, 255), 4)
            cv2.drawContours(cimg, [contour], 0, (0, 255, 0), 3)
            crop = img_test[y:y+h, x:x+w]
            cv2.imshow("crop", crop)

    cv2.imshow("cimg", cimg)
    cv2.imshow("test", img_test)
    cv2.waitKey(0)


crop_hover_list(300, 50)
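If you prefer to "complete" the broken inner rectangle instead of relying only on the colour range, a morphological close on the mask before findContours can bridge the gap the cursor leaves; a sketch (the kernel size here is only a guess to tune):

# Sketch: close small gaps (e.g. where the cursor breaks the border) in the mask
kernel = np.ones((15, 15), np.uint8)
closed_mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)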
I have been working with OpenCV in order to detect a squared obstacle. So far this is the image I get after applying filters and Canny.
The obstacle I am trying to identify is the horizontal one; the three vertical rectangles are guide lines on the floor. My goal is to keep only the horizontal rectangle, separating it from the others, but after applying findContours I only get a single object that includes all the shapes. This is the code I have been using in order to find only the biggest rectangle by area:
# find the biggest contour (c) by area
if contours != 0:
    if not contours:
        print("Empty")
    else:
        bigone = max(contours, key=cv2.contourArea) if max else None
        area = cv2.contourArea(bigone)
        if area > 10000:
            x, y, w, h = cv2.boundingRect(bigone)
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
            cv2.putText(img, "Obstacle", (x + w//2, y - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            pts = np.array(
                [[[x, y], [x+w, y], [x+w, y+h], [x, y+h]]], dtype=np.int32)
            cv2.fillPoly(mask, pts, (255, 255, 255))
            # values = img[np.where((mask == (255, 255, 255)).all(axis=2))]
            res = cv2.bitwise_and(img, mask)  # view only the obstacle

            obs_area = w*h
            print(obs_area)
            if obs_area <= 168000:
                command_publisher.publish("GO")
                cv2.putText(
                    img, "GO", (380, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
            else:
                command_publisher.publish("STOP")
                cv2.putText(img, "STOP", (380, 400),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)

# show the output image
cv2.imshow("Image", img)
cv2.waitKey(1)
And this is the result I am getting:
Is there a way of separating my obstacle from the lines on the floor with some kind of filter or algorithm?
Here is an example image to work with:
Here is one way to do that using Python/OpenCV.
- Read the input
- Convert to HSV and extract only the saturation channel (black/white/gray have zero saturation)
- Threshold
- Apply morphology open and close to remove the extraneous white regions
- Get the contour and approximate to simple polygon
- Draw the polygon on the input
- Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('board.png')
# convert to HSV and extract saturation channel
sat = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:,:,1]
# threshold
thresh = cv2.threshold(sat, 90, 255, 0)[1]
# apply morphology open then close to clean the mask and fill interior regions
kernel = np.ones((7,7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((13,13), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours (presumably only 1) and fit to simple polygon (quadrilateral)
cntrs = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
c = cntrs[0]
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.05 * peri, True)
# draw polygon on input
result = img.copy()
cv2.polylines(result, [np.int32(approx)], True, (0,0,255), 1, cv2.LINE_AA)
# write result to disk
cv2.imwrite("board_saturation.png", sat)
cv2.imwrite("board_thresh.png", thresh)
cv2.imwrite("board_morph.png", morph)
cv2.imwrite("board_contour.png", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("THRESH", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Saturation channel image:
Thresholded image:
Morphology cleaned image:
Contour on input:
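A small note on the contour step: if the morphology leaves more than one white blob, picking the largest contour is safer than assuming cntrs[0] is the board; a one-line variation on the code above:

# take the largest contour by area instead of assuming there is only one
c = max(cntrs, key=cv2.contourArea)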
In your image the problem seems to be the white rectangles. My approach is to check each row of the image and, if the row contains many pixels that are close to white (255,255,255), make the whole row black.
Here is my code:
import cv2
import numpy as np
import random as rng

img = cv2.imread("/ur/image/directory/obstacle.png")
height, width, channels = img.shape

cv2.imshow('Source', img)

# Check each row and eliminate white rectangles
# (if the row contains more near-white pixels than the limit)
for x in range(0, height):
    white_counter = 0
    for y in range(0, width):
        if img[x, y, 0] >= 180 and img[x, y, 1] >= 180 and img[x, y, 2] >= 180:
            white_counter = white_counter + 1
    if white_counter > 10:
        for y in range(0, width):
            img[x, y, 0] = 0
            img[x, y, 1] = 0
            img[x, y, 2] = 0

cv2.imshow('Elimination White Rectangles', img)

# Find contours and draw a rectangle for each
src_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = 300
canny_output = cv2.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

contours_poly = [None]*len(contours)
boundRect = [None]*len(contours)
for i, c in enumerate(contours):
    contours_poly[i] = cv2.approxPolyDP(c, 3, True)
    boundRect[i] = cv2.boundingRect(contours_poly[i])

rng.seed(12345)
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i in range(len(contours)):
    color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
    cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])),
                  (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)

cv2.imshow('Output', drawing)
cv2.waitKey(0)
cv2.destroyAllWindows()
Eliminate White Rectangles:
Result:
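A side note on speed: the per-pixel Python loops above are slow on large images; the same row-blanking can be written with NumPy (a sketch assuming the same 180 threshold and 10-pixel limit):

# Vectorized version of the row-blanking step
near_white = np.all(img >= 180, axis=2)       # pixels with all three channels >= 180
rows_to_blank = near_white.sum(axis=1) > 10   # rows containing more than 10 such pixels
img[rows_to_blank] = 0                        # paint those rows black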