Detect open or closed eyes using OpenCV in Python

I'm trying to detect whether the user's eyes are open or closed in a live video, using the Haar cascade algorithm in Python.
Unfortunately it doesn't work well.
I understood that "haarcascade_eye.xml" is used to detect open eyes and "haarcascade_lefteye_2splits.xml" is used to detect an eye (closed or open).
I wanted to compare the count of open eyes against the count of eyes in general in the video, but it falsely recognizes closed eyes. Are there other/better ways to improve it?
Here is my code:
import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
lefteye_cascade = cv2.CascadeClassifier('haarcascade_lefteye_2splits.xml')
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # regions of interest: right half of the detected face
        # (integer division is required for array slicing)
        roi_gray = gray[y:y + h, x + w // 2:x + w]
        roi_color = img[y:y + h, x + w // 2:x + w]
        eye = 0
        openEye = 0
        openEyes = eye_cascade.detectMultiScale(roi_gray)
        AllEyes = lefteye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in openEyes:
            openEye += 1
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        for (ex, ey, ew, eh) in AllEyes:
            eye += 1
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 40), 2)
        if openEye != eye:
            print('alert')
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()

Eventually I used the dlib library for recognizing facial landmarks :)

Check this out. It gives the eye status; change the threshold according to the lighting conditions.
Resource: https://www.pyimagesearch.com/2017/04/24/eye-blink-detection-opencv-python-dlib/
# import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2

def eye_aspect_ratio(eye):
    # compute the euclidean distances between the two sets of
    # vertical eye landmark (x, y)-coordinates
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = dist.euclidean(eye[0], eye[3])
    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)
    # return the eye aspect ratio
    return ear

# eye aspect ratio threshold, and the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.35
EYE_AR_CONSEC_FRAMES = 3

# initialize the frame counters and the total number of blinks
COUNTER = 0
TOTAL = 0

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
time.sleep(1.0)

# loop over frames from the video stream
while True:
    # grab the frame from the threaded video stream, resize it,
    # and convert it to grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects = detector(gray, 0)
    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # combine the eye aspect ratios of both eyes
        # (EYE_AR_THRESH above is tuned for this combined value)
        ear = (leftEAR + rightEAR)
        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        # check to see if the eye aspect ratio is below the blink
        # threshold, and display the eye status accordingly
        if ear < EYE_AR_THRESH:
            cv2.putText(frame, "Eye: {}".format("close"), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # otherwise, the eye aspect ratio is not below the blink
        # threshold
        else:
            cv2.putText(frame, "Eye: {}".format("Open"), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
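For intuition about where the threshold sits, here is a small worked example (the landmark coordinates below are made up for illustration, not taken from a real face) showing how the combined EAR used above separates open from closed eyes:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # same formula as above: (|p1-p5| + |p2-p4|) / (2 * |p0-p3|)
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)

# made-up landmark points for one eye, roughly to scale
open_eye = [(0, 0), (10, -6), (20, -6), (30, 0), (20, 6), (10, 6)]     # tall opening
closed_eye = [(0, 0), (10, -1), (20, -1), (30, 0), (20, 1), (10, 1)]   # lids nearly touching

print(eye_aspect_ratio(open_eye))    # ~0.40 per eye -> combined ~0.80, above the 0.35 threshold
print(eye_aspect_ratio(closed_eye))  # ~0.07 per eye -> combined ~0.13, below the 0.35 threshold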

# Import the necessary packages
import datetime as dt
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
from EAR_calculator import *
from imutils import face_utils
from imutils.video import VideoStream
import imutils
import dlib
import time
import cv2
from playsound import playsound
from scipy.spatial import distance as dist
import os
import csv
import numpy as np
import pandas as pd

# Declare a constant which will work as the threshold for the EAR value,
# below which a frame is regarded as a blink
EAR_THRESHOLD = 0.2
# Declare another constant to hold the consecutive number of frames to consider for a blink
CONSECUTIVE_FRAMES = 20
# Another constant which will work as a threshold for the MAR value
MAR_THRESHOLD = 14

# Now, initialize dlib's face detector model as 'detector' and the landmark predictor model as 'predictor'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Grab the indexes of the facial landmarks for the left and right eye respectively
(lstart, lend) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rstart, rend) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mstart, mend) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

image = cv2.imread("images/raja_sleepy.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces
rects = detector(image, 1)
if len(rects) == 1:
    for (i, rect) in enumerate(rects):
        shape = predictor(gray, rect)
        # Convert it to a (68, 2) size numpy array
        shape = face_utils.shape_to_np(shape)
        # Draw a rectangle over the detected face
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Extract the eye and mouth landmarks
        leftEye = shape[lstart:lend]
        rightEye = shape[rstart:rend]
        mouth = shape[mstart:mend]
        # Compute the EAR for both the eyes
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # Take the average of both the EARs
        EAR = (leftEAR + rightEAR) / 2.0
        # Compute the convex hull for both the eyes and then visualize it
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        # Draw the contours
        cv2.drawContours(image, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(image, [rightEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(image, [mouth], -1, (0, 255, 0), 1)
        MAR = mouth_aspect_ratio(mouth)
        # Check if EAR < EAR_THRESHOLD; if so, the eyes are closed in this image
        if EAR < EAR_THRESHOLD:
            cv2.drawContours(image, [leftEyeHull], -1, (0, 0, 255), 1)
            cv2.drawContours(image, [rightEyeHull], -1, (0, 0, 255), 1)
            cv2.putText(image, "Sleepy", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        else:
            FRAME_COUNT = 0
        # Check if the person is yawning
        if MAR > MAR_THRESHOLD:
            cv2.drawContours(image, [mouth], -1, (0, 0, 255), 1)
            cv2.putText(image, "Yawn ", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Output", image)
        cv2.waitKey(0)
elif len(rects) == 0:
    print("Face not available")
else:
    print("Multiple faces detected")

Related

Shapes (rectangle, diamond) detection using OpenCV library

I have two connected rectangles with a diamond in between them. In an Entity Relationship Diagram (ERD) we have the same scenario. My aim is to detect both rectangles and the diamond in between them.
import cv2
import numpy as np
from matplotlib import pyplot as plt

# reading image
img = cv2.imread('pic4.jpeg')
# converting image into grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# setting threshold of gray image
_, threshold = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# using the findContours() function
contours, _ = cv2.findContours(
    threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
i = 0
# loop over the contours and label each shape
for contour in contours:
    # here we are ignoring the first contour because
    # findContours detects the whole image as a shape
    if i == 0:
        i = 1
        continue
    # cv2.approxPolyDP() function to approximate the shape
    approx = cv2.approxPolyDP(
        contour, 0.01 * cv2.arcLength(contour, True), True)
    # using the drawContours() function
    cv2.drawContours(img, [contour], 0, (0, 0, 0), 5)
    # finding the center point of the shape
    M = cv2.moments(contour)
    if M['m00'] != 0.0:
        x = int(M['m10'] / M['m00'])
        y = int(M['m01'] / M['m00'])
    # putting the shape name at the center of each shape
    if len(approx) == 3:
        cv2.putText(img, 'Triangle', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    elif len(approx) == 4:
        cv2.putText(img, 'Rectangle', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    elif len(approx) == 5:
        cv2.putText(img, 'Pentagon', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    elif len(approx) == 6:
        cv2.putText(img, 'Hexagon', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    else:
        cv2.putText(img, 'circle', (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)

# displaying the image after drawing contours
cv2.imshow('pic4', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Input image
Output image after shapes detection
The output image does not detect the correct shapes. It should label each of the two rectangle shapes as a rectangle exactly once.
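One possible way to tell the two quadrilaterals apart (a sketch of an approach, not a verified fix for this exact image): both the rectangle and the diamond come back from approxPolyDP with four vertices, but an axis-aligned rectangle fills almost all of its upright bounding box while a diamond fills only about half of it, so the extent can be used to separate them:

import cv2

def classify_quad(contour):
    # assumes approxPolyDP already reported 4 vertices for this contour
    x, y, w, h = cv2.boundingRect(contour)
    extent = cv2.contourArea(contour) / float(w * h)  # filled fraction of the upright box
    # an axis-aligned rectangle fills roughly 100% of its bounding box,
    # a diamond (rotated square) only about 50%
    return 'Rectangle' if extent > 0.8 else 'Diamond'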

How to find the length of the contour selected with area in python?

I am new to OpenCV. I have two questions to ask.
1. I am trying to print the number of contours remaining after filtering by area. I am getting the correct output in imshow but not in the print statement. I understand that print(len(contours)) gives the total number of contours, but I need the number of contours with area > 400. You can check the Python code below for more details. Please help me with this.
2. Is it possible to set the threshold value above 255? Whenever I change it to above 255, I get a black image even if I increase the max value.
Thank you!
import cv2 as cv
import numpy as np

im_color = cv.imread("D:\python_project\Focus_detection_1/_00005_cells.png", cv.IMREAD_COLOR)
im_gray = cv.cvtColor(im_color, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(im_gray, thresh=254, maxval=255, type=cv.THRESH_BINARY)
mask = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)
im_thresh_color = cv.bitwise_and(im_color, mask)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)
for c in contours:
    area = cv.contourArea(c)
    if area > 400:
        x, y, w, h = cv.boundingRect(c)
        im = cv.rectangle(im_thresh_color, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)  # -1 draws all the contours, 0 is the 1st contour and so on
        text = cv.putText(im, 'Focused', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 2)
no_of_images = len(contours)
print("images:", no_of_images)
while True:
    cv.imshow("original image", im_color)
    cv.imshow("Thresh color with contour", im_thresh_color)
    # print("n:", len(im_thresh_color))
    if cv.waitKey(1) == ord("n"):
        break
cv.destroyAllWindows()
Here is the updated code for your first question. It counts how many contours pass the area filter and then prints that count:
import cv2 as cv
import numpy as np

im_color = cv.imread("D:\python_project\Focus_detection_1/_00005_cells.png", cv.IMREAD_COLOR)
im_gray = cv.cvtColor(im_color, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(im_gray, thresh=254, maxval=255, type=cv.THRESH_BINARY)
mask = cv.cvtColor(thresh, cv.COLOR_GRAY2BGR)
im_thresh_color = cv.bitwise_and(im_color, mask)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)
counter = 0
for c in contours:
    area = cv.contourArea(c)
    if area > 400:
        x, y, w, h = cv.boundingRect(c)
        im = cv.rectangle(im_thresh_color, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # -1 draws all the contours, 0 is the 1st contour and so on
        cv.drawContours(im_thresh_color, contours, -1, (0, 0, 255), 2)
        text = cv.putText(im, 'Focused', (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 2)
        counter += 1
print("images:", counter)
while True:
    cv.imshow("original image", im_color)
    cv.imshow("Thresh color with contour", im_thresh_color)
    # print("n:", len(im_thresh_color))
    if cv.waitKey(1) == ord("n"):
        break
cv.destroyAllWindows()
Answer to question 2: no, it is not possible to go above 255, because the range of 8-bit R, G, B pixel values is 0 to 255.
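As a side note (an equivalent, slightly more compact variant, not part of the original answer), the same count can be obtained with a list comprehension, reusing the contours variable from the script above:

# count the contours whose area exceeds 400
large_contours = [c for c in contours if cv.contourArea(c) > 400]
print("images:", len(large_contours))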

Opencv (opencv-contrib) is changing the color of image after using object detection module (cv2.dnn.readNet)

I trained a YOLOv4 model and used opencv-contrib because OpenCV does not support YOLOv4 yet (you can use plain OpenCV with YOLOv3). There are 2 problems with the code:
When the final image is shown using cv2.imshow, it shows the triangle as yellow instead of blue. I need to extract that triangle and pass it to some other network, so I cannot use the yellow image.
It gives results only when we use scale=1/255; otherwise, it provides bad results. Why is that?
I want to ask why it is changing the colours and how I can prevent it. I know that it corresponds to the BGR format of OpenCV, but how can it be resolved?
import requests
import numpy as np
from PIL import Image
from io import BytesIO
import cv2
CONFIDENCE_THRESHOLD = 0.5
NMS_THRESHOLD = 0.5
COLORS = [(0, 255, 255), (255, 255, 0), (0, 255, 0), (255, 0, 0)]
net = cv2.dnn.readNet("./yolov4-obj_best_1_class.weights", "./yolov4-custom_1_class.cfg")
model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416),scale=1/255.)
url = 'https://instasolv1.s3.ap-south-1.amazonaws.com/QuestionBank/5e9ad4b1e1c473f2bce0e4ff/crop_image.png' # Check original image
response = requests.get(url)
img = Image.open(BytesIO(response.content))
img_array = np.array(img)  # convert the PIL image to a NumPy array (this step is implied; the line is missing from the original snippet)
classes, scores, boxes = model.detect(img_array, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)
box = boxes[0]
(x, y) = (box[0], box[1])
(w, h) = (box[2], box[3])
cv2.rectangle(img_array, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Text"
cv2.putText(img_array, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
            0.5, (255, 0, 255), 2)
cv2.imshow("Show",img_array) # given result has yellow triangle
cv2.waitKey()
cv2.destroyAllWindows()
Run the following code:
import requests
import numpy as np
from PIL import Image
from io import BytesIO
import cv2
CONFIDENCE_THRESHOLD = 0.5
NMS_THRESHOLD = 0.5
COLORS = [(0, 255, 255), (255, 255, 0), (0, 255, 0), (255, 0, 0)]
#net = cv2.dnn.readNet("./yolov4-obj_best_1_class.weights", "./yolov4-custom_1_class.cfg")
#model = cv2.dnn_DetectionModel(net)
#model.setInputParams(size=(416, 416),scale=1/255.)
#url = 'https://instasolv1.s3.ap-south-1.amazonaws.com/QuestionBank/5e9ad4b1e1c473f2bce0e4ff/crop_image.png' # Check original image
#response = requests.get(url)
#img = Image.open(BytesIO(response.content))
img = Image.open('RGBY_example.png')
#classes, scores, boxes = model.detect(img_array, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)
img_array_original = np.array(img)
img_array = img_array_original
#img_array = cv2.cvtColor(img_array_original, cv2.COLOR_BGR2RGB)
#box = boxes[0]
box = [100, 100, 50, 60]
(x, y) = (box[0], box[1])
(w, h) = (box[2], box[3])
cv2.rectangle(img_array, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Text"
cv2.putText(img_array, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
            0.5, (255, 0, 255), 2)
cv2.imshow("Show", img_array)  # the displayed result has erratic colors
cv2.waitKey()
cv2.destroyAllWindows()
...and you get erratic colors as a result.
To solve that, uncomment the img_array = cv2.cvtColor(img_array_original, cv2.COLOR_BGR2RGB) line so that img_array receives the converted array, and you will see the correct colors. This way you see the idea and can handle whatever colour spaces your images use in the future.
Answer for question no. 2 ("It gives results only when we use scale=1/255; otherwise, it provides bad results. Why is that?"):
The required scale for the inputs of the DNN is from 0 to 1. In RGB images each pixel colour value is between 0 and 255. Thus, to preprocess the image for the DNN, each pixel value is multiplied by 1/255.
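For reference, a minimal sketch of the PIL-to-OpenCV round trip using standard calls (the filename is the example image from above; adjust as needed):

import numpy as np
import cv2
from PIL import Image

pil_img = Image.open("RGBY_example.png")     # PIL loads images in RGB order
rgb = np.array(pil_img)                      # NumPy array, still RGB channel order
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)   # convert to BGR before any cv2 drawing/imshow

cv2.imshow("correct colors", bgr)            # cv2.imshow expects BGR
cv2.waitKey(0)
cv2.destroyAllWindows()

# to go back to PIL after OpenCV processing:
back_to_pil = Image.fromarray(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))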

Detect person wearing red in a video

I am working on a video with many people, a few of whom are wearing red t-shirts. I have all the persons detected and tracked with person detection and tracking models. How can I distinguish the persons wearing red from the others?
I am reading the frames in OpenCV format. Suppose (x, y) is a coordinate on the body where the colour is red. How can I get the colour information at that coordinate in OpenCV format and check whether it falls within the red colour range?
I only need to highlight the bounding boxes of the persons wearing red.
Can someone help me figure out a solution?
Thank you!
The better way is to change the colour space to HSV and find the hue value range for the colour:
Take each frame of the video
Detect humans first, then extract the human region (source)
Convert from BGR to HSV colour space
Threshold the HSV image for a range of red colour
Identifying red-colour t-shirt guys in video
We can identify the human region in images using the following code:
# import the necessary packages
import time
import cv2
import imutils
import numpy as np
from imutils.video import FPS
from imutils.video import VideoStream

def get_centered_contours(mask):
    # find contours
    cntrs = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
    sorted_contours = sorted(cntrs, key=cv2.contourArea, reverse=True)
    filtered_contours = []
    if sorted_contours != []:
        for k in range(len(sorted_contours)):
            if cv2.contourArea(sorted_contours[k]) < 1000.0:
                filtered_contours = sorted_contours[0:k]
                return filtered_contours
    return filtered_contours

def check_red_colour_person(roi):
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # define a range of red colour in HSV
    lower_red = np.array([0, 50, 50])
    upper_red = np.array([10, 255, 255])
    # threshold the HSV image to get only red colours
    mask = cv2.inRange(hsv, lower_red, upper_red)
    cnts = get_centered_contours(mask)
    if cnts != []:
        return True
    else:
        return False

# paths to the detector files and the minimum detection confidence
prototxt = 'MobileNetSSD_deploy.prototxt.txt'
model = 'MobileNetSSD_deploy.caffemodel'
confidence_level = 0.8

# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["person"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(prototxt, model)

# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()

# loop over the frames from the video stream
while True:
    try:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                     0.007843, (300, 300), 127.5)
        # pass the blob through the network and obtain the detections and
        # predictions
        net.setInput(blob)
        detections = net.forward()
        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]
            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > confidence_level:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                roi = frame[startY:endY, startX:endX]
                # cv2.imwrite('roi_{}_{}_{}_{}.png'.format(startX, startY, endX, endY), roi)
                if check_red_colour_person(roi):
                    label = "{}: {:.2f}%".format(' Red T-shirt person',
                                                 confidence * 100)
                    cv2.imwrite(
                        'Red-T-shirt_guy_{}_{}_{}_{}.png'.format(startX, startY, endX,
                                                                 endY), roi)
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 0, 255), 2)
                else:
                    # label non-red detections as well so `label` is always defined
                    label = "{}: {:.2f}%".format('Person', confidence * 100)
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (255, 0, 0), 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        # update the FPS counter
        fps.update()
    except Exception as e:
        print("Exception occurred:", e)
        continue

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
You can also set the colour boundaries directly in BGR:
boundaries = [
    ([17, 15, 100], [50, 56, 200])]
So here the tuple is ([17, 15, 100], [50, 56, 200]).
Here, we are saying that all pixels in our image that have B >= 17, G >= 15, and R >= 100 along with B <= 50, G <= 56, and R <= 200 will be considered red (remember that OpenCV stores pixels in BGR order).
You can implement it as follows:
for (lower, upper) in boundaries:
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    # find the colors within the specified boundaries and apply
    # the mask
    mask = cv2.inRange(image, lower, upper)
    output = cv2.bitwise_and(image, image, mask=mask)
    # show the images
    cv2.imshow("images", np.hstack([image, output]))

How can I detect circle by using openCV in python?

I want to detect circles in a picture by using a Haar cascade. I created a cascade XML.
import cv2
import numpy as np

img = cv2.imread("C://OpenCVcascade//resimler//coins.jpg")
circles_cascade = cv2.CascadeClassifier("C://Cascade//dairetanima.xml")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
circles = circles_cascade.detectMultiScale(gray, 1.1, 1)
if circles is not None:
    circles = np.uint16(np.around(circles))
    for (x, y, w, h) in circles:
        center = (x + w // 2, y + h // 2)
        radius = (w + h) // 4
        cv2.circle(img, center, radius, (255, 0, 0), 2)
cv2.imshow('image', img)
cv2.waitKey()
cv2.destroyAllWindows()
My result:
I already know there are different methods to detect circles, but I am trying to do it with the cascade method because, after this part, I will use it for real-time detection.
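For the real-time step mentioned above, a minimal sketch (assuming the same cascade XML works on webcam frames; untested with that specific file) that runs the cascade on each frame from the camera:

import cv2

circles_cascade = cv2.CascadeClassifier("C://Cascade//dairetanima.xml")
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # same detector as in the still-image version, run once per frame
    circles = circles_cascade.detectMultiScale(gray, 1.1, 1)
    for (x, y, w, h) in circles:
        cv2.circle(frame, (x + w // 2, y + h // 2), (w + h) // 4, (255, 0, 0), 2)
    cv2.imshow('circles', frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()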
