detectMultiScale() returns an empty tuple sometimes - Python

I am trying to use a Haar cascade in OpenCV 4.0 to detect faces for emotion, gender and age estimation. Sometimes the detectMultiScale() function returns an empty tuple, which raises an error in the later parts of the recognition pipeline.
I tried creating a while loop that runs until a face is detected, but it seems that once a face is not detected in a captured frame, re-running detection on that same frame never finds it either; I keep getting empty tuples back. The weird thing is that sometimes the program works flawlessly.
The detection model is being loaded correctly, since cv2.CascadeClassifier.empty(face_cascade) returns False.
There seems to be no problem with the captured frame, since I can display it properly.
After searching, I found that detectMultiScale() does, in fact, return an empty tuple when no faces are detected.
When that happens, the face detection code later raises `'tuple' object has no attribute 'shape'`.
face_cascade = cv2.CascadeClassifier(
    'C:\\Users\\kj\\Desktop\\jeffery 1\\trained_models\\detection_models\\haarcascade_frontalface_alt.xml')
retval = cv2.CascadeClassifier.empty(face_cascade)
print(retval)
This prints False.
def video_cap(out_queue):
    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # video_capture.set(3, 768)
    # video_capture.set(4, 1024)
    while True:
        ret, bgr_image = video_capture.read()
        cv2.imshow('frame', bgr_image)
        cv2.waitKey(1000)
        cv2.destroyAllWindows()
        if not video_capture.isOpened():
            video_capture.open(0)
        if ret:
            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            faces = detect_faces(face_detection, gray_image)
            ret_list = [gray_image, rgb_image, faces]
            print("DEBUG: VIDEO_CAPTURE MODULE WORKING")
            out_queue.put(ret_list)
            return
The video_cap function runs in a separate thread.
def detect_faces(detection_model, gray_image_array):
    faces1 = detection_model.detectMultiScale(gray_image_array, scaleFactor=2, minNeighbors=10, minSize=(64, 64))
    # note: this loop re-runs detection on the same frame with the same
    # parameters, so if nothing was found the first time it never will be
    while len(faces1) == 0:
        faces1 = detection_model.detectMultiScale(gray_image_array, scaleFactor=2, minNeighbors=10, minSize=(64, 64))
        print(faces1)
        if len(faces1) != 0:
            break
    return faces1
I get the output:
()
()
()
()....
This goes on until I terminate the program.
How do I fix this?

This is a snippet of the code I used. I removed the arguments from the detectMultiScale() call and it ran fine.
Also, make sure you have the correct path to the XML files.
classifier = cv2.CascadeClassifier("../../../l-admin/anaconda3/lib/python3.6/site-packages/cv2/data/haarcascade_frontalface_default.xml")
img = cv2.imread('../Tolulope/Adetula Tolulope (2).jpg')
face = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = classifier.detectMultiScale(face)
print(type(faces), faces)
for (x, y, w, h) in faces:
    img = cv2.imwrite("facesa.png", cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3))
On a secondary note, the reason mine worked might be that my camera could locate my face thanks to the lighting. So I suggest you try it out with a still picture first before using the video.
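If you do keep the extra arguments, note that scaleFactor=2 is very coarse (each pyramid step halves the image), so it can easily miss faces. A minimal sketch, with an illustrative (untuned) scaleFactor and a guard in the capture loop instead of a retry loop:
faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(64, 64))
if len(faces) == 0:
    # nothing found in this frame; move on to the next frame
    # rather than re-running detection on the same image
    continue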

I had a similar issue when I used JPG format; in my case the main problem was the format of the image, because when I used PNG it automatically gave the tuple with correct values.
classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# read the image
img = cv2.imread('i.png')
# show the image
# cv2.imshow('shaswat face detection', img)
# convert the image to grayscale
grayscaled_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# cv2.imshow('shaswat face detection', grayscaled_img)
# detect faces in the image
# returns the top-left point plus width and height of each detection
faces = classifier.detectMultiScale(grayscaled_img)
print(faces)
# cv2.rectangle(img, face_coordinates[0], face_coordinates[1], (255, 0, 0), 10)
The output shows:
[[ 87 114 361 361]]

Related

OpenCV Python3: when attempting to switch from saved images to live video feed the program hangs

Hello there people of the internet,
The code in question uses Python 3.8.5 and OpenCV 4 (I do not know how to check the exact version, but I know it's OpenCV 4). My team and I are attempting to take a live video feed from a USB webcam and determine the distance between the camera and the object in the feed. We had some success reading the distance from image stills taken with the same camera and read via the imutils library, but now we want to calculate that data live.
Our code is below.
from imutils import paths
import numpy as np
import imutils
import cv2
import time
import os

def find_marker(image):
    # convert the image to grayscale, blur it, then detect edges
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c)

def distance_to_camera(knownWidth, focalLength, perWidth):
    # compute and return the distance from the marker to the camera
    return (knownWidth * focalLength) / perWidth

# initialize the known distance from the camera to the object
KNOWN_DISTANCE = 22
# initialize the known object width, which in this case the piece of paper is 12 inches
KNOWN_WIDTH = 11
# load the first image that contains an object that is known to be 2 feet
# from our camera, then find the paper marker in the image and
# initialize the focal length
rootimage = cv2.imread("/Volumes/404/final_rov_code/Python/images/2ft.jpg")
marker1 = find_marker(rootimage)
marker2 = marker1[0][1] - marker1[1][1]
focalLength = (marker2 * KNOWN_DISTANCE) / KNOWN_WIDTH
print(marker1)
print(marker2)
image = cv2.VideoCapture(0)
# loop over the frames
while True:
    # grab the frame, find the marker in it, then compute the
    # distance to the marker from the camera
    frame, ret = image.read()
    marker = find_marker(ret)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    print(inches)
    # draw a bounding box around the marker and display the frame
    box = cv2.cv.BoxPoints(marker) if imutils.is_cv2() else cv2.boxPoints(marker)
    box = np.int0(box)
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    cv2.putText(ret, "%.2fin" % inches,
                (ret.shape[1] - 200, ret.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
                2.0, (0, 255, 0), 3)
    cv2.imshow("image", ret)
    # if cv2.waitKey(33) == ord('q'):
    #     os.system('pause')
I understand the example should be as minimal as possible, but we have no idea what could be causing the program to hang upon reading the first frame of the video feed. Could the processing be taking too many resources on a single thread? (We're all newbies to the advanced sides of OpenCV and Python 3.)
There are no other errors that we are aware of at the moment, so no leads in the terminal as to where it could be coming from.
Thank you in advance.
Your problem is likely the result of not including a waitKey() call at the end of your while loop. It takes time for OpenCV to draw the image, so if the program doesn't pause long enough for the image to be drawn, the display just doesn't update. Check out this other StackOverflow question for more details.
In addition, you have your ret and frame variables mixed up: ret should be the first one and frame should be the second. Right now, the drawContours() call isn't going to do anything, because you're passing it a boolean instead of an image.
Making those changes fixed this for me using Python 3.9 and OpenCV 4.5.
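For reference, a minimal sketch of the corrected loop body, assuming OpenCV 3+ (only the unpacking order and the uncommented waitKey() differ from the original):
while True:
    ret, frame = image.read()  # ret is the success flag, frame is the image
    if not ret:
        break
    marker = find_marker(frame)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    box = cv2.boxPoints(marker)
    box = np.int0(box)
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    cv2.imshow("image", frame)
    if cv2.waitKey(33) == ord('q'):  # pause so the window has time to redraw
        break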

Cut and save an object recognized by color

So I would like to make a program which can detect an object by color, position and sharpness.
I am now at the point where I can detect the object by color and draw its contour and bounding box.
My problem is that I don't really know how to cut the object out of the picture and save it as an image file whenever the program recognises its contour or bounding box.
Here's a picture of what my camera is seeing:
input
output
I would like to cut out what is inside the green bounding box as many times per second as the video has frames, for as long as the object is visible. So if the video is 30 fps and the object is visible for 10 seconds, it needs to take 300 pictures.
Here is the code (I know it looks bad, I'm just trying to figure out what to use to make it work):
import cv2 as cv
import numpy as np
import os
import uuid

cap = cv.VideoCapture(1)
font = cv.FONT_HERSHEY_COMPLEX
path = os.getcwd()
print(path)

def createFolder(directory):
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        print('Error: Creating directory. ' + directory)

createFolder("./data")
# folderName = '%s' % (str(uuid.uuid4()))

while cap.isOpened():
    _, frame = cap.read()
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    # blue is the chosen one for now
    lower_color = np.array([82, 33, 39])
    upper_color = np.array([135, 206, 194])
    mask = cv.inRange(hsv, lower_color, upper_color)
    kernel = np.ones((5, 5), np.uint8)
    mask = cv.erode(mask, kernel)
    contours, hierarchy = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    # find contour
    for contour in contours:
        area = cv.contourArea(contour)
        x, y, w, h = cv.boundingRect(contour)  # boundingRect returns (x, y, w, h)
        if area > 100:
            # bounding box
            # cv.rectangle(frame, (x - 40, y - 30), (x + h * 3, y + w * 3), (0, 255, 0), 1)
            # cutting and saving
            ext_left = tuple(contour[contour[:, :, 0].argmin()][0] - 20)
            ext_right = tuple(contour[contour[:, :, 0].argmax()][0] + 20)
            ext_top = tuple(contour[contour[:, :, 1].argmin()][0] - 20)
            ext_bot = tuple(contour[contour[:, :, 1].argmax()][0] + 20)
            outfile = '%s.jpg' % (str(uuid.uuid4()))
            cropped_image = frame[ext_top[1]:ext_bot[1], ext_left[0]:ext_right[0]]
            # write images to a specified folder
            # (no leading slash in "data", or os.path.join discards `path`)
            cv.imwrite(os.path.join(path, "data", outfile), cropped_image)
    # outputs
    cv.imshow("Frame", frame)
    cv.imshow("Mask", mask)
    key = cv.waitKey(1)
    if key == 27:
        break

cap.release()
cv.destroyAllWindows()
Focusing on the question and ignoring the code style, I can say you are close to achieving your goal :)
For cropping the object, you can use the Mat copyTo method. Here is the official OpenCV documentation and here is an example from the OpenCV forums.
Now, for creating the mask from the contours, you can use the same drawContours method you already use, but provide a negative value for the thickness parameter (for example, thickness=CV_FILLED). You can see a code snippet in this stackoverflow post and check the details in the official documentation.
For saving the image to disk you can use imwrite.
So, in a nutshell: draw filled contours to a mask and use that mask to copy only the object pixels from the video frame to another Mat that you can save to disk.
Instead of posting code, I will share this very similar question with an accepted answer that may have the code snippet you are looking for.
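As a rough illustration of that recipe, a minimal sketch using the question's own variables (cv.bitwise_and stands in for C++'s copyTo, which does the same masked copy):
# build a filled mask for this contour
object_mask = np.zeros(frame.shape[:2], dtype=np.uint8)
cv.drawContours(object_mask, [contour], -1, 255, thickness=cv.FILLED)
# keep only the object pixels from the frame
object_only = cv.bitwise_and(frame, frame, mask=object_mask)
# crop to the bounding box and save
x, y, w, h = cv.boundingRect(contour)
cv.imwrite(os.path.join(path, "data", '%s.jpg' % uuid.uuid4()), object_only[y:y + h, x:x + w])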

If a car is recognized take a picture

I run this code with Python and OpenCV. What I want to do is save to my dataset/test directory the images of all the cars that the tool detects.
I run my code with:
python3 build_car_dataset.py -c cars.xml -o dataset/test
So when I detect a face and put the rectangle on it, I created an if statement saying that if the face is recognized and has the rectangle on the image, then save the picture of that face to my desired output:
if rects:
    p = os.path.sep.join([args["output"], "{}.png".format(str(total).zfill(5))])
    cv2.imwrite(p, orig)
    total += 1
The error I get is: ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all(). What should I do? Thank you in advance!
My full code is:
# USAGE
# python3 build_car_dataset.py --cascade haarcascade_frontalface_default.xml --output dataset/test
# python3 build_face_dataset.py -c haarcascade_licence_plate_rus_16stages_original.xml -o dataset/test
# python3 build_car_dataset.py -c cars.xml -o dataset/test
from imutils.video import VideoStream
import argparse, imutils, time, cv2, os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--cascade", required=True,
                help="path to where the face cascade resides")
ap.add_argument("-o", "--output", required=True,
                help="path to output directory")
args = vars(ap.parse_args())

# load OpenCV's Haar cascade for face detection from disk
detector = cv2.CascadeClassifier(args["cascade"])

# initialize the video stream, allow the camera sensor to warm up and
# initialize the total number of example faces written to disk thus far
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
total = 0

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream, clone it (just in
    # case we want to write it to disk), and then resize the frame so we
    # can apply face detection faster
    frame = vs.read()
    orig = frame.copy()
    frame = imutils.resize(frame, width=400)
    # detect faces in the grayscale frame
    rects = detector.detectMultiScale(
        cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), scaleFactor=1.1,
        minNeighbors=5, minSize=(30, 30))
    # loop over the face detections and draw them on the frame
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if rects:
            p = os.path.sep.join([args["output"], "{}.png".format(str(total).zfill(5))])
            cv2.imwrite(p, orig)
            total += 1
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
print("[INFO] {} face images stored".format(total))
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
vs.stop()
Replace:
if rects:
with:
if len(rects) > 0:
and you'll be golden =)
(Avoid rects != None here: detectMultiScale returns an empty tuple or a NumPy array, never None, and comparing a NumPy array with != is elementwise, which raises the same ambiguity error.)
I mean, you still won't be able to detect cars, but at least the error will go away. For the car detection I'd recommend using CNNs (convolutional neural networks); google for "YOLO CNN" or "SSD CNN" -- there are plenty of already existing projects that detect cars, so you may easily give yourself a good head start.
Let's say rects = [[1, 2, 3, 4], [3, 4, 5, 6]]. Then:
for (x, y, w, h) in rects:
    print("I got here:", x, y, w, h)
would print:
I got here: 1 2 3 4
I got here: 3 4 5 6
But if rects = None, you'd get the error 'NoneType' object is not iterable.
If rects = [] you get no output, and nothing inside the loop runs.
Basically what I'm saying is that because your if rects code is inside a loop that iterates over rects, you are already guaranteed that rects has info in it, since your code needed rects to be a non-empty iterable to get that far.
What you probably really want to do is check rects prior to looping over it. To be Pythonic, we'll ask forgiveness rather than permission:
rects = None
try:
    for (x, y, w, h) in rects:
        print("I got here:", x, y, w, h)
except TypeError:
    print("no rects")
# no rects
Note that your error has little to do with the majority of your code. Be sure to try to reduce your problem to the smallest possible reproducible example that has the same issue. Often by doing so, it helps solve the issue.
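Putting that together for the code in the question, a minimal sketch of the save logic with the check hoisted out of the detection loop (names as in the question):
# rects is an empty tuple or an (N, 4) array after detectMultiScale
for (x, y, w, h) in rects:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
if len(rects) > 0:
    # save the original frame once per frame that contains detections
    p = os.path.sep.join([args["output"], "{}.png".format(str(total).zfill(5))])
    cv2.imwrite(p, orig)
    total += 1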

HOG Descriptor using Python + OpenCV

I am trying to implement a HOG descriptor with OpenCV to detect pedestrians in a video. I am currently using the pre-made file shipped with OpenCV, hogcascade_pedestrians.xml. Unfortunately the documentation on this part is very poor on the internet, although the HOG descriptor is very effective for human detection. I have been writing code for pedestrian detection in Python, and I have stopped at the following:
import cv2
import numpy as np
import imutils

VidCap = cv2.VideoCapture('pedestrians.mp4')
HOGCascade = cv2.HOGDescriptor('hogcascade_pedestrians.xml')
HOGCascade.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

while True:
    _, image = VidCap.read()
    image = imutils.resize(image, width=700)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=15.0, tileGridSize=(8, 8))
    gray = clahe.apply(gray)
    winStride = (8, 8)
    padding = (16, 16)
    scale = 1.05
    meanshift = -1
    (rects, weights) = HOGCascade.detectMultiScale(gray, winStride=winStride,
                                                   padding=padding,
                                                   scale=scale,
                                                   useMeanshiftGrouping=meanshift)
    for (x, y, w, h) in rects:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 200, 255), 2)
    cv2.imshow('Image', image)
    if cv2.waitKey(5) == 27:
        break

VidCap.release()
cv2.destroyAllWindows()
I presume the script would look something like code written for Haar cascades, but I tried that and got errors. Does anyone have any idea how to implement the HOG descriptor in OpenCV with Python?
I have read the following question, but I got nothing from the second answer.
My problem is that I can't figure out how to write the code, as the documentation about this part is very poor.
Note: I am using OpenCV 3.1.0-dev with Python 2.7.11.
Use the default constructor instead:
HOGCascade = cv2.HOGDescriptor()
If you want to use that .xml, you have a lot of preparation work to do. When you finally obtain a usable descriptor from it, you would replace the cv2.HOGDescriptor_getDefaultPeopleDetector() in
setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
with your own detector.
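For the common case of the built-in people detector (no .xml involved), a minimal sketch of the usual setup, reusing the question's pedestrians.mp4:
import cv2
import imutils

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

VidCap = cv2.VideoCapture('pedestrians.mp4')
while True:
    ret, image = VidCap.read()
    if not ret:
        break
    image = imutils.resize(image, width=700)
    # the HOG people detector works on the BGR frame directly
    (rects, weights) = hog.detectMultiScale(image, winStride=(8, 8),
                                            padding=(16, 16), scale=1.05)
    for (x, y, w, h) in rects:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 200, 255), 2)
    cv2.imshow('Image', image)
    if cv2.waitKey(5) == 27:
        break
VidCap.release()
cv2.destroyAllWindows()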

how to crop the detected face in opencv and save roi as image in opencv python

I'm using OpenCV in Python, and this is my code for detecting a face and saving it, but it does not save the ROI (the detected face). I've been having trouble doing this; please help me fix it.
TRAINSET = "data/lbpcascades/lbpcascade_frontalface.xml"
DOWNSCALE = 4
cam = cv2.VideoCapture(0)  # capture a video
cv2.namedWindow("preview")
classifier = cv2.CascadeClassifier(TRAINSET)
Compare_images = []
for file in os.listdir("images"):
    if file.endswith(".jpg"):
        Compare_images.append(file)
while True:  # try to get the first frame
    _, frame = cam.read()
    key = cv2.waitKey(20)
    if key == 32:
        print "Name of Image:"
        n = raw_input()
        value = len(Compare_images)
        cv2.imwrite('images/image' + str(n) + '.jpg', frame)
        saved_image = cv2.imread("images/image" + str(n) + ".jpg")
        minisize = (saved_image.shape[1] / DOWNSCALE, saved_image.shape[0] / DOWNSCALE)
        miniframe = cv2.resize(saved_image, minisize)
        faces = classifier.detectMultiScale(miniframe)
        for f in faces:
            x, y, w, h = [v * DOWNSCALE for v in f]
            print x
            print y, w, h
            x0, y0 = int(x), int(y)
            x1, y1 = int(x + w), int(y + h)
            print x0, y0, y1, y0
            image = cv2.rectangle(saved_image, (x0, y0), (x1, y1), (0, 0, 255), 2)
            roi = saved_image[y0:y1, x1:x0]  # crop
            cv2.imwrite('roi.jpg', roi)
            cv2.imshow("adsa", saved_image)
    cv2.putText(frame, "Press ESC to close.", (5, 25),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
    cv2.imshow("preview", frame)
Do you mean:
.
.
.
print x0, y0, x1, y1
.
.
.
roi = saved_image[y0:y1, x0:x1]
The indentation above and below the while statement seems incorrect.
Triple quotes should only be used temporarily when commenting out blocks, as they can cause problems. Maybe use # instead:
#x0, y0 = x, y
#x1, y1 = x + w, y + h
Unless that is how the help for that function is supposed to read.
Including the errors in your question would be helpful too.
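In other words, a minimal sketch of the corrected crop and save (NumPy indexes rows first, then columns):
x0, y0 = int(x), int(y)
x1, y1 = int(x + w), int(y + h)
roi = saved_image[y0:y1, x0:x1]  # [y-range, x-range]
cv2.imwrite('roi.jpg', roi)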
