I have a face detection program.
I tried to run the code, but it's not working.
import cv2
import numpy as np

# Load the trained LBPH recognizer and the Haar cascade used for detection
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

cam = cv2.VideoCapture(0)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)

while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
        if conf < 50:
            if Id == 1:
                Id = "chandra"
            elif Id == 2:
                Id = "vamsi"
        else:
            Id = "Unknown"
        cv2.putText(im, str(Id), (x, y + h), fontFace, fontScale, fontColor)
    cv2.imshow('im', im)
    if cv2.waitKey(10) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
I got this error:
(-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
I'm using OpenCV (the cv2 module) and Python 3.7.
Maybe this will help:
# inside the while loop:
try:
    ret, im = cam.read()
except:
    continue
Please use the code below to check whether you are getting any images from your camera:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
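Separately, the !empty() assertion in your traceback usually means that cv2.CascadeClassifier could not load the XML file from the path you gave it, so detectMultiScale is being called on an empty classifier. A minimal check, assuming haarcascade_frontalface_default.xml is supposed to sit next to your script:

import cv2

cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

# empty() returns True when the classifier failed to load, which is exactly
# what triggers "(-215:Assertion failed) !empty()" in detectMultiScale.
if faceCascade.empty():
    print("Could not load cascade from:", cascadePath)
else:
    print("Cascade loaded successfully")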
The code shows the correct image, but it throws an error message once the 'frame' image finishes playing, so I can't get the 'res' image.
It just shows me the 'No Object Files' error message.
Which part should I fix to make it work?
import cv2
import numpy as np

cap = cv2.VideoCapture('ObjectTrack.mp4')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        print("No Object Files")
        break

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_orange = np.array([100, 200, 200])
    upper_orange = np.array([140, 255, 255])
    mask_orange = cv2.inRange(hsv, lower_orange, upper_orange)
    res = cv2.bitwise_and(frame, frame, mask=mask_orange)

    cv2.imshow('frame', frame)
    cv2.imshow('res', res)
    if cv2.waitKey(50) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
The reason behind your error is that the frame is None (null) once the video reaches its end, so your code enters this if block:

if not ret:
    print("No Object Files")
    break

and then drops out of the while loop (while cap.isOpened(): ...).
Just change the indentation and the if condition, like this:
while cap.isOpened():
    ret, frame = cap.read()
    if ret:  # i.e. the frame is not None
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_orange = np.array([100, 200, 200])
        upper_orange = np.array([140, 255, 255])
        mask_orange = cv2.inRange(hsv, lower_orange, upper_orange)
        res = cv2.bitwise_and(frame, frame, mask=mask_orange)
        cv2.imshow('frame', frame)
        cv2.imshow('res', res)
    if cv2.waitKey(50) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
The reason is further discussed here.
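If you want to see this for yourself, here is a minimal sketch (assuming ObjectTrack.mp4 is in the working directory) that prints what cap.read() returns once the file is exhausted:

import cv2

cap = cv2.VideoCapture('ObjectTrack.mp4')
while True:
    ret, frame = cap.read()
    if not ret:
        # Past the last frame, read() returns (False, None), which is why
        # the original code printed "No Object Files" and stopped.
        print("ret =", ret, "frame =", frame)
        break
cap.release()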
I have to write code that captures a screenshot when a face is detected in a video, so that the image can be used for an image recognition dataset.
I made a program that captures every frame, but I need it to capture a frame only when a face is detected.
import cv2

cap = cv2.VideoCapture('test.mp4')
count = 0

while cap.isOpened():
    ret, frame = cap.read()
    cv2.imshow('window-name', frame)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.imwrite("frame%d.jpg" % count, frame)
        count = count + 1
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()  # destroy all the opened windows
I just tried your code and it has one small mistake: the path to the XML classifier isn't specified correctly. I fixed it by using the full path to where the XML file is located, and it works successfully.
import cv2
import numpy as np

cap = cv2.VideoCapture('test.mp4')
#cap = cv2.VideoCapture(0)  # I tried using the webcam and it works
count = 0

while cap.isOpened():
    ret, frame = cap.read()
    cv2.imshow('window-name', frame)
    # Below you have to insert the full path of the XML file; this one is mine
    face_cascade = cv2.CascadeClassifier('C:/ProgramData/Anaconda2/pkgs/opencv-3.2.0-np111py27_0/Library/etc/haarcascades/haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.imwrite("frame%d.jpg" % count, frame)
        count = count + 1
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()  # destroy all the opened windows
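If OpenCV was installed from the opencv-python (or opencv-contrib-python) wheel, you don't have to hard-code the install directory at all; assuming that is how you installed it, cv2.data.haarcascades points at the folder containing the bundled cascades:

import cv2

# cv2.data.haarcascades is the directory holding the Haar cascade XML files
# that ship with the opencv-python wheels.
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(cascade_path)
print(cascade_path, "loaded:", not face_cascade.empty())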
The code below is supposed to capture a video and save it.
import cv2
import numpy as np
from skimage.filters import gaussian

capture = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('My video.avi', fourcc, 10, (640, 480))

while capture.isOpened():
    ret, frame = capture.read()
    if ret:
        frame = gaussian(frame, sigma=5, multichannel=True)
        out.write(frame)
        cv2.imshow('My video', frame)
    if cv2.waitKey(1) == 27:
        break

capture.release()
out.release()
cv2.destroyAllWindows()
However, I get the following error:
error: OpenCV(3.4.3) D:\Build\OpenCV\opencv-3.4.3\modules\videoio\src\cap_ffmpeg.cpp:296: error: (-215:Assertion failed) image.depth() == CV_8U in function 'cv::`anonymous-namespace'::CvVideoWriter_FFMPEG_proxy::write'
If I remove the Gaussian blur, the code works. What is wrong?
import cv2
import numpy as np
from skimage.filters import gaussian

capture = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
videoWriter = cv2.VideoWriter(
    r'output_video_path', fourcc, 10.0, (640, 480))

while True:
    ret, frame = capture.read()
    if ret:
        frame = gaussian(frame, sigma=5, multichannel=True)
        cv2.imshow('video', frame)
        # skimage's gaussian returns a float image in [0, 1]; the writer
        # needs 8-bit frames, so scale back to uint8 before writing.
        frame = np.uint8(255 * frame)
        videoWriter.write(frame)
    if cv2.waitKey(1) == 27:
        break

capture.release()
videoWriter.release()
cv2.destroyAllWindows()
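The underlying problem is that skimage.filters.gaussian returns a floating-point image in the range [0, 1], while cv2.VideoWriter expects 8-bit (CV_8U) frames, hence the assertion; converting back with np.uint8(255 * frame) fixes it. If the blur itself is all you need, a minimal alternative sketch (assuming a sigma of 5 is what you want) that stays in uint8 the whole time by using OpenCV's own blur:

import cv2

capture = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('My video.avi', fourcc, 10, (640, 480))

while capture.isOpened():
    ret, frame = capture.read()
    if ret:
        # GaussianBlur works directly on uint8 frames; ksize=(0, 0) lets
        # OpenCV derive the kernel size from the sigma value.
        frame = cv2.GaussianBlur(frame, (0, 0), 5)
        out.write(frame)
        cv2.imshow('My video', frame)
    if cv2.waitKey(1) == 27:
        break

capture.release()
out.release()
cv2.destroyAllWindows()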
Here is my code:
import cv2
import numpy as np

cap = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
framesize = (640, 480)
out = cv2.VideoWriter('dump.avi', fourcc, 60.0, framesize)
font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', frame)
    cv2.rectangle(gray, (0, 0), (640, 480), (255, 255, 255), 3)
    cv2.putText(gray, "gray", (0, 130), font, 5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('fr', gray)

    # Here I am trying to color a specific square area on the live image feed
    #gray[100:105,110:115] = [255,255,255]

    io = gray[37:111, 107:194]
    # Here I am cloning an area into another
    gray[200:200, 270:283] = io

    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
How can I color a specific area? My attempt is not working.
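A minimal sketch of what NumPy expects here, assuming the same single-channel gray frame: a grayscale slice takes a single intensity value rather than a [B, G, R] triple, and a copied region has to be assigned into a destination slice of exactly the same shape; the row range 200:200 above selects zero rows, so nothing is changed.

import numpy as np

gray = np.zeros((480, 640), dtype=np.uint8)  # stand-in for the grayscale frame

# Color a square: one channel, so assign a single intensity value (0-255)
gray[100:105, 110:115] = 255

# Clone a region: source and destination slices must have the same shape
io = gray[37:111, 107:194]     # 74 rows x 87 columns
gray[200:274, 270:357] = io    # destination is also 74 x 87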
I need help finding circles in a live video feed from my webcam. I just need feedback from Python that a circle has or has not been detected. Also, what is the best method for finding the size of the circle in pixels, for better detection? My code so far:
import cv2
import numpy as np
import sys

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    cimg = frame.copy()
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 200, 100, 100, 200)
    if circles == 1:
        print('Circle true')
    else:
        print('No circle')
    cv2.imshow('video', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
There it is!
import cv2
import numpy as np
import sys

cap = cv2.VideoCapture(0)

while True:
    gray = cv2.medianBlur(cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY), 5)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10)  # result = [[[Xpos, Ypos, Radius], ...]]
    if circles is not None:
        print("Circle There !")
    cv2.imshow('video', gray)
    if cv2.waitKey(1) == 27:  # Esc key
        break

cap.release()
cv2.destroyAllWindows()
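As for the size of the circle: HoughCircles returns an array of shape (1, N, 3) where each detection is (x, y, radius), and the radius is already measured in pixels. A small sketch of reading it out, assuming circles comes from the call above:

if circles is not None:
    for x, y, r in circles[0]:
        # r is the radius in pixels; (x, y) is the circle's center
        print("circle at ({:.0f}, {:.0f}) with radius {:.0f} px".format(x, y, r))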