OpenCV imread error while trying it in real time - Python

Code:
import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
from time import sleep

def get_encoded_faces():
    encoded = {}
    for dirpath, dnames, fname in os.walk("./faces"):
        for f in fname:
            if f.endswith(".jpg") or f.endswith(".png"):
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] = encoding
    return encoded, fname

def unknown_image_encoded(img):
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding

def classify_face(im):
    faces, fname = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())

    img = cv2.imread(im, 1)

    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)

    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
        cv2.rectangle(img, (left-20, bottom-15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(img, name, (left-20, bottom+15), font, 1.0, (255, 255, 255), 2)

    return face_names, fname

cap = cv2.VideoCapture(0)
while True:
    ret, image = cap.read()
    recog, fname = classify_face(image)
    print(recog)
    cv2.imshow(fname, image)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

video.release()
cv2.destroyAllWindows()
Error:
Traceback (most recent call last):
File "face.py", line 70, in <module>
recog, fname = classify_face(image)
File "face.py", line 37, in classify_face
img = cv2.imread(im, 1)
SystemError: <built-in function imread> returned NULL without setting an error
[ WARN:0] global C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-wbmte9m7\opencv\modules\videoio\src\cap_msmf.cpp (435) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
The code works properly when using a single image file, but when I tried using it with video/real-time input it throws this error.
I guess cv2.imread requires a file path rather than the image array that is being passed to it. Is there another workaround?
I am trying to recognize faces in real time, and the major issue was detecting unknown faces, so when I started coding for real time I got this error.

The code and the error message don't agree. Are you running an older version of the code?
Error message:
File "face.py", line 37, in classify_face
img = cv2.imread(im, 1)
Code:
img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
For debugging it may be helpful to display the received frame from the camera with code like the following:
ret, image = cap.read()
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', grey)
cv2.waitKey()

cv2.imread(im, 1) requires im to be the filename (datatype: string) of the image that you want to read.
Using cap = cv2.VideoCapture(0), you don't need to read images from files anymore, since the image that you want to classify is returned as an array from cap.read().
To fix your code for using cv2.VideoCapture, remove img = cv2.imread(im, 1) from your classify_face method and change the method definition to
def classify_face(img):
instead of
def classify_face(im):
Note that the 0 option of cv2.VideoCapture refers to reading the live video stream from the camera with index 0.
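Putting it together, a minimal sketch of the adjusted function and calling loop might look like this (it reuses get_encoded_faces from the question; the RGB conversion and the "recognition" window name are additions here, since face_recognition works on RGB images while OpenCV frames are BGR):
def classify_face(img):
    # img is already an image array from cap.read(), so no cv2.imread here
    faces, fname = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())

    # face_recognition expects RGB, while OpenCV frames are BGR
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    face_locations = face_recognition.face_locations(rgb)
    unknown_face_encodings = face_recognition.face_encodings(rgb, face_locations)

    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # draw on the original BGR frame
        cv2.rectangle(img, (left - 20, top - 20), (right + 20, bottom + 20), (255, 0, 0), 2)
        cv2.putText(img, name, (left - 20, bottom + 15), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 2)

    return face_names, fname

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    recog, fname = classify_face(frame)
    print(recog)
    cv2.imshow("recognition", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()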

Related

I am getting an OpenCV error in Python and my code is given below. Please resolve it fast. Thanks in advance

Please help, guys.
When I run this code it gives me an unexpected error:
"OpenCV Error: Assertion failed (ssize.width > 0 && ssize.height > 0) in resize, file /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/imgproc/src/imgwarp.cpp, line 3492"
Code:
import numpy as np
import dlib
import cv2
import face_recognition
import os

path = 'images'
image = []
classNames = []
myList = os.listdir(path)

for cl in myList:
    curImg = cv2.imread(f'{path}/{cl}')
    image.append(curImg)
    classNames.append(os.path.splitext(cl)[0])
# print(classNames)

def findEncodings(image):
    encodeList = []
    for img in image:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList

encodeListKnown = findEncodings(image)
print(len("Encoding Complete"))

cap = cv2.VideoCapture()
# print(cap)

while True:
    success, img = cap.read()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrames = face_recognition.face_locations(imgS)
    encodesCurFrames = face_recognition.face_encodings(imgS, facesCurFrames)

    for encodeFace, faceLoc in zip(encodesCurFrames, facesCurFrames):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        print(faceDis)
(0, 0) is not a valid (width, height) tuple for resizing an image. It looks like you wanted to resize the image by scaling, in which case you can pass None instead of the size tuple:
imgS = cv2.resize(img, None, fx=0.25, fy=0.25)
will scale the image by 0.25 in both dimensions.
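For context, a minimal sketch of the corrected loop from the question might look like this (the camera index 0 and the success check are assumptions added here, not part of the original answer):
cap = cv2.VideoCapture(0)  # open the default camera; the question's code passed no index
while True:
    success, img = cap.read()
    if not success:
        break
    # scale the frame down to 25% in both dimensions
    imgS = cv2.resize(img, None, fx=0.25, fy=0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrames = face_recognition.face_locations(imgS)
    encodesCurFrames = face_recognition.face_encodings(imgS, facesCurFrames)
    for encodeFace, faceLoc in zip(encodesCurFrames, facesCurFrames):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        print(faceDis)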

src.empty() in function 'cv::cvtColor' & cap.read() is not reading the video frame

Python 3.8, OpenCV 4.4.0
I have loaded the video file correctly and it runs, but I think cap.read() is not reading the video frames. I'm working on a mask detection project and want to test it on a pre-recorded video of someone wearing a mask.
Full Code
# import packages
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np

model = load_model('model_02.h5')
img_width, img_hight = 200, 200
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture('wearingmask.mp4')  # for video
img_count_full = 0

# parameters for text
font = cv2.FONT_HERSHEY_SIMPLEX
org = (1, 1)
class_lable = ' '
fontScale = 1
# Blue color in BGR
color = (255, 0, 0)
thickness = 2  # 1

while True:
    img_count_full += 1
    response, color_img = cap.read()
    if response == False:
        break

gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_img, 1.1, 6)
FULL ERROR
error Traceback (most recent call last)
<ipython-input-8-d7656fcb48d9> in <module>
39 break
40
---> 41 gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
42 faces = face_cascade.detectMultiScale(gray_img, 1.1, 6)
error: OpenCV(4.4.0) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-9d_dfo3_\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
It's pretty self-explanatory. Your code's core logic is basically: loop through the images in the video until there are no more images, and then apply cvtColor to the last image.
Your while loop breaks when response is False, that is, when you've reached the end of your video file. However, when response is False, the corresponding color_img is actually None. To actually get the last valid image, you need to do something like:
while True:
    img_count_full += 1
    response, frame = cap.read()
    if response == False:
        break
    color_img = frame

gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
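If the intent is instead to run the mask detector on every frame of the video rather than only on the last one (which the question's description suggests), a minimal sketch would keep the conversion and detection inside the loop, guarded by the response check; this is an assumption about the intended behaviour, not part of the answer above:
while True:
    img_count_full += 1
    response, color_img = cap.read()
    if not response:
        break  # end of the video file
    # the frame is valid here, so it is safe to convert and detect
    gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_img, 1.1, 6)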

VisibleDeprecationWarning and ValueError when trying out face recognition

I am a beginner to programming and I am trying to build a face recognition attendance program in Python, and I am not able to get the face distance list from the webcam. The webcam light comes on, but I don't even see any images.
I don't know what I am doing wrong.
Here is the code:
import cv2
import numpy as np
import face_recognition
import os

path = 'ImagesAttendance'
images = []
staffNames = []
myList = os.listdir(path)
print(myList)

for st in myList:
    curImg = cv2.imread(f'{path}/{st}')
    images.append(curImg)
    staffNames.append(os.path.splitext(st)[0])
print(staffNames)

# encoding function - finding begins automatically
def findEncodings(images):
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # finding encodings
        encode = face_recognition.face_encodings(img)
        encodeList.append(encode)
    return encodeList

encodeListKnown = findEncodings(images)
# printing the number or length of pictures in the folder
# print(len(encodeListKnown))
print('Encoding Complete')

# Initializing webcam to match images in the folder
cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    # because it's real-time capture, we reduce the size of the image to speed up the process
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    # real-time image size has been divided by 4 using 0.25
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodeCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    # finding matches
    for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        print(faceDis)
Here are the errors:
C:\Users\AAA\PycharmProjects\FaceRecognitionProject\venv\Scripts\python.exe C:/Users/AAA/PycharmProjects/FaceRecognitionProject/AttendanceProject.py
['2LT Chinonso.jpg', 'Hadizah Abdul.jpg', 'Nosa Igiemwin.jpg']
['2LT Chinonso', 'Hadizah Abdul', 'Nosa Igiemwin']
Encoding Complete
C:\Users\AAA\PycharmProjects\FaceRecognitionProject\venv\lib\site-packages\face_recognition\api.py:75: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
Traceback (most recent call last):
File "C:/Users/AAA/PycharmProjects/FaceRecognitionProject/AttendanceProject.py", line 46, in <module>
matches = face_recognition.compare_faces(encodeListKnown,encodeFace)
File "C:\Users\AAA\PycharmProjects\FaceRecognitionProject\venv\lib\site-packages\face_recognition\api.py", line 226, in compare_faces
return list(face_distance(known_face_encodings, face_encoding_to_check) <= tolerance)
File "C:\Users\AAA\PycharmProjects\FaceRecognitionProject\venv\lib\site-packages\face_recognition\api.py", line 75, in face_distance
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
ValueError: operands could not be broadcast together with shapes (3,) (128,)
[ WARN:0] global C:\projects\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (674) SourceReaderCB::~SourceReaderCB terminating async callback
Things I did to fix the errors; now it's working perfectly on my PC:
Inside def findEncodings(images), check the fourth line, i.e. encode = face_recognition.face_encodings(img), and replace it with encode = face_recognition.face_encodings(img)[0].
I rewrote the loop below (the last four lines) because it was showing as unreachable, likely due to improper indentation:

for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
    matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
    faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
    print(faceDis)

I completed your code as well; you can use this for comparison:
import cv2
import numpy as np
import face_recognition
import os

path = 'ImagesAttendance'
images = []
staffNames = []
myList = os.listdir(path)
print(myList)

for st in myList:
    curImg = cv2.imread(f'{path}/{st}')
    images.append(curImg)
    staffNames.append(os.path.splitext(st)[0])
print(staffNames)

def findEncodings(images):
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # finding encodings
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList

encodeListKnown = findEncodings(images)

cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    # because it's real-time capture, we reduce the size of the image to speed up the process
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    # real-time image size has been divided by 4 using 0.25
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodeCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    # finding matches
    for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        print(faceDis)
        matchIndex = np.argmin(faceDis)
        print('matchIndex', matchIndex)
        if matches[matchIndex]:
            name = staffNames[matchIndex].upper()
            print(name)
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 0, 0), cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)

    cv2.imshow('Webcam', img)
    # press 'esc' to close the program
    if cv2.waitKey(1) == 27:
        break

# release camera
cap.release()
cv2.destroyAllWindows()

Passing and Receiving Images from Function

I'm creating a function (in Python) that expects/receives a single image of multiple human faces in it, and returns multiple smaller images (one image per human face). I am able to do a cv2.imshow inside the function and see the expected smaller images, but when I attempt a cv2.imshow from outside the function, it does not work (unable to see the smaller image, and get a TypeError instead). Would appreciate some guidance.
def stills(user_image):
    # sub_frames = []
    fqp_image_src = (user_image)
    raw_pic = cv2.imread(fqp_image_src)
    mpic = cv2.resize(raw_pic, (0, 0), fx=0.30, fy=0.30)
    mpic_rgb = cv2.cvtColor(mpic, cv2.COLOR_BGR2RGB)
    face_boxes = haar_cascade_face.detectMultiScale(mpic_rgb, scaleFactor=1.2, minNeighbors=5)
    count = int(len(face_boxes))
    for i in range(count):
        face_box = face_boxes[i]
        final = cv2.rectangle(mpic, (face_box[0], face_box[1]), ((face_box[0]+face_box[2]), (face_box[1]+face_box[3])), (0, 255, 0), 2)
        sub_frame = final[face_box[1]:(face_box[1]+face_box[3]), face_box[0]:(face_box[0]+face_box[2])]
        # sub_frames.append(sub_frame)
        cv2.imshow('frame', sub_frame)  # this works
        cv2.waitKey()
    return (sub_frame, final)

# calling the function
something = stills("abc.jpg")
cv2.imshow('frame', something)  # this does not work
cv2.waitKey()
TypeError: Expected cv::UMat for argument 'mat'
This will do what you expected, with some simplification and with full file paths.
One of the key errors was giving detectMultiScale a colored image; the input should have a single channel containing brightness (grayscale).
In order to display the colored image with the faces in boxes, a copy of the image is converted to grayscale for detection, which gives the coordinates to draw on the colored image.
Also note that stills returns a tuple, while cv2.imshow expects a single image array, so the return value has to be unpacked before displaying it.
import cv2
import os

# Take the directory containing this file as a global
PATH = os.path.dirname(os.path.abspath(__file__))

haar_cascade_face = cv2.CascadeClassifier(os.path.join(PATH, 'haarcascade_frontalface_alt.xml'))

def stills(user_image):
    image = os.path.join(PATH, user_image)
    image = cv2.imread(image)
    image = cv2.resize(image, (0, 0), fx=0.30, fy=0.30)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    face_boxes = haar_cascade_face.detectMultiScale(gray_image, scaleFactor=1.073, minNeighbors=8)
    final = image  # make the function always return an image
    sub_frames = []
    # Check if there are faces
    if len(face_boxes) > 0:
        for x, y, w, h in face_boxes:
            final = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            sub_frame = image[y:y + h, x:x + w]
            sub_frames.append([x, y, x + w, y + h])
            cv2.imshow('sub_frame', sub_frame)
            # cv2.waitKey()  # No need to wait for the user
    else:
        print('No faces found')
    return (sub_frames, final)

if __name__ == '__main__':
    fragments, final = stills("abc.jpg")
    cv2.imshow('frame', final)
    cv2.waitKey()

How to load data from a for-in loop in Python?

Hello, I am trying to combine detectMultiScale code with calcHist code. When I run this program I can't access 'w' from the for-in loop.
import cv2
import numpy as np
from matplotlib import pyplot as plt
import time
import sys
import serial
#execfile("/home/arizal/Documents/Sorting Jeruk/motor1.py")
#ser = serial.Serial('/dev/ttyACM0', 9600)
#Cascade jeruk
jeruk_cascade = cv2.CascadeClassifier('cascade.xml')
camera = cv2.VideoCapture(1)
base1 = cv2.imread('base11.jpg')
base2 = cv2.imread('base22.jpg')
base3 = cv2.imread('base33.jpg')
#Set hist parameters
hist_height = 64
hist_width = 256
nbins = 32
bin_width = hist_width/nbins
hrange = [0,180]
srange = [0,256]
ranges = hrange+srange # ranges = [0,180,0,256]
#Create an empty image for the histogram
e = np.zeros((hist_height,hist_width))
#print ("h : ",h)
#print type(h)
#x=1
This is the detectMultiScale loop:
while 1:
    grabbed, img = camera.read()
    cam = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if not grabbed:
        "Camera could not be started."
        break

    # add this
    # image, reject levels level weights.
    jeruks = jeruk_cascade.detectMultiScale(cam, 1.03, 5)
This is the cascade for-in loop, which draws a rectangle mark on the object:
    # add this
    for (x, y, w, h) in jeruks:
        cv2.rectangle(img, (x, y), (x+w, y+h), (17, 126, 234), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, 'Jeruk', (x+w, y+h), font, 1, (17, 126, 234), 2, cv2.LINE_AA)  # ---write the text
        roi_gray = cam[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
And calculate the histogram when the object is detected:
    if w > 250:
        print('w', w)
        histcam = cv2.calcHist([cam], [0], None, [nbins], [0, 256])
        cv2.normalize(histcam, histcam, hist_height, cv2.NORM_MINMAX)
        hist = np.int32(np.around(histcam))
But I got this error:
Traceback (most recent call last):
File "/home/arizal/Documents/Sorting Jeruk/doalcoba.py", line 65, in <module>
if w > 250 :
NameError: name 'w' is not defined
Can anyone help me?
I think the problem with your code is indentation. In your code,

for (x, y, w, h) in jeruks:
    ....

and

if w > 250:
    ....

are on the same level of indentation. (x, y, w, h) are only available inside the for loop, not outside of it. Fix your indentation:

for (x, y, w, h) in jeruks:
    ....

    if w > 250:
        print('w', w)

Let me know if that works.
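For reference, a minimal sketch of the corrected loop with the histogram block nested inside the for loop, so that w is in scope (it reuses camera, jeruk_cascade, nbins and hist_height from the question's setup; the grabbed check is moved before the cvtColor call, and the cv2.normalize arguments are spelled out as alpha, beta, norm_type here):
while 1:
    grabbed, img = camera.read()
    if not grabbed:
        print("Camera could not be started.")
        break
    cam = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    jeruks = jeruk_cascade.detectMultiScale(cam, 1.03, 5)
    for (x, y, w, h) in jeruks:
        cv2.rectangle(img, (x, y), (x + w, y + h), (17, 126, 234), 2)
        cv2.putText(img, 'Jeruk', (x + w, y + h), cv2.FONT_HERSHEY_SIMPLEX, 1, (17, 126, 234), 2, cv2.LINE_AA)

        # w exists only inside this loop, so the histogram check belongs here
        if w > 250:
            print('w', w)
            histcam = cv2.calcHist([cam], [0], None, [nbins], [0, 256])
            cv2.normalize(histcam, histcam, 0, hist_height, cv2.NORM_MINMAX)
            hist = np.int32(np.around(histcam))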
