NameError: name 'img2' is not defined - OpenCV Augmented Reality - Python

Hello, I'm getting NameError: name 'img2' is not defined at line 42. At first I thought it was an indentation problem, but now I think it has something to do with the homography matrix: when the matrix isn't computed, img2 never gets defined, and calling it fails. I've tried everything I can think of, but there is only so much I can do as a beginner.
Here's my full code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
myVid = cv2.VideoCapture("video.mp4")
imgTarget = cv2.imread("TargetImage.jpg")

success, imgVideo = myVid.read()
hT, wT, cT = imgTarget.shape
imgVideo = cv2.resize(imgVideo, (wT, hT))

orb = cv2.ORB_create(nfeatures=200)
kp1, des1 = orb.detectAndCompute(imgTarget, None)

while True:
    success, imgWebcam = cap.read()
    imgAug = imgWebcam.copy()
    kp2, des2 = orb.detectAndCompute(imgWebcam, None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    print(len(good))
    imgFeatures = cv2.drawMatches(imgTarget, kp1, imgWebcam, kp2, good, None, flags=2)
    if len(good) > 15:
        srcPts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dstPts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        matrix, mask = cv2.findHomography(srcPts, dstPts, cv2.RANSAC, 5.0)
        print(matrix)
        pts = np.float32([[0, 0], [0, hT], [wT, hT], [wT, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, matrix)
        img2 = cv2.polylines(imgWebcam, [np.int32(dst)], True, (255, 0, 255), 3, cv2.LINE_AA)
    cv2.imshow('img2', img2)
    cv2.imshow('ImgFeatures', imgFeatures)
    cv2.imshow('ImgTarget', imgTarget)
    cv2.imshow('myVid', imgVideo)
    cv2.imshow('Webcam', imgWebcam)
    cv2.waitKey(0)

You are getting a NameError because img2 doesn't exist at that point. img2 is only defined inside the if statement, so whenever len(good) is 15 or less, that branch never runs, img2 is never created, and the imshow call then references a name that doesn't exist.
I have edited the code below to call the imshow commands only when the if condition is met, and to print a message otherwise.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
myVid = cv2.VideoCapture("video.mp4")
imgTarget = cv2.imread("TargetImage.jpg")

success, imgVideo = myVid.read()
hT, wT, cT = imgTarget.shape
imgVideo = cv2.resize(imgVideo, (wT, hT))

orb = cv2.ORB_create(nfeatures=200)
kp1, des1 = orb.detectAndCompute(imgTarget, None)

while True:
    success, imgWebcam = cap.read()
    imgAug = imgWebcam.copy()
    kp2, des2 = orb.detectAndCompute(imgWebcam, None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    print(len(good))
    imgFeatures = cv2.drawMatches(imgTarget, kp1, imgWebcam, kp2, good, None, flags=2)
    if len(good) > 15:
        srcPts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dstPts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        matrix, mask = cv2.findHomography(srcPts, dstPts, cv2.RANSAC, 5.0)
        print(matrix)
        pts = np.float32([[0, 0], [0, hT], [wT, hT], [wT, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, matrix)
        img2 = cv2.polylines(imgWebcam, [np.int32(dst)], True, (255, 0, 255), 3, cv2.LINE_AA)
        cv2.imshow('img2', img2)
        cv2.imshow('ImgFeatures', imgFeatures)
        cv2.imshow('ImgTarget', imgTarget)
        cv2.imshow('myVid', imgVideo)
        cv2.imshow('Webcam', imgWebcam)
        cv2.waitKey(0)
    else:
        print('Condition not met to show image')
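An alternative, if you want all the windows to keep refreshing on every frame, is to give img2 a fallback value before the check. A minimal sketch, assuming it is acceptable to show the raw webcam frame when no homography is found:

    # Minimal sketch: img2 always exists, so cv2.imshow never hits a NameError.
    img2 = imgWebcam  # fallback: frame without the outline
    if len(good) > 15:
        srcPts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dstPts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        matrix, mask = cv2.findHomography(srcPts, dstPts, cv2.RANSAC, 5.0)
        pts = np.float32([[0, 0], [0, hT], [wT, hT], [wT, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, matrix)
        img2 = cv2.polylines(imgWebcam, [np.int32(dst)], True, (255, 0, 255), 3, cv2.LINE_AA)
    cv2.imshow('img2', img2)
    cv2.waitKey(1)  # waitKey(0) blocks until a key press; 1 ms keeps the loop live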

Related

matcher.knnMatch() error: unsupported format or combination of formats

I was trying to simplify https://github.com/opencv/opencv/blob/4.x/samples/python/find_obj.py and make it use warpPerspective() and am now getting an error.
Before I share the error, here's the code:
import sys
import cv2
import numpy as np

FLANN_INDEX_KDTREE = 1

def filter_matches(kp1, kp2, matches, ratio=0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append(kp1[m.queryIdx])
            mkp2.append(kp2[m.trainIdx])
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

def alignImages(im1, im2):
    detector = cv2.AKAZE_create()
    flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    matcher = cv2.FlannBasedMatcher(flann_params, {})
    kp1, desc1 = detector.detectAndCompute(im1, None)
    kp2, desc2 = detector.detectAndCompute(im2, None)
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) < 4:
        print('%d matches found, not enough for homography estimation' % len(p1))
        sys.exit()
    height, width = im2.shape
    imResult = cv2.warpPerspective(im1, H, (width, height))
    return imResult

imRef = cv2.imread('ref.png', cv2.IMREAD_GRAYSCALE)
im = cv2.imread('new.png', cv2.IMREAD_GRAYSCALE)
imNew = alignImages(im, imRef)
cv2.imwrite('output.png', imNew)
Here's the error:
Traceback (most recent call last):
  File "align.py", line 47, in <module>
    imNew = alignImages(im, imRef)
  File "align.py", line 32, in alignImages
    raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2)
cv2.error: OpenCV(4.4.0) /tmp/pip-req-build-sw_3pm_8/opencv/modules/flann/src/miniflann.cpp:315: error: (-210:Unsupported format or combination of formats) in function 'buildIndex_'
> type=0
>
Any ideas?
If you want to reproduce the problem yourself rather than just reading the code, download https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png and save it as both ref.png and new.png. That gives me the error.
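A likely cause, inferred from the error text rather than a verified fix: FLANN's KD-tree index only supports float32 descriptors, while AKAZE produces binary CV_8U descriptors, which is the type=0 in the message. A sketch of alignImages with an LSH index instead, plus the homography estimate the original code is missing before warpPerspective (the LSH parameters are typical defaults, not tuned values):

    # Binary descriptors need the LSH index; the KD-tree wants float descriptors.
    FLANN_INDEX_LSH = 6
    flann_params = dict(algorithm=FLANN_INDEX_LSH,
                        table_number=6,       # number of hash tables
                        key_size=12,          # bits per hash key
                        multi_probe_level=1)  # neighbouring-bucket probes
    matcher = cv2.FlannBasedMatcher(flann_params, {})
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    # The original never defines H; estimate it from the filtered points.
    H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
    imResult = cv2.warpPerspective(im1, H, (width, height))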

Why does my code always raise NotImplementedError?

I am trying to read two images and stitch them with OpenCV in Python. The homework requires the code to live inside the solution() function, but it still raises NotImplementedError.
Here is my full code:
import cv2
import numpy as np
import random

def solution(left_img, right_img):
    # left_img = cv2.imread('left.jpg')
    # right_img = cv2.imread('right.jpg')
    global homography
    left_image1 = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
    right_image1 = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    # find key points
    keypoints_1, descriptors_1 = sift.detectAndCompute(right_image1, None)
    keypoints_2, descriptors_2 = sift.detectAndCompute(left_image1, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    match = cv2.FlannBasedMatcher(index_params, search_params)
    # match = cv2.BFMatcher()
    matches = match.knnMatch(descriptors_1, descriptors_2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.03 * n.distance:
            good.append(m)
    draw_params = dict(matchColor=(0, 255, 0), singlePointColor=None, flags=2)
    img3 = cv2.drawMatches(right_img, keypoints_1, left_img, keypoints_2, good, None, **draw_params)
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([keypoints_1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints_2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = right_image1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, homography)
        img2 = cv2.polylines(left_image1, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        dst = cv2.warpPerspective(right_img, homography, (left_img.shape[1] + right_img.shape[1], left_img.shape[0]))
        dst[0:left_img.shape[0], 0:left_img.shape[1]] = left_img
        # final_result = cv2.imshow("task1 result.jpg", dst)
        # return final
    raise NotImplementedError

if __name__ == "__main__":
    left_img = cv2.imread('left.jpg')
    right_img = cv2.imread('right.jpg')
    result_image = solution(left_img, right_img)
    cv2.imwrite('results/task1_result.jpg', result_image)
And the output is always NotImplementedError, and I have no idea what the issue is.
Could anyone help me fix the error?
Much appreciated.
That's because you raise NotImplementedError unconditionally at the end of the solution() function, so it runs even when the stitching succeeds. Remove that line and return the stitched image instead. Note that the commented-out final_result would hold the return value of cv2.imshow(), which is None; return dst, the stitched panorama, rather than final_result.
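A minimal sketch of the end of solution() with that change applied (assuming the stitched panorama in dst is the intended return value; the fallback error is my own choice, not part of the assignment):

        # ...still inside the `if len(good) > MIN_MATCH_COUNT:` branch:
        dst = cv2.warpPerspective(right_img, homography,
                                  (left_img.shape[1] + right_img.shape[1], left_img.shape[0]))
        dst[0:left_img.shape[0], 0:left_img.shape[1]] = left_img
        return dst  # return the panorama instead of raising
    # Only reached when too few good matches were found:
    raise RuntimeError('not enough good matches for homography')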

Python OpenCV Create Map with Video Frames

I would like to ask if someone can help me with my question.
I am currently working on building a map in real time from the frames captured by a camera. I have succeeded in finding the keypoints and the homography using ORB, the brute-force matcher, and the findHomography() function.
Now I want to stitch the frames together using the homography matrix to create a map.
I am working in a Windows environment with Python and OpenCV.
Any help is appreciated.
def homography(current_frame_gray, previous_frame_gray):
    orb = cv2.ORB_create()
    kpts1, descs1 = orb.detectAndCompute(previous_frame_gray, None)
    kpts2, descs2 = orb.detectAndCompute(current_frame_gray, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(descs1, descs2)
    dmatches = sorted(matches, key=lambda x: x.distance)
    src_pts = np.float32([kpts1[m.queryIdx].pt for m in dmatches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kpts2[m.trainIdx].pt for m in dmatches]).reshape(-1, 1, 2)
    if dst_pts is None:
        return False
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    print(M)
    print("M exact")
    print(M[0, 2])
    h, w = previous_frame.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    if current_frame is not None:
        print("Not None")
        print(type(current_frame))
    else:
        print("NONE!!!")
    if previous_frame is not None:
        print("Not None")
    else:
        print("NONE!!!")
    res = cv2.drawMatches(previous_frame, kpts1, current_frame, kpts2, dmatches[:20], None, flags=2)
    cv2.imshow("orb_match", res)
    return res
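Since a working per-frame homography already exists, here is a hedged sketch of one way to do the stitching step (the canvas size and starting the map at the first frame's pose are arbitrary choices for illustration, not requirements):

import cv2
import numpy as np

canvas = np.zeros((2000, 3000, 3), dtype=np.uint8)  # assumed map size
H_total = np.eye(3)  # maps the current frame into first-frame coordinates

def add_frame_to_map(canvas, frame, M, H_total):
    # M (from findHomography above) maps previous-frame points to
    # current-frame points, so invert it to go current -> previous,
    # then chain it into the global transform.
    H_total = H_total @ np.linalg.inv(M)
    warped = cv2.warpPerspective(frame, H_total,
                                 (canvas.shape[1], canvas.shape[0]))
    mask = warped.sum(axis=2) > 0  # paste only the warped pixels
    canvas[mask] = warped[mask]
    return canvas, H_total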

Why didn't ORB give me better results than SURF?

I am trying to extract features from a given image or video frame. I have read many papers and articles about point-feature-based object tracking, and I finally decided to use ORB, because most papers claim that ORB outperforms the other feature extraction algorithms.
When I used ORB, it did not give me good results. I then tried SURF, which gave me very good results.
I wonder what is wrong with my code. I will share both versions, SURF and ORB; please look at them and comment on my mistakes.
SURF Result
ORB Result
# With SURF Detector
import cv2
import numpy as np

# Initialize parameters for Flann based matcher
FLANN_INDEX_KDTREE = 0
flann_params = dict(algorithm=FLANN_INDEX_KDTREE,
                    table_number=6,  # 12
                    key_size=12,  # 20
                    trees=5,
                    multi_probe_level=1)  # 2
draw_params = dict(matchesMask=None,
                   singlePointColor=None,
                   matchColor=(255, 0, 0),
                   flags=2)
MIN_MATCH_COUNT = 10

img_object = cv2.imread('form1.jpg', cv2.IMREAD_GRAYSCALE)
img_scene = cv2.imread('form2.jpg', cv2.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv2.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
matcher = cv2.FlannBasedMatcher(flann_params, {})
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)

#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

#-- Draw matches
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1] + img_scene.shape[1], 3), dtype=np.uint8)
cv2.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches[:10], img_matches, **draw_params)

if len(good_matches) > MIN_MATCH_COUNT:
    obj_pts = np.float32([keypoints_obj[good_match.queryIdx].pt for good_match in good_matches]).reshape(-1, 1, 2)
    scene_pts = np.float32([keypoints_scene[good_match.trainIdx].pt for good_match in good_matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(obj_pts, scene_pts, cv2.RANSAC, 5.0)
    #-- Get the corners from the image_1 ( the object to be "detected" )
    obj_corners = np.empty((4, 1, 2), dtype=np.float32)
    obj_corners[0, 0, 0] = 0
    obj_corners[0, 0, 1] = 0
    obj_corners[1, 0, 0] = img_object.shape[1]  # img.shape[1] shows the column of the image
    obj_corners[1, 0, 1] = 0
    obj_corners[2, 0, 0] = img_object.shape[1]
    obj_corners[2, 0, 1] = img_object.shape[0]  # img.shape[0] shows the row of the image
    obj_corners[3, 0, 0] = 0
    obj_corners[3, 0, 1] = img_object.shape[0]
    scene_corners = cv2.perspectiveTransform(obj_corners, H)
    #-- Draw lines between the corners (the mapped object in the scene - image_2 )
    cv2.line(img_matches, (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])),
             (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])), (0, 255, 0), 4)
    cv2.line(img_matches, (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])),
             (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])), (0, 255, 0), 4)
    cv2.line(img_matches, (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])),
             (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])), (0, 255, 0), 4)
    cv2.line(img_matches, (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])),
             (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])), (0, 255, 0), 4)

#-- Show detected matches
cv2.imshow('Good Matches With SURF Detector', img_matches)
cv2.imwrite('SURF_Matches.jpg', img_matches)
cv2.waitKey()
# With ORB Detector
import cv2
import numpy as np

# Initialize parameters for Flann based matcher
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
flann_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,  # 12
                    key_size=12,  # 20
                    multi_probe_level=1)  # 2
draw_params = dict(matchesMask=None,
                   singlePointColor=None,
                   matchColor=(255, 0, 0),
                   flags=2)
MIN_MATCH_COUNT = 10

img_object = cv2.imread('form1.jpg', cv2.IMREAD_GRAYSCALE)
img_scene = cv2.imread('form2.jpg', cv2.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using ORB Detector, compute the descriptors
detector = cv2.ORB_create(nfeatures=1500)
keypoints_obj = detector.detect(img_object, None)
keypoints_obj, descriptors_obj = detector.compute(img_object, keypoints_obj)
keypoints_scene = detector.detect(img_object, None)
keypoints_scene, descriptors_scene = detector.compute(img_scene, keypoints_scene)

#-- Step 2: Matching descriptor vectors with a BF based matcher
matcher = cv2.FlannBasedMatcher(flann_params, {})
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, k=2)

# Filter matches using the Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

#-- Draw matches
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1] + img_scene.shape[1], 3), dtype=np.uint8)
cv2.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches[:10], img_matches, **draw_params)

if len(good_matches) > MIN_MATCH_COUNT:
    obj_pts = np.float32([keypoints_obj[good_match.queryIdx].pt for good_match in good_matches]).reshape(-1, 1, 2)
    scene_pts = np.float32([keypoints_scene[good_match.trainIdx].pt for good_match in good_matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(obj_pts, scene_pts, cv2.RANSAC, 5.0)
    #-- Get the corners from the image_1 ( the object to be "detected" )
    obj_corners = np.empty((4, 1, 2), dtype=np.float32)
    obj_corners[0, 0, 0] = 0
    obj_corners[0, 0, 1] = 0
    obj_corners[1, 0, 0] = img_object.shape[1]  # img.shape[1] shows the column of the image
    obj_corners[1, 0, 1] = 0
    obj_corners[2, 0, 0] = img_object.shape[1]
    obj_corners[2, 0, 1] = img_object.shape[0]  # img.shape[0] shows the row of the image
    obj_corners[3, 0, 0] = 0
    obj_corners[3, 0, 1] = img_object.shape[0]
    scene_corners = cv2.perspectiveTransform(obj_corners, H)
    #-- Draw lines between the corners (the mapped object in the scene - image_2 )
    cv2.line(img_matches, (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])),
             (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])), (0, 255, 0), 4)
    cv2.line(img_matches, (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])),
             (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])), (0, 255, 0), 4)
    cv2.line(img_matches, (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])),
             (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])), (0, 255, 0), 4)
    cv2.line(img_matches, (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])),
             (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])), (0, 255, 0), 4)

#-- Show detected matches
cv2.imshow('Good Matches With ORB Detector', img_matches)
cv2.imwrite('ORB_Matches.jpg', img_matches)
cv2.waitKey()
I get very bad results with ORB, tested on the same images as SURF.
How can I get the good results with ORB that most people report?
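One thing worth checking before comparing the detectors (an observation on the posted code, not a guaranteed diagnosis): the ORB version detects the scene keypoints on img_object instead of img_scene, so descriptors_scene is computed from keypoints found on the wrong image, which by itself can wreck the matches:

# The posted ORB code:
keypoints_scene = detector.detect(img_object, None)  # wrong image
keypoints_scene, descriptors_scene = detector.compute(img_scene, keypoints_scene)

# It presumably should be, more simply:
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)

Note also that the SURF version mixes LSH-style parameters (table_number, key_size, multi_probe_level) into a KD-tree index; it may work anyway, but it is worth cleaning up.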

Face Recognition with multiple faces doesn't detect multiple faces

I've written a small program which detects faces and saves them to a training file for recognition.
I'm having some trouble with this algorithm: sometimes it throws an error that LBPH::train was fed empty data, which shouldn't be the case.
OpenCV Error: Unsupported format or combination of formats (Empty training data was given. You'll need more than one sample to learn a model.) in cv::LBPH::train, file ........\opencv\modules\contrib\src\facerec.cpp, line 917
Traceback (most recent call last):
Moreover, the algorithm detects multiple faces but recognizes them all as the same face, which is wrong.
Could someone give me a hint on what I'm missing?
import cv2
import os
import numpy as np
import sys

i = 0
global allFaces
global first
first = True
allFaces = []
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
recognizer = cv2.createLBPHFaceRecognizer()
font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 1, 1, 0, 1)
id = 0

class Face:
    def __init__(self, id, face):
        self.id = id
        self.face = face
        self.gatheredFaces = []

    def main(self):
        print("main")

    def getFace(self):
        return self.face

    def setKnownFace(self):
        self.known = False

    def getKownFace(self):
        return self.knwon

    def getId(self):
        return self.id

    def setFacesInfo(self, frame, face):
        x, y, h, w = face
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.gatheredFaces.append(gray[y:y+h, x:x+w])
        # count = 0
        # while (count != 10):
        #     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #     cv2.imshow("frame in set", frame)
        #     faces = faceCascade.detectMultiScale(gray)
        #     for face in faces:
        #         self.gatheredFaces.append(gray[y:y+h, x:x+w])
        #     cv2.imshow("gathered Faces", self.gatheredFaces[0])
        #     cv2.imwrite("dataSet/User" + str(self.getId()) + ".jpg", gray)
        #     count = count + 1
        #     cv2.waitKey(30)

    def getFacesInfo(self):
        return self.gatheredFaces

    def trainDetector(self):
        faceSamples = []
        Ids = []
        print("length of gathered faces")
        print(len(allFaces[0].getFacesInfo()))
        for i in range(len(allFaces)):
            temp = allFaces[i].getFacesInfo()
            for j in range(len(temp)):
                imageNP = np.array(temp[j], 'uint8')
                id = allFaces[i].getId()
                faces = faceCascade.detectMultiScale(imageNP)
                for (x, y, h, w) in faces:
                    faceSamples.append(imageNP)
                    Ids.append(id)
        recognizer.train(faceSamples, np.array(Ids))
        recognizer.save('recognizer/train.yml')

    def updateDetector(self):
        recognizer.load('recognizer/train.yml')
        faceSamples = []
        Ids = []
        for i in range(len(allFaces)):
            temp = allFaces[i].getFacesInfo()
            for j in range(len(temp)):
                imageNP = np.array(temp[j], 'uint8')
                id = allFaces[i].getId()
                faces = faceCascade.detectMultiScale(imageNP)
                for (x, y, h, w) in faces:
                    faceSamples.append(imageNP)
                    Ids.append(id)
        recognizer.update(faceSamples, np.array(Ids))
        recognizer.save('recognizer/train.yml')

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("actual Frame", frame)
    cv2.imshow("gray", gray)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    print(faces)
    for face in faces:
        x, y, h, w = face
        temp = Face(id, frame[y:y+h, x:x+w])
        allFaces.append(temp)
        temp = None
        id = id + 1

    ### Detector
    detector = cv2.SIFT()
    FLANN_INDEX_KDTREE = 0
    flannParam = dict(algorithm=FLANN_INDEX_KDTREE, tree=5)
    flann = cv2.FlannBasedMatcher(flannParam, {})
    trainImg = allFaces[0].getFace()
    trainKP, trainDecs = detector.detectAndCompute(trainImg, None)

    if (len(allFaces) == 1) and first:
        print("only one object in allFaces")
        for i in range(10):
            print(i)
            allFaces[0].setFacesInfo(frame, face)
        allFaces[0].trainDetector()
        first = False
    else:
        for i in range(len(allFaces)):
            QueryImg = cv2.cvtColor(allFaces[i].getFace(), cv2.COLOR_BGR2GRAY)
            queryKP, queryDesc = detector.detectAndCompute(QueryImg, None)
            matches = flann.knnMatch(queryDesc, trainDecs, k=2)
            goodMatch = []
            for m, n in matches:
                if m.distance < 0.75 * n.distance:
                    goodMatch.append(m)
            if len(goodMatch) > 30:
                print("good match")
                # allFaces[i].
                tp = []
                qp = []
                for m in goodMatch:
                    tp.append(trainKP[m.trainIdx].pt)
                    qp.append(queryKP[m.queryIdx].pt)
                tp, qp = np.float32((tp, qp))
                H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)
                allFaces.pop(len(allFaces) - 1)
                break
            else:
                print("bad match")
                for i in range(10):
                    allFaces[len(allFaces) - 1].setFacesInfo(frame, face)
                allFaces[len(allFaces) - 1].updateDetector()
                cv2.waitKey(10)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
        tempid, conf = recognizer.predict(gray[y:y+h, x:x+w])
        cv2.cv.PutText(cv2.cv.fromarray(frame), str(tempid), (x, y+h), font, (0, 0, 255))
        cv2.waitKey(30)

    cv2.imshow("detectedFace", frame)
    cv2.waitKey(30)
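One hedged observation on the "Empty training data" error (inferred from the message, not a verified fix): trainDetector() and updateDetector() only append a sample when detectMultiScale re-detects a face inside an already-cropped face image, which frequently fails, so faceSamples can come out empty. Guarding the call makes the failure explicit:

# Sketch: skip training when no samples survived the re-detection step.
if len(faceSamples) > 0:
    recognizer.train(faceSamples, np.array(Ids))
    recognizer.save('recognizer/train.yml')
else:
    print("no usable face samples gathered; skipping train()")

As for every face being recognized as the same person: each detected face is appended to allFaces with a fresh id on every frame, but the SIFT matching always compares against allFaces[0].getFace(), so distinct faces are unlikely to end up with distinct ids.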
