IndexError in Python hand gesture recognizer (OpenCV)

I am making a hand gesture recognizer using OpenCV in Python. I followed a YouTube tutorial, where the code worked fine, but I have run into an error. I am a new coder.
Here is my code:
import cv2
import math
import numpy as np

capture = cv2.VideoCapture(0)
while capture.isOpened():
    # while True:
    ret, img = capture.read()
    cv2.rectangle(img, (300, 300), (100, 100), (255, 255, 255), 2)
    crop_img = img[100:300, 100:300]
    grayscale = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
    value = (35, 35)
    blurry = cv2.GaussianBlur(grayscale, value, 0)
    _, thresh1 = cv2.threshold(blurry, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    cv2.imshow('Threshold', thresh1)
    contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cont = max(contours, key=lambda item: cv2.contourArea(item))
    x, y, w, h = cv2.boundingRect(cont)
    cv2.rectangle(crop_img, (x, y), (w, h), (0, 0, 255), 2)
    hull = cv2.convexHull(cont)
    drawing = np.zeros(crop_img.shape, np.uint8)
    cv2.drawContours(drawing, [cont], 0, (0, 255, 0), 0)
    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)
    hull = cv2.convexHull(cont, returnPoints=False)
    defects = cv2.convexityDefects(cont, hull)
    count_defects = 0
    cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
    start = ()
    end = ()
    far = ()
    for i in range(defects.shape[0]):
        s, e, d, f = defects[i, 0]
        start = tuple(cont[s][0])
        end = tuple(cont[e][0])
        far = tuple(cont[f][0])
        a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
        b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
        c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
        angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
        if angle <= 120:
            count_defects += 1
            cv2.circle(crop_img, far, 1, (0, 0, 255), -1)
        cv2.line(crop_img, start, end, (0, 255, 0), 2)
    if count_defects == 1:
        cv2.putText(img, '1 Finger', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    elif count_defects == 2:
        cv2.putText(img, '2 Fingers', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    elif count_defects == 3:
        cv2.putText(img, '3 Fingers', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    elif count_defects == 4:
        cv2.putText(img, '4 Fingers', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    elif count_defects == 5:
        cv2.putText(img, '5 Fingers', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    else:
        cv2.putText(img, 'Unknown gesture', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    cv2.imshow('Gesture', img)
    all_img = np.hstack((drawing, crop_img))
    cv2.imshow('Contours', all_img)
    k = cv2.waitKey(10)
    if k == 27:
        break
This throws an error while executing, at line 50 of my script:
line 50, in <module>
far = tuple(cont[f][0])
IndexError: index 1635 is out of bounds for axis 0 with size 596
The same code works properly on my friend's Python setup. Please help, thanks.

I have not used this function much myself, but in some examples I see a different unpacking order, s, e, f, d instead of s, e, d, f:
s, e, f, d = defects[i, 0]
With that change your code works for me.
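For context, each row returned by cv2.convexityDefects is a 4-element vector (start_index, end_index, farthest_pt_index, fixpt_depth), so the farthest-point index is the third value, not the fourth. A minimal sketch of the corrected loop body, reusing the names from the question:
for i in range(defects.shape[0]):
    # each defect row is (start index, end index, farthest-point index, depth)
    s, e, f, d = defects[i, 0]
    start = tuple(cont[s][0])
    end = tuple(cont[e][0])
    far = tuple(cont[f][0])  # f is now a valid contour index, so no IndexError
With the original order, d (the fixed-point depth, often a large number such as 1635) was used to index the contour, which is exactly what produced the IndexError.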

Related

I have an eye blink detector that sends a keyboard press in Python. When I load games, the game doesn't detect the keyboard press, but Notepad does

This code SHOULD detect when my eyes blink and then press space. In certain games (like SCP:CB) it does not press space, but in Notepad and Chrome it does.
import cv2
import pyautogui

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                     "haarcascade_frontalface_default.xml")
eyes_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                     "haarcascade_eye_tree_eyeglasses.xml")
first_read = True
cap = cv2.VideoCapture(0)
ret, image = cap.read()
while ret:
    ret, image = cap.read()
    gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray_scale = cv2.bilateralFilter(gray_scale, 5, 1, 1)
    faces = face_cascade.detectMultiScale(gray_scale, 1.3, 5, minSize=(200, 200))
    if len(faces) > 0:
        for (x, y, w, h) in faces:
            image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            eye_face = gray_scale[y:y + h, x:x + w]  # image
            eye_face_clr = image[y:y + h, x:x + w]  # get the eyes
            eyes = eyes_cascade.detectMultiScale(eye_face, 1.3, 5, minSize=(50, 50))
            if len(eyes) >= 2:
                if first_read:
                    cv2.putText(image, "Eye's detected, press s to check blink", (70, 70), cv2.FONT_HERSHEY_SIMPLEX,
                                1, (0, 255, 0), 2)
                else:
                    cv2.putText(image, "Eye's Open", (70, 70), cv2.FONT_HERSHEY_SIMPLEX,
                                1, (255, 255, 255), 2)
            else:
                if first_read:
                    cv2.putText(image, "No Eye's detected", (70, 70),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                1, (255, 255, 255), 2)
                else:
                    cv2.putText(image, "Blink Detected.....!!!!", (70, 70),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                1, (0, 255, 0), 2)
                    pyautogui.press("space")
                    cv2.imshow('image', image)
                    cv2.waitKey(1)
                    print("Blink Detected.....!!!!")
    else:
        cv2.putText(image, "No Face Detected.", (70, 70), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 255, 0), 2)
    cv2.imshow('image', image)
    a = cv2.waitKey(1)
    if a == ord('q'):
        break
    elif a == ord('s'):
        first_read = False
# release the web-cam
cap.release()
cv2.destroyAllWindows()
This is the code, and what it SHOULD do is make a game register a space press when I blink, kind of like "Before Your Eyes".

How do I fix an OpenCV cvtColor error in my project?

I am building a handwriting recognition project, but I am getting a cvtColor error while trying to convert the image from BGR to HSV.
cap = cv2.VideoCapture(0)
Lower_blue = np.array([110, 50, 50])
Upper_blue = np.array([130, 255, 255])
pred_class = 0
pts = deque(maxlen = 512)
blackboard = np.zeros((480, 640, 3), dtype = np.uint8)
digit = np.zeros((200, 200, 3), dtype = np.uint8)
while (cap.isOpened()):
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(imgHSV, Lower_blue, Upper_blue)
    blur = cv2.medianBlur(mask, 15)
    blur = cv2.GaussianBlur(blur, (5, 5), 0)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
    center = None
    if len(cnts) >= 1:
        contour = max(cnts, key = cv2.contourArea)
        if cv2.contourArea(contour) > 250:
            ((x, y), radius) = cv2.minEnclosingCircle(contour)
            cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(img, center, 5, (0, 255, 255), -1)
            M = cv2.moments(contour)
            center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
            pts.appendleft(center)
            for i in range(1, len(pts)):
                if pts[i - 1] is None or pts[i] is None:
                    continue
                cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 10)
                cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 5)
    elif len(cnts) == 0:
        if len(pts) != []:
            blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
            blur1 = cv2.medianBlur(blackboard_gray, 15)
            blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
            thresh1 = cv2.threshold(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
            blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
            if len(blackboard_cnts) >= 1:
                cnt = max(blackboard_cnts, key = cv2.contourArea)
                print(cv2.contourArea(cnt))
                if cv2.contourArea(cnt) > 2000:
                    x, y, w, h = cv2.boundingRect(cnt)
                    digit = blackboard_gray[y:y + h, x:x + w]
                    # new Image = process_letter(digit)
                    pred_probab, pred_class = keras_predict(model1, digit)
                    print(pred_class, pred_probab)
            pts = deque(maxlen = 512)
            blackboard = np.zeros((480, 640, 3), dtype = uint8)
    cv2.putText(img, "Conv Network : " + str(letter_count[pred_class]), (10, 470),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imshow("Frame", img)
    cv2.imshow("Contours", thresh)
    k = cv2.waitkey(10)
    if k == 27:
        break
How do I fix this OpenCV error? Please help! I am getting the following error:
error: OpenCV(3.4.1) C:\Miniconda3\conda-bld\opencv-suite_1533128839831\work\modules\imgproc\src\color.cpp:11214: error: (-215) (scn == 3 || scn == 4) && (depth == 0 || depth == 5) in function cv::cvtColor
I think the error error: (-215) (scn == 3 || scn == 4) && (depth == 0 || depth == 5) in function cv::cvtColor occurs because the camera is unable to capture a frame correctly. Check this by printing the ret value: if the camera captures the frame correctly it will be True, otherwise False. The error above happens because None is being passed to the cv2.cvtColor function. You can use the code below as a safety check:
if ret is True:
    # your code goes here
else:
    break
Moreover, please consider below points:
It's cv2.waitKey(10), not cv2.waitkey(10) (capital 'K').
The cv2.findContours function returns 3 values (image, contours, hierarchy) in OpenCV 3.x but only 2 values (contours, hierarchy) in OpenCV 4.x; a version-agnostic way to unpack it is sketched just before the modified code below.
len(pts) != [] doesn't make sense. Here, you are trying to compare a number to an empty list. Change it to len(pts) != 0.
What is letter_count inside the cv2.putText call? Kindly recheck it.
Instead of re-initializing the deque (pts = deque(maxlen = 512)) when len(cnts) == 0 and len(pts) != 0, you can reuse the old pts by simply clearing it with pts.clear(). Just a thought!
Also, add the code below at the end:
cap.release()
cv2.destroyAllWindows()
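On the findContours point, here is a minimal, version-agnostic sketch (my own addition, reusing thresh from the question, not part of the modified code below); it works because the contours are always the second-to-last item of whatever tuple cv2.findContours returns:
# OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return (contours, hierarchy)
result = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = result[-2]
hierarchy = result[-1]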
Try the modified code below (tested with OpenCV v4.0.0):
import cv2
import numpy as np
from collections import deque

cap = cv2.VideoCapture(0)
Lower_blue = np.array([110, 50, 50])
Upper_blue = np.array([130, 255, 255])
pred_class = 0
pts = deque(maxlen = 512)
blackboard = np.zeros((480, 640, 3), dtype = np.uint8)
digit = np.zeros((200, 200, 3), dtype = np.uint8)

while (cap.isOpened()):
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(imgHSV, Lower_blue, Upper_blue)
    blur = cv2.medianBlur(mask, 15)
    blur = cv2.GaussianBlur(blur, (5, 5), 0)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[0]
    center = None
    if len(cnts) >= 1:
        contour = max(cnts, key = cv2.contourArea)
        if cv2.contourArea(contour) > 250:
            ((x, y), radius) = cv2.minEnclosingCircle(contour)
            cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(img, center, 5, (0, 255, 255), -1)
            M = cv2.moments(contour)
            center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
            pts.appendleft(center)
            # print(pts)
            for i in range(1, len(pts)):
                if pts[i - 1] is None or pts[i] is None:
                    print("Continue")
                    continue
                cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 10)
                cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 5)
        cv2.imshow("Frame", img)
        cv2.imshow("Contours", thresh)
        # press q to stop
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        if len(pts) != 0:
            blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
            blur1 = cv2.medianBlur(blackboard_gray, 15)
            blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
            thresh1 = cv2.threshold(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
            print('Hello')
            blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[0]
            if len(blackboard_cnts) >= 1:
                cnt = max(blackboard_cnts, key = cv2.contourArea)
                print(cv2.contourArea(cnt))
                if cv2.contourArea(cnt) > 2000:
                    x, y, w, h = cv2.boundingRect(cnt)
                    digit = blackboard_gray[y:y + h, x:x + w]
                    # new Image = process_letter(digit)
                    # pred_probab, pred_class = keras_predict(model1, digit)
                    print(digit.shape)
            # pts = deque(maxlen = 512)
            pts.clear()
            blackboard = np.zeros((480, 640, 3), dtype = np.uint8)
        # cv2.putText(img, "Conv Network : " + str(letter_count[pred_class]), (10, 470), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, "Conv Network :", (10, 470), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Frame", img)
        cv2.imshow("Contours", thresh)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

cap.release()
cv2.destroyAllWindows()

"no module named imutils" when it says imutils is installed?

I want to run this code on my Raspberry Pi 3. I have used pip install imutils on the Pi, but when I run the code via the CLI it returns "No module named imutils". I do not wish to use virtual environments. I have cv2 running correctly on the Pi with no problems; is there a fix for this imutils problem?
I have tried updating, upgrading, and even removing imutils, but the module is still needed by the code.
import numpy as np
import cv2
import Person
import time
import imutils
import datetime

cap = cv2.VideoCapture('testVideo.mp4')
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)  # Create the background subtractor

kernelOp = np.ones((3, 3), np.uint8)
kernelOp1 = np.ones((7, 7), np.uint8)
kernelOp2 = np.ones((5, 5), np.uint8)
kernelCl = np.ones((11, 11), np.uint8)
kernelCl1 = np.ones((20, 20), np.uint8)
kernelCl2 = np.ones((25, 25), np.uint8)

# Variables
font = cv2.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
areaTH = 5000
w_margin = 50
h_margin = 50
wmax = 500

import pdb;
pdb.set_trace()  # start of debugging

# Display variables
cnt_up = 0
cnt_down = 0
line_down_color = (255, 0, 0)
line_up_color = (0, 0, 255)
pts_L1 = np.array([[0, 320], [480, 320]])
pts_L2 = np.array([[0, 400], [480, 400]])
counter = 0

while (cap.isOpened()):
    ret, frame = cap.read()  # read a frame
    frame = imutils.resize(frame, width=min(640, frame.shape[1]))
    fgmask = fgbg.apply(frame)  # Use the subtractor
    try:
        ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
        mask0 = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp2)
        mask = cv2.morphologyEx(mask0, cv2.MORPH_CLOSE, kernelCl2)
    except:
        # if there are no more frames to show...
        print('EOF')
        break
    maskOriginal = mask
    _, contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    ######## if a contour is too big, cut it in half
    mask2_flag = 0
    for cnt in contours0:
        area = cv2.contourArea(cnt)
        if area > areaTH:
            M = cv2.moments(cnt)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            x, y, w, h = cv2.boundingRect(cnt)
            if w > wmax:
                mask2 = cv2.line(mask, ((x + w / 2), 0), ((x + w / 2), 640), (0, 0, 0), 10)
                mask2_flag = 1
    if mask2_flag == 0:
        mask2 = mask
    cv2.imshow('Mask line', mask2)
    cv2.imshow('mask to open', mask0)
    cv2.imshow('Mask initialize', maskOriginal)
    cv2.imshow('initial subtraction', imBin)
    _, contours0, hierarchy = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours0:
        cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3, 8)
        area = cv2.contourArea(cnt)
        for i in persons:
            i.updateDingimas(i.getDingimas() + 1)
            if i.getDingimas() > 25:
                persons.remove(i)
        if area > areaTH:
            M = cv2.moments(cnt)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            x, y, w, h = cv2.boundingRect(cnt)
            print('x{} y{} w{} h{}'.format(x, y, w, h))
            new = True
            for i in persons:
                if abs(x - i.getX()) <= w_margin and abs(y - i.getY()) <= h_margin:
                    new = False
                    i.updateCoords(cx, cy)
                    i.updateDingimas(0)
                    break
            if new == True:
                p = Person.MyPerson(pid, cx, cy, max_p_age)
                persons.append(p)
                pid += 1
            cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
            img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
            cv2.imshow('img', img)
    #########################
    # Trajectory rendering
    #########################
    for i in persons:
        if len(i.getTracks()) >= 2:
            pts = np.array(i.getTracks(), np.int32)
            pts = pts.reshape((-1, 1, 2))
            frame = cv2.polylines(frame, [pts], False, i.getRGB())
        if i.getDir() == None:
            i.kurEina(pts_L2[0, 1], pts_L1[0, 1])
            if i.getDir() == 'up':
                cnt_up += 1
                print('Timestamp: {:%H:%M:%S} UP {}'.format(datetime.datetime.now(), cnt_up))
            elif i.getDir() == 'down':
                cnt_down += 1
                print('Timestamp: {:%H:%M:%S} DOWN {}'.format(datetime.datetime.now(), cnt_down))
        cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.7, i.getRGB(), 1, cv2.LINE_AA)
    #########################
    # Rendering
    #########################
    str_in = 'In: ' + str(cnt_up)
    str_out = 'Out: ' + str(cnt_down)
    frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=4)
    frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=4)
    cv2.putText(frame, str_in, (10, 50), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, str_out, (10, 100), font, 1, (255, 0, 0), 2, cv2.LINE_AA)
    cv2.imshow('Frame', frame)
    # Abort and exit with 'Q' or ESC
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()  # release video file
cv2.destroyAllWindows()  # close all openCV windows
I want to run this code without the "No module named imutils" error.
If you intend to use the module with Python 3, you need to install it with pip3 so that it is installed in the correct location.
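For example (assuming a typical Raspberry Pi OS setup where python3 runs the script), something like the following should install the package for the right interpreter:
pip3 install imutils
# or, to be explicit about which interpreter gets the package:
python3 -m pip install imutils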

Control Mouse using Gesture

When I run the code below, I get the following error:
Traceback (most recent call last):
File "path\gesture_mouse.py", line 63, in
cv2.circle(img, (cx, cy), (w+h)/4, (0, 0, 255), 2)
TypeError: integer argument expected, got float
[ WARN:0] terminating async callback
I tried pre-defining the center and the radius and passing them in separately.
import cv2
import numpy as np
from pynput.mouse import Button, Controller
import wx

mouse = Controller()
app = wx.App(False)
(sx, sy) = wx.GetDisplaySize()
(camx, camy) = (320, 240)
lowerBound = np.array([33, 80, 40])
upperBound = np.array([102, 255, 255])
cam = cv2.VideoCapture(0)
kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))
pinchFlag = 0

while True:
    ret, img = cam.read()
    img = cv2.resize(img, (340, 220))
    # convert BGR to HSV
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # create the Mask
    mask = cv2.inRange(imgHSV, lowerBound, upperBound)
    # morphology
    maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
    maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
    maskFinal = maskClose
    conts, h = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    if len(conts) == 2:
        if pinchFlag == 1:
            pinchFlag = 0
            mouse.release(Button.left)
        x1, y1, w1, h1 = cv2.boundingRect(conts[0])
        x2, y2, w2, h2 = cv2.boundingRect(conts[1])
        cv2.rectangle(img, (x1, y1), (x1+w1, y1+h1), (255, 0, 0), 2)
        cv2.rectangle(img, (x2, y2), (x2+w2, y2+h2), (255, 0, 0), 2)
        cx1 = x1+w1/2
        cy1 = y1+h1/2
        cx2 = x2+w2/2
        cy2 = y2+h2/2
        cx = (cx1+cx2)/2
        cy = (cy1+cy2)/2
        cv2.line(img, (cx1, cy1), (cx2, cy2), (255, 0, 0), 2)
        cv2.circle(img, (cx, cy), 2, (0, 0, 255), 2)
        mouseLoc = (sx-(cx*sx/camx), cy*sy/camy)
        mouse.position = mouseLoc
        while mouse.position != mouseLoc:
            pass
    elif len(conts) == 1:
        x, y, w, h = cv2.boundingRect(conts[0])
        if pinchFlag == 0:
            pinchFlag = 1
            mouse.press(Button.left)
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cx = x+w/2
        cy = y+h/2
        cv2.circle(img, (cx, cy), (w+h)/4, (0, 0, 255), 2)
        mouseLoc = (sx-(cx*sx/camx), cy*sy/camy)
        mouse.position = mouseLoc
        while mouse.position != mouseLoc:
            pass
    cv2.imshow("cam", img)
    cv2.waitKey(5)
I was expecting that when two green objects appear in the frame, each would be enclosed in a blue box, with a blue line drawn between the centers of the two boxes and a red dot on it marking the location of the cursor (mouse).
As the error says, the function expects ints as input, but you are passing in floats, because the values come from division:
print(type(10)) #<class 'int'>
print(type(5)) #<class 'int'>
print(type(10/5)) #<class 'float'>
print(10/5) #2.0
The solution is to convert the calculated values to integers:
cv2.circle(img, (int(cx), int(cy)), int((w+h)/4), (0, 0, 255), 2)
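An alternative sketch (my addition, not part of the answer above) is to use floor division when computing the values in the single-contour branch, so everything passed to the OpenCV drawing calls is already an int:
cx = x + w // 2            # floor division keeps the centre coordinates as ints
cy = y + h // 2
radius = (w + h) // 4
cv2.circle(img, (cx, cy), radius, (0, 0, 255), 2)
The same idea applies to cx1, cy1, cx2, cy2 and the cv2.line call in the two-contour branch.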

Python - Face Recognition TypeError: 'int' object is not iterable

I am trying to implement a magic mirror facial recognition module for a project. When I run the tester on the file I get an error that I cannot seem to resolve.
This is what displays in terminal:
Loading training data...
ALGORITHM: LBPH
Training data loaded!
Picam selected...
Traceback (most recent call last):
File "facerecognition.py", line 61, in <module>
label, confidence = model.predict(crop)
TypeError: 'int' object is not iterable
My PiCamera stays active and I can see another image, which is my face, in the background. Below is my code:
import cv2  # OpenCV Library
import lib.face as face
import lib.config as config
import time
import os

# Load training data into model
print('Loading training data...')
if config.RECOGNITION_ALGORITHM == 1:
    print "ALGORITHM: LBPH"
    model = cv2.face.createLBPHFaceRecognizer(threshold=config.POSITIVE_THRESHOLD)
elif config.RECOGNITION_ALGORITHM == 2:
    print "ALGORITHM: Fisher"
    model = cv2.createFisherFaceRecognizer(threshold=config.POSITIVE_THRESHOLD)
else:
    print "ALGORITHM: Eigen"
    model = cv2.createEigenFaceRecognizer(threshold=config.POSITIVE_THRESHOLD)
model.load("training.xml")
print('Training data loaded!')

camera = config.get_camera()
time.sleep(1)  # give the camera a second to warm up

while True:
    # camera video feed
    frame = camera.read()
    image = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    faces = face.detect_face(image)
    if faces is not None:
        for i in range(0, len(faces)):
            x, y, w, h = faces[i]
            x_face = x
            y_face = y
            if config.RECOGNITION_ALGORITHM == 1:
                crop = face.crop(image, x, y, w, h)
            else:
                crop = face.resize(face.crop(image, x, y, w, h))
            label, confidence = model.predict(crop)
            cv2.rectangle(frame, (x, y), (x + w, y + h), 255)
            cv2.putText(frame, str(h), (x + w, y + h + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            if (label != -1 and label != 0):
                # If person is close to the camera use smaller POSITIVE_THRESHOLD
                if h > 190 and confidence < config.POSITIVE_THRESHOLD:
                    cv2.putText(frame, config.users[label - 1], (x - 3, y - 8), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 1)
                    cv2.putText(frame, str(confidence), (x - 2, y + h + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    print('User:' + config.users[label - 1])
                # If person is further away from the camera but POSITIVE_THRESHOLD is still under 40 assume it is the person
                elif h <= 190 and confidence < config.POSITIVE_THRESHOLD:
                    cv2.putText(frame, config.users[label - 1], (x - 3, y - 8), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 1)
                    cv2.putText(frame, str(confidence), (x - 2, y + h + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    print('User:' + config.users[label - 1])
                # If person is further away from the camera be a bit more generous with the POSITIVE_THRESHOLD and add a not sure statement
                elif h < 190:
                    cv2.putText(frame, "Guess: " + config.users[label - 1], (x - 3, y - 8), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
                    cv2.putText(frame, str(confidence), (x - 2, y + h + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    print('Guess:' + config.users[label - 1])
                else:
                    cv2.putText(frame, "Unknown", (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 1)
                    print('Unknown face')
            else:
                cv2.putText(frame, "Unknown", (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 1)
                print('Unknown face')
            # If person is close enough
            if h > 250:
                eyes = face.detect_eyes(face.crop(image, x, y, w, h))
                for i in range(0, len(eyes)):
                    x, y, w, h = eyes[i]
                    cv2.rectangle(frame, (x + x_face, y + y_face - 30), (x + x_face + w + 10, y + y_face + h - 40), (94, 255, 0))
                    cv2.putText(frame, "Eye " + str(i), (x + x_face, y + y_face - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    if ('DISPLAY' in os.environ):
        # Display Image
        cv2.imshow('Facial recognition', frame)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break
    else:
        print('writing face.jpg image')
        cv2.imwrite('face.jpg', frame)
        camera.stop()
        break

# Release camera and close windows
camera.stop()
cv2.destroyAllWindows()
Any help would be appreciated; I am really stuck.
