This is the error I'm getting right now, and I just can't figure out what the problem is.
INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
Traceback (most recent call last):
File "C:\AIproject\virMouse.py", line 18, in <module>
lmList, bbox = detector.findPosition(img)
ValueError: not enough values to unpack (expected 2, got 0)
Process finished with exit code 1
Here is my code for the virtual mouse program that I have written so far.
import cv2
import numpy as np
import time
import HTmodule as htm
import autopy

wCam, hCam = 640, 480

cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
pTime = 0
detector = htm.handDetector(maxHands=1)

while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmList, bbox = detector.findPosition(img)

    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, f'FPS:{int(fps)}', (5, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (204, 0, 0), 2)

    cv2.imshow("Image", img)
    cv2.waitKey(1)
And here is the code for the hand tracking module that I wrote.
import cv2
import mediapipe as mp
import time


class handDetector():
    def __init__(self, mode=False, maxHands=2, modelC=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.modelC = modelC
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelC, self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(result.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                # print(id, cx, cy)
                lmList.append([id, cx, cy])
                # if id == 0:
                if draw:
                    cv2.circle(img, (cx, cy), 3, (229, 25, 66), cv2.FILLED)
        return lmList


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img, draw=False)
        # print(lmList)  # PRINTS THE LIST OF LANDMARKS
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, f'FPS:{int(fps)}', (5, 30), cv2.FONT_HERSHEY_DUPLEX, 1, (9, 9, 255), 1)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
I don't think it has anything to do with autopy; I think it has something to do with the findPosition() function, but I can't figure out what it is. I just started studying computer vision with Python and decided to build this project for practice, but I can't seem to debug this error. Any kind of help would be greatly appreciated. Thank you very much.
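For context, the unpacking fails because this module's findPosition() returns a single list (an empty one when no hand is in frame), while virMouse.py tries to unpack two values from it. Below is a minimal sketch of a findPosition() that also returns a bounding box, so the lmList, bbox = ... line unpacks; the bounding-box computation from the min/max landmark coordinates is an assumption for illustration, not something defined in the original module.

# sketch: drop-in replacement for findPosition() inside the handDetector class above
def findPosition(self, img, handNo=0, draw=True):
    # returns (lmList, bbox): the landmark list plus a bounding box around the hand
    xList, yList = [], []
    bbox = ()
    lmList = []
    if self.results.multi_hand_landmarks:
        myHand = self.results.multi_hand_landmarks[handNo]
        for id, lm in enumerate(myHand.landmark):
            h, w, c = img.shape
            cx, cy = int(lm.x * w), int(lm.y * h)
            xList.append(cx)
            yList.append(cy)
            lmList.append([id, cx, cy])
            if draw:
                cv2.circle(img, (cx, cy), 3, (229, 25, 66), cv2.FILLED)
        # bounding box from the extreme landmark coordinates (assumed layout)
        xmin, xmax = min(xList), max(xList)
        ymin, ymax = min(yList), max(yList)
        bbox = (xmin, ymin, xmax, ymax)
        if draw:
            cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20), (0, 255, 0), 2)
    return lmList, bbox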
I'm having some trouble running the module on Python 3.10.1. This is my code:
import mediapipe as mp
import cv2
import time


class handDetector:
    def __init__(self, mode=False, maxHands=2, complexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.complexity = complexity
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpDraw = mp.solutions.drawing_utils

    def FindHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.hands.process(imgRGB)
        landmarks = results.multi_hand_landmarks
        # print(results.multi_hand_landmarks)
        if landmarks:
            for handLms in landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img,
                                               handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img
        # for id, lm in enumerate(handLms.landmark):
        #     print(id, lm)
        #     height, width, c = img.shape
        #     cx, cy = int(lm.x*width), int(lm.y*height)
        #     print(id, ", x=", cx, ", y=", cy)
        #     if id % 10 == 0:
        #         cv2.circle(img, (cx, cy), 8, (255, 0, 255), cv2.FILLED)


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(1)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.FindHands(img)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70),
                    cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == '__main__':
    main()
It gives me the following traceback:
Traceback (most recent call last):
File "C:\Users\Eduardo.PC\Documents\UNAM 2020-24\COMPU\HandTracker\HandTrackModule.py", line 65, in <module>
main()
File "C:\Users\Eduardo.PC\Documents\UNAM 2020-24\COMPU\HandTracker\HandTrackModule.py", line 55, in main
img = detector.FindHands(img)
File "C:\Users\Eduardo.PC\Documents\UNAM 2020-24\COMPU\HandTracker\HandTrackModule.py", line 21, in FindHands
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.error: OpenCV(4.5.4) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
I'm following a YouTube course on computer vision, linked here. At around the 30-minute mark, the module shown above is added. I pretty much copied it, and it doesn't work. What's happening?
Thanks, and sorry for the long post.
Try this. The key change is checking the return value of cap.read() before processing the frame: cvtColor raises that assertion error when it receives an empty frame, which typically happens when the index passed to cv2.VideoCapture doesn't match a connected camera or the capture fails.
import mediapipe as mp
import cv2
import time


class handDetector:
    def __init__(self, mode=False, maxHands=2, complexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.complexity = complexity
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        # the Hands object and drawing helper that FindHands() relies on
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.complexity,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def FindHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.hands.process(imgRGB)
        landmarks = results.multi_hand_landmarks
        # print(results.multi_hand_landmarks)
        if landmarks:
            for handLms in landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img,
                                               handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(1)  # use the index of a camera that is actually connected (often 0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        if not success:
            # stop when the camera returns no frame; an empty frame is what makes cvtColor fail
            break
        img = detector.FindHands(img)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70),
                    cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == '__main__':
    main()
Hey everyone, I keep getting an error when I run this code. I wrote it while following an online YouTube video about hand detection (MediaPipe). In the video there was no issue, so I assumed I missed something. I have spent several hours trying to research and find the seemingly small error, but being a beginner I can't seem to get it. If you can, please help me out here.
Video -> https://www.youtube.com/watch?v=NZde8Xt78Iw
Thank you
--------------------------------Code Below------------------------------------------------
import cv2
import mediapipe as mp
import time


class handDetector:
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks(handNo)
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                # print(id, cx, cy)
                lmList.append(id, cx, cy)
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        return lmList


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(1)
    detector = handDetector
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
---------------------Error Code Below------------------------
C:\Users\kimsa\PycharmProjects\ROBO\venv\Scripts\python.exe C:/Users/kimsa/PycharmProjects/ROBO/HandTrackingModule.py
[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
Traceback (most recent call last):
File "C:/Users/kimsa/PycharmProjects/ROBO/HandTrackingModule.py", line 70, in <module>
main()
File "C:/Users/kimsa/PycharmProjects/ROBO/HandTrackingModule.py", line 54, in main
img = detector.findHands(img)
TypeError: findHands() missing 1 required positional argument: 'img'
Process finished with exit code 1
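For reference, this TypeError usually means the class object itself is being used instead of an instance; note the line detector = handDetector in main() above. A minimal illustration (sketch only, not a complete fix for this code):

detector = handDetector     # class object: detector.findHands(img) binds img to `self`, so 'img' is reported missing
detector = handDetector()   # instance: detector.findHands(img) receives img as intended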
I am getting the following error whenever I try to run my code. I am following a tutorial for hand tracking and followed the steps correctly, but I still get some sort of error.
Link to Video: https://www.youtube.com/watch?v=01sAkU_NvOY&t=2100s
Traceback (most recent call last):
File "C:/Users/aryan/Desktop/user/Study/Com_vis/Hand_T_M.py", line 56, in <module>
main()
File "C:/Users/aryan/Desktop/user/Study/Com_vis/Hand_T_M.py", line 41, in main
detector = handDetector()
File "C:/Users/aryan/Desktop/user/Study/Com_vis/Hand_T_M.py", line 12, in __init__
self.detectionCon, self.trackCon)
File "C:\Users\aryan\Desktop\user\Study\Com_vis\venv\lib\site-packages\mediapipe\python\solutions\hands.py", line 127, in __init__
outputs=['multi_hand_landmarks', 'multi_handedness'])
File "C:\Users\aryan\Desktop\user\Study\Com_vis\venv\lib\site-packages\mediapipe\python\solution_base.py", line 260, in __init__
for name, data in (side_inputs or {}).items()
File "C:\Users\aryan\Desktop\user\Study\Com_vis\venv\lib\site-packages\mediapipe\python\solution_base.py", line 260, in <dictcomp>
for name, data in (side_inputs or {}).items()
File "C:\Users\aryan\Desktop\user\Study\Com_vis\venv\lib\site-packages\mediapipe\python\solution_base.py", line 513, in _make_packet
return getattr(packet_creator, 'create_' + packet_data_type.value)(data)
TypeError: create_int(): incompatible function arguments. The following argument types are supported:
1. (arg0: int) -> mediapipe.python._framework_bindings.packet.Packet
Invoked with: 0.5
Process finished with exit code 1
I am unable to figure out the problem with the code. I am running Python 3.7.9, and here is my code.
Filename: Hand_T_M.py
import cv2
import mediapipe as mp
import time


class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                # print(id, cx, cy)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        return lmList


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
With the new mediapipe framework you have to write:
self.complexity = complexity
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.complexity, self.detectionCon, self.trackCon)
You must replace your __init__ method with this:
class handDetector():
    def __init__(self, mode=False, maxHands=3, complexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.complexity = complexity
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.complexity, self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
Thanks.
You need to use the correct mediapipe version; see also this GitHub source code:
from mediapipe.python._framework_bindings import packet
Go to PyCharm: File -> Settings -> Project: YOUR-PROJECT -> Python Interpreter.
Uninstall the current mediapipe package.
Install a previous version of mediapipe (in my case it is 0.8.8).
Alternatively, you can do it with pip:
pip uninstall mediapipe
pip install mediapipe==0.8.8
Maybe you can try this code.
class handDetector():
    def __init__(self, mode=False, maxHands=3, complexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.complexity = complexity
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.complexity, self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
1. pip uninstall mediapipe
2. pip install mediapipe==0.8.3.1
This may solve your problem.
I've recently been learning computer vision with Python, and while making a hand detector project I encountered this error:
Traceback (most recent call last):
  File "c:\Users\idhant\OneDrive - 007lakshya\Idhant\Programming\Projects\MY MACHINE LEARNING PROJECTS\Hand Tracking Module.py", line 64, in <module>
    main()
  File "c:\Users\idhant\OneDrive - 007lakshya\Idhant\Programming\Projects\MY MACHINE LEARNING PROJECTS\Hand Tracking Module.py", line 41, in main
    detector = handDetector()
  File "c:\Users\idhant\OneDrive - 007lakshya\Idhant\Programming\Projects\MY MACHINE LEARNING PROJECTS\Hand Tracking Module.py", line 13, in __init__
    self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solutions\hands.py", line 114, in __init__
    super().__init__(
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solution_base.py", line 258, in __init__
    self._input_side_packets = {
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solution_base.py", line 259, in <dictcomp>
    name: self._make_packet(self._side_input_type_info[name], data)
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solution_base.py", line 513, in _make_packet
    return getattr(packet_creator, 'create_' + packet_data_type.value)(data)
TypeError: create_int(): incompatible function arguments. The following argument types are supported:
    1. (arg0: int) -> mediapipe.python._framework_bindings.packet.Packet
Invoked with: 0.5

[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
I have tried hard to debug it, but without success, so please help me. Here's the code that I've written:
import cv2
import mediapipe as mp
import time


class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon,
                                        self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if results.multi_hand_landmarks:
            for handLms in results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
        return img
        # for id, lm in enumerate(handLms.landmark):
        #     # print(id, lm)
        #     h, w, c = img.shape
        #     cx, cy = int(lm.x*w), int(lm.y*h)
        #     print(id, cx, cy)
        #     # if id == 4:
        #     cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
I've tried making a hand detector class, which does the same hand detection but can also be used in my other files; that's why I've written this code, and I ran into this issue.
In def __init__(), at the line:
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
try adding the model complexity as the third parameter of Hands(), as below:
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex, self.detectionCon, self.trackCon)
That makes a total of five parameters in self.mpHands.Hands().
Here is my full code that works for me:
class handDetector():
    def __init__(self, mode=False, maxHands=1, modelComplexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.modelComplex = modelComplexity
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex,
                                        self.detectionCon, self.trackCon)
I was facing the same issue; just add model_complexity to your __init__ function and you are good to go:
def __init__(self, mode=False, model_complexity=1, upBody=False, smooth=True, detectionCon=0.5, trackCon=0.5):
You need to assign one more parameter in the __init__ method of the handDetector() class.
Your complete code may look like:
import cv2
import mediapipe as mp
import time


# class creation
class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, modelComplexity=1, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.modelComplex = modelComplexity
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils  # draws small dots on the hand's 21 landmark points

    def findHands(self, img, draw=True):
        # send the RGB image to the hands model
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)  # process the frame
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    # draw the dots and connect them
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        """Returns a list with the id and pixel position
        of every landmark of the chosen hand."""
        lmlist = []
        # check whether any landmark was detected
        if self.results.multi_hand_landmarks:
            # which hand are we talking about
            myHand = self.results.multi_hand_landmarks[handNo]
            # get the id number and landmark information
            for id, lm in enumerate(myHand.landmark):
                # id is the index number of the landmark
                # height, width and channels of the frame
                h, w, c = img.shape
                # find the pixel position (center) of the landmark
                cx, cy = int(lm.x * w), int(lm.y * h)
                # print(id, cx, cy)
                lmlist.append([id, cx, cy])
                # draw a circle on each landmark
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        return lmlist


def main():
    # frame rates
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Video", img)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
I don't know if it still helps by now, but perhaps for future reference: this is a known issue with the latest version of mediapipe. Reverting to version 0.8.8 will solve the problem.
It's a problem of indentation: findHands() needs to be part of the handDetector() class. So just indent findHands() and it should work.
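A minimal sketch of the layout this refers to (method bodies omitted):

# sketch: findHands() must be indented inside the class, otherwise it is a
# module-level function with no access to self.hands
class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        ...

    def findHands(self, img, draw=True):   # indented one level, inside the class
        ...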
Solution:
def __init__(self, mode=False, maxHands = 2, detectionCon=0.5, trackCon = 0.5):
The problem is with line 6 of the code. The library has been updated, and you have to pass an integer value for "detectionCon". For best results, initialize detectionCon = 1 and trackCon = 0.5.
If this solves your problem, upvote so others can see it easily.
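As an aside, here is a sketch that sidesteps the positional mismatch entirely by passing the options as keywords inside __init__. These are the documented parameter names of mp.solutions.hands.Hands; model_complexity is omitted because its availability depends on the installed mediapipe version.

# sketch: keyword arguments keep detectionCon from being consumed as the
# integer model-complexity argument on newer mediapipe versions
self.hands = self.mpHands.Hands(static_image_mode=self.mode,
                                max_num_hands=self.maxHands,
                                min_detection_confidence=self.detectionCon,
                                min_tracking_confidence=self.trackCon)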