so i'm working on a project that detect the hands move and use it to control the computer mouse , every thing went so good but now i need to convert it to exe file.
when i run my code in the pysharm or in the CMD console its working so good but when i convert i to exe file i cant open it and it keeps showing me some errors that i could not solve or find any solution online , i will be so greatfull for help cuz i really need it ( this code is not mine i took it online and modified it a little )
Sistem information :
python 3.7.8
pyinstaller 4.8
mediapipe 0.8.9.1
autopy 4.0.0
opencv 4.5.5.62
numpy 1.21.5
windows 10
My firts code:
import cv2
import mediapipe as mp
import math
class HandDetector:
def __init__(self,
mode=False,
max_num_hands=1,
complexity=1,
detection=0.5,
track=0.5):
self.mode = mode
self.maxHands = max_num_hands
self.complexity = complexity
self.detectionCon = detection
self.trackCon = track
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode,
self.maxHands,
self.complexity,
self.detectionCon,
self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
self.tipIds = [4, 8, 12, 16, 20]
def findHands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms,
self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo=0, draw=True):
xlist = []
ylist = []
bbox = []
self.lmList = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
xlist.append(cx)
ylist.append(cy)
self.lmList.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
xmin, xmax = min(xlist), max(xlist)
ymin, ymax = min(ylist), max(ylist)
bbox = xmin, ymin, xmax, ymax
if draw:
cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
(0, 255, 0), 2)
return self.lmList, bbox
def fingersUp(self):
fingers = []
# Thumb
if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
# Fingers
for id in range(1, 5):
if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
fingers.append(1)
else:
fingers.append(0)
return fingers
def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
x1, y1 = self.lmList[p1][1:]
x2, y2 = self.lmList[p2][1:]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
length = math.hypot(x2 - x1, y2 - y1)
return length, img, [x1, y1, x2, y2, cx, cy]
def main():
cam = cv2.VideoCapture(1, cv2.CAP_DSHOW)
detector = HandDetector()
while True:
success, img = cam.read()
img = detector.findHands(img)
lmlist, bbox = detector.findPosition(img)
if len(lmlist) != 0:
print(lmlist[4])
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
And the second code :
import cv2
import first as htm
import numpy as np
import autopy
import mouse
wCam, hCam = 640, 480
frameR = 90
smoothening = 7
plocX, plocY = 0, 0
clocX, clocY = 0, 0
cam = cv2.VideoCapture(1, cv2.CAP_DSHOW)
cam.set(3, wCam)
cam.set(4, hCam)
detector = htm.HandDetector(max_num_hands=1)
wScr, hScr = autopy.screen.size()
while True:
# 1. Find hand Landmarks
success, img = cam.read()
img = detector.findHands(img)
lmList, bbox = detector.findPosition(img)
# 2. Get the tip of the index and thump
if len(lmList) != 0:
x1, y1 = lmList[8][1:]
x2, y2 = lmList[4][1:]
# 3. Check which fingers are up
fingers = detector.fingersUp()
cv2.rectangle(img, (frameR, frameR), (wCam - frameR, 250),
(255, 0, 255), 2)
# 4. Only Index Finger : Moving Mode
if fingers[1] == 1:
# 5. Convert Coordinates
x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
y3 = np.interp(y1, (frameR, 250), (0, hScr))
# 6. Smoothen Values
clocX = plocX + (x3 - plocX) / smoothening
clocY = plocY + (y3 - plocY) / smoothening
# 7. Move Mouse
autopy.mouse.move(wScr - clocX, clocY)
cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
plocX, plocY = clocX, clocY
# 8. Find distance between fingers
length, img, lineInfo = detector.findDistance(4, 8, img)
length_2, img, lineInfo_2 = detector.findDistance(8, 20, img)
# 9. Click mouse if distance long / Short
if length > 125:
cv2.circle(img, (lineInfo[4], lineInfo[5]),
15, (0, 255, 0), cv2.FILLED)
mouse.click('left')
if length_2 < 115:
mouse.click('right')
# 10. Display
cv2.imshow("Image", img)
cv2.waitKey(1)
And the error :
Traceback (most recent call last):
File "Second.py", line 16, in <module>
detector = htm.HandDetector(max_num_hands=1)
File "first.py", line 23, in __init__
self.trackCon)
File "mediapipe\python\solutions\hands.py", line 129, in __init__
File "mediapipe\python\solution_base.py", line 238, in __init__
FileNotFoundError: The path does not exist.
I solved it this way on windows10.
sample *.spec:
block_cipher = None
def get_mediapipe_path():
import mediapipe
mediapipe_path = mediapipe.__path__[0]
return mediapipe_path
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
mediapipe_tree = Tree(get_mediapipe_path(), prefix='mediapipe', excludes=["*.pyc"])
a.datas += mediapipe_tree
a.binaries = filter(lambda x: 'mediapipe' not in x[0], a.binaries)
1st the spec file is created and than according to this the application is buit.
So you 1st run the command :
pyinstaller --onefile main.py
This will buid the application file (not in working condition) and a spec file. Delete everything except the spec file and do changes in spec file according to https://python.tutorialink.com/issues-compiling-mediapipe-with-pyinstaller-on-macos/
.
Now we will build the application file according to this updated spec file using command : pyinstaller main.spec
(main is the name of the spec file). Now a working exe file is built.
Related
I want a code for double clicking with only these packages:
import cv2
import pyautogui
from HandTracker import HandDetector
import math
This is the code for handtracker:
import cv2
import mediapipe as mp
import time
import math
import numpy as np
class HandDetector():
def __init__(self, imageMode=False, maxHands=2, modelComplexity=1 , detectionConfidence=0.5, trackingConfidence=0.5 ):
self.imageMode = imageMode
self.maxHands = maxHands
self.modelComplexity = modelComplexity
self.detectionConfidence = detectionConfidence
self.trackingConfidence = trackingConfidence
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.imageMode, self.maxHands, self.modelComplexity, self.detectionConfidence, self.trackingConfidence)
self.mpDraw = mp.solutions.drawing_utils
self.tipIds = [4, 8, 12, 16, 20]
def findHandLandmarks(self, cap):
# X is the raw distance, Y is the value in cm.
x = [300, 245, 200, 170, 145, 130, 112, 103, 93, 87, 80, 75, 70, 67, 62, 59, 57]
y = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
coff = np.polyfit(x, y, 2)
isImageRead, image = cap.read()
image = self.findHands(image)
landmarksList, boundingbox = self.findPosition(image)
try:
_, x1, y1 = landmarksList[5]
_, x2, y2 = landmarksList[17]
distance = int(math.sqrt((y2 - y1)**2 + (x2 - x1)**2))
A, B, C = coff
distanceCM = A * distance**2 + B * distance + C
if distanceCM < 40:
text = 'Move your hand away'
cv2.putText(image, text, (10, 70), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 3)
if distanceCM > 90:
text = 'Bring your hand closer'
cv2.putText(image, text, (10, 70), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 3)
if 90 >= distanceCM >= 40:
text = f'{int(distanceCM)} cm'
cv2.putText(image, text, (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
except IndexError:
pass
return image
def findHands(self, image, draw=True):
imgRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
if self.results.multi_hand_landmarks:
for handLandmarks in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(image, handLandmarks, self.mpHands.HAND_CONNECTIONS)
return image
def findPosition(self, image, handNo=0, draw=True):
xList = []
yList = []
bbox = []
self.landmarksList = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
h, w, c = image.shape
cx, cy = int(lm.x * w), int(lm.y * h)
xList.append(cx)
yList.append(cy)
self.landmarksList.append([id, cx, cy])
if draw:
cv2.circle(image, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
xmin, xmax = min(xList), max(xList)
ymin, ymax = min(yList), max(yList)
bbox = xmin, ymin, xmax, ymax
if draw:
cv2.rectangle(image, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20), (0, 255, 0), 2)
return self.landmarksList, bbox
def fingersMotion(self):
# Fingers[] represents which fingers are up where 1 = up and 0 = down
fingers = []
# Thumb
if self.landmarksList[self.tipIds[0]][1] > self.landmarksList[self.tipIds[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
# Fingers
for id in range(1, 5):
if self.landmarksList[self.tipIds[id]][2] < self.landmarksList[self.tipIds[id] - 2][2]:
fingers.append(1)
else:
fingers.append(0)
return fingers
def findDistance(self, p1, p2, image, draw=True, r=15, t=3):
x1, y1 = self.landmarksList[p1][1:]
x2, y2 = self.landmarksList[p2][1:]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
if draw:
cv2.line(image, (x1, y1), (x2, y2), (255, 0, 255), t)
cv2.circle(image, (x1, y1), r, (255, 0, 255), cv2.FILLED)
cv2.circle(image, (x2, y2), r, (255, 0, 255), cv2.FILLED)
cv2.circle(image, (cx, cy), r, (0, 0, 255), cv2.FILLED)
length = math.hypot(x2 - x1, y2 - y1)
return length, image, [x1, y1, x2, y2, cx, cy]
def main():
previousTime = 0
currentTime = 0
cap = cv2.VideoCapture(0)
detector = HandDetector()
while True:
image = detector.findHandLandmarks(cap)
# Frame rate
currentTime = time.time()
framesPerSecond = 1 / (currentTime - previousTime)
previousTime = currentTime
cv2.putText(image, str(int(framesPerSecond)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
# Display
cv2.imshow("Image", image)
if cv2.waitKey(1) == ord('q'):
break
if __name__ == "__main__":
main()
I have been following some tutorials online on how to make a hand tracking program which can control the volume of my PC via OpenCV. I am on a macbook pro with the M1 chip, using Python 3.8.2, and MacOS Big Sur 11.6.
Whenever I run my program, I get 15 FPS instead of the usual 30 that most people get when following the tutorial. Further investigation showed that only 1 line of code is causing my fps to drop from 30 to 15.
self.results = self.hands.process(imgRGB)
Does anyone know any way to improve my FPS, and why this line causes such a massive drop in FPS? Full code is below
First part here is the hand detection script, where the problematic line of code is (under FindHands function):
import cv2
import mediapipe as mp
import time
class handDetector():
def __init__(self, mode=False, maxHands=2, modelComplexity=1, detectionCon=0.8, trackCon=0.8):
self.mode = mode
self.maxHands = maxHands
self.modelComplex = modelComplexity
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex, self.detectionCon,
self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
def findHands(self,img, draw = True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
# print(results.multi_hand_landmarks)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo = 0, draw = True):
lmlist = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
lmlist.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 7, (255, 0, 255), cv2.FILLED)
return lmlist
def main():
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0)
detector = handDetector()
while True:
success, img = cap.read()
img = detector.findHands(img)
lmlist = detector.findPosition(img)
if len(lmlist) != 0:
print(lmlist[4])
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
This next part is the script that controls the volume control, which on another note further reduces my FPS to 5 when a hand is in frame, which is apparently due to Osascript not being very good:
import cv2
import time
import numpy as np
import DoneHand as dh
import math
import osascript
minVol = 0
maxVol = 100
wCam, hCam = 640, 480
pTime = 0
cap = cv2.VideoCapture(0)
detector = dh.handDetector()
while True:
success, img = cap.read()
img = detector.findHands(img)
lmList = detector.findPosition(img, draw = False)
if len(lmList) != 0:
#print(lmList[4], lmList[8])
x1, y1 = lmList[4][1], lmList[4][2]
x2, y2 = lmList[8][1], lmList[8][2]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
length = math.hypot(x2 - x1, y2 - y1)
print(length)
#Hand range 55 - 350
#Volume range 0 - 100
vol = np.interp(length, [55, 350], [minVol, maxVol])
print(vol)
target_volume = vol
#osascript.osascript("set volume output volume {}".format(target_volume))
if length < 55:
cv2.circle(img, (cx, cy), 15, (0, 255, 0), cv2.FILLED)
cTime = time.time()
fps = 1/(cTime-pTime)
pTime = cTime
cv2.putText(img,str(int(fps)), (10,70), cv2.FONT_HERSHEY_PLAIN, 3, (255,0,255), 3)
cv2.imshow("Img", img)
cv2.waitKey(1)
Thank you in advance!
**I am trying to create a virtual mouse. I am using packages like opencv-python, mediapipe, time, numpy, And also my own package. But seems to have some error. Can you help me on it. At first I have created the Module. Then the mouse itself. But I don't understand the problem in it. **
Hand tracking module that I have created
import cv2
import mediapipe as mp
import time
class handDetector():
def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands,
self.detectionCon, self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
def findHands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
print(self.results.multi_hand_landmarks)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo=0, draw=True):
lmList = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks [handNo]
for id, lm in enumerate(myHand.landmark):
print(id, lm)
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
print(id, cx, cy)
lmList.append( [ id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
return lmList
def main():
pTime = 0
cTime = 0
cap = cv2.VideoCapture(1)
detector = handDetector()
while True:
success, img = cap.read()
img = detector.findHands(img)
lmList = detector.findPosition(img)
if len(lmList) != 0:
print(lmList[4])
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 255), 3)
cv2.imshow("Image", img)
if cv2.waitKey(1) == ord("q"):
break
if __name__ == "__main__":
main()
Here is the software code
import cv2
import cvzone.HandTrackingModule
import numpy as np
import HandTrackingModule as htm
import time
import autopy
##########################
wCam, hCam = 640, 480
frameR = 100 # Frame Reduction
smoothening = 7
#########################
pTime = 0
plocX, plocY = 0, 0
clocX, clocY = 0, 0
cap = cv2.VideoCapture(1)
cap.set(3, wCam)
cap.set(4, hCam)
detector = htm.handDetector()
wScr, hScr = autopy.screen.size()
# print(wScr, hScr)
while True:
# 1. Find hand Landmarks
success, img = cap.read()
img = detector.findHands(img)
lmList, bbox = detector.findPosition(img)
# 2. Get the tip of the index and middle fingers
if len(lmList) != 0:
x1, y1 = lmList [8][1:]
x2, y2 = lmList [12][1:]
# print(x1, y1, x2, y2)
# 3. Check which fingers are up
fingers = cvzone.HandTrackingModule.HandDetector.fingersUp()
# print(fingers)
cv2.rectangle(img, (frameR, frameR), (wCam - frameR, hCam - frameR),
(255, 0, 255), 2)
# 4. Only Index Finger : Moving Mode
if fingers[1] == 1 and fingers[2] == 0:
# 5. Convert Coordinates
x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
# 6. Smoothen Values
clocX = plocX + (x3 - plocX) / smoothening
clocY = plocY + (y3 - plocY) / smoothening
# 7. Move Mouse
autopy.mouse.move(wScr - clocX, clocY)
cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
plocX, plocY = clocX, clocY
# 8. Both Index and middle fingers are up : Clicking Mode
if fingers [1] == 1 and fingers[2] == 1:
# 9. Find distance between fingers
length, img, lineInfo = detector.findDistance(8, 12, img)
print(length)
# 10. Click mouse if distance short
if length [40]:
cv2.circle(img, (lineInfo [4], lineInfo[5]),
15, (0, 255, 0), cv2.FILLED)
autopy.mouse.click()
# 11. Frame Rate
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 0), 3)
# 12. Display
cv2.imshow("Image", img)
cv2.waitKey(1)
Error:
lmList, bbox = detector.findPosition(img)
ValueError: not enough values to unpack (expected 2, got 0)
Pleas help me on this. This is the software code. I am trying to create virtual-mouse using these packages and also I created a module myself. But seems to have some error. I hope you can help. May God bless the helpers
Function detector.findPosition returns a list lmList, but when called in your main script its output gets assigned (and unpacked) to two variables, lmList, bbox. So the function needs to output two values, not just one:
def findPosition(self, img, handNo=0, draw=True):
# Do something to produce lmList and bbox...
return lmList, bbox
If you are only interested in the list output, the you can always do:
lmList, _ = detector.findPosition(img)
I'm new to this and I tried googling but
I can't seem to fix this error inside "fingersUp" function.
Basically, each part of a finger has indexes and this function checks if the thumb finger is up based on these indexes. Each finger has 4 points.
I'm using PyCharm and Python 3.8, if that helps.
if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]: IndexError: list index out of range
Here's the full code:
import cv2
import mediapipe as mp
import time
import math
import numpy as np
class handDetector():
def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands,
self.detectionCon, self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
self.tipIds = [4, 8, 12, 16, 20]
def findHands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
# print(results.multi_hand_landmarks)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms,
self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo=0, draw=True):
xList = []
yList = []
bbox = []
self.lmList = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
# print(id, lm)
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
xList.append(cx)
yList.append(cy)
# print(id, cx, cy)
self.lmList.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
xmin, xmax = min(xList), max(xList)
ymin, ymax = min(yList), max(yList)
bbox = xmin, ymin, xmax, ymax
if draw:
cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
(0, 255, 0), 2)
return self.lmList, bbox
def fingersUp(self):
fingers = []
# Thumb
if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
# Fingers
for id in range(1, 5):
if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
fingers.append(1)
else:
fingers.append(0)
# totalFingers = fingers.count(1)
return fingers
def findDistance(self, p1, p2, img, draw=True,r=15, t=3):
x1, y1 = self.lmList[p1][1:]
x2, y2 = self.lmList[p2][1:]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
length = math.hypot(x2 - x1, y2 - y1)
return length, img, [x1, y1, x2, y2, cx, cy]
def main():
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0)
detector = handDetector()
while True:
success, img = cap.read()
img = detector.findHands(img)
lmList, bbox = detector.findPosition(img)
if len(lmList) != 0:
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 255), 3)
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main_":
main()
def fingersUp(self):
fingers = []
if len(self.lmList):
if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0]-1][1]:
fingers.append(1)
else:
fingers.append(0)
for id in range(1,5):
if self.lmList[self.tipIds[id]][2]< self.lmList[self.tipIds[id]-2][2]:
fingers.append(1)
else:
fingers.append(0)
else:
fingers = [0,0,0,0,0]
return fingers
I had this same issue. Once adding Try: Except: around the if else statements in the fingersUp function I had no issue. The error is there because the fuction is being run even when your hands are off the screen.
Try:
if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
Except:
fingers.append(0)
Try, this line:
the brackets aren't placed right that is why it shows the error.
if (self.lmList[self.tipIds[0][1]]
enter image description here
1.Indentation
2. Missing "_ "
The below code is for a lane detection algo, I have used Canny Edge detection to work the follow code. However, there are a few errors that I am facing if you could check and let me know it would be of great help.
I feel the errors are somewhere around the various arguments and parameters in Python since I might not entirely be thorough with the maths of the problem.
import cv2
import numpy as np
import matplotlib.pyplot as plt
#108 300
def empty(a) :
pass
cv2.namedWindow('image')
cv2.resizeWindow('image',640,240)
#---track-bars
cv2.createTrackbar('MIN-VAL','image', 0, 300, empty)
cv2.createTrackbar('MAX-VAL','image',0, 300, empty)
# while True:
def ROI1(image):
height = image.shape[0]
triangle = np.array([
[(200, height), (1150, height), (700, 400)]
])
mask = np.zeros_like(image)
cv2.fillPoly(mask, triangle, 255)
return mask
def ROI2(image):
height = image.shape[0]
triangle = np.array([
[(0, 650), (1050, height), (750, 445)]
])
mask = np.zeros_like(image)
cv2.fillPoly(mask, triangle, 255)
return mask
def avergaeLines(image, linez):
left_fit = []
right_fit = []
for line in linez:
x1, y1, x2, y2 = line.reshape(4)
parameters = np.polyfit((x1, x2), (y1, y2), 1)
slope = parameters[0]
intercept = parameters[1]
if slope < 0:
left_fit.append((slope, intercept))
else:
right_fit.append((slope, intercept))
left_Fit_Average = np.average(left_fit, axis=0)
right_Fit_Average = np.average(right_fit, axis=0)
left_line = coord(image, left_Fit_Average)
right_line = coord(image, right_Fit_Average)
return np.array([left_line, right_line])
def displayLines(image, lines):
line_image = np.zeros_like(image)
if lines is not None:
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
cv2.line(line_image, (x1, y1), (x2, y2), (0, 255, 0), 5)
return line_image
def coord(image, linePara):
try:
slope, intercept = linePara
except TypeError:
slope, intercept = 0.001, 0
y1 = image.shape[0]
y2 = int(y1*(4/5))
x1 = int((y1 - intercept)/slope)
x2 = int((y2-intercept)/slope)
return np.array([x1, y1, x2, y2])
cap = cv2.VideoCapture("challenge_video.mp4")
#img = cv2.imread("test3.jpg")
while True:
success, img = cap.read()
while True:
lane_image = np.copy(img)
# cv2.imshow("a", lane_image)
gray = cv2.cvtColor(lane_image, cv2.COLOR_RGB2GRAY)
# cv2.imshow("b", gray)
blur_gray = cv2.GaussianBlur(gray, (5, 5), 0)
# cv2.imshow("c", blur_gray)
min_val = cv2.getTrackbarPos('MIN-VAL', 'image')
max_val = cv2.getTrackbarPos('MAX-VAL', 'image')
canny_blur_gray = cv2.Canny(blur_gray, 0, 159)
# cv2.imshow("f", canny_blur_gray)
mask = ROI2(canny_blur_gray)
ADDED = cv2.bitwise_and(canny_blur_gray, mask)
lines = cv2.HoughLinesP(ADDED, 2, np.pi/180, 100, np.array([]), minLineLength=70, maxLineGap=4)
#70 AND 7 WORKS FINE
averagedLines = avergaeLines(lane_image, lines)
linesImage = displayLines(lane_image, averagedLines)
comboImage = cv2.addWeighted(lane_image, 0.8, linesImage, 1, 1)
key = cv2.waitKey(100)
cv2.waitKey(0) & 0xFF
cv2.imshow("RESULT", comboImage)
if cv2.waitKey(0) & 0xFF == ord('c'):
break
#188 but 209 seems perfect
# print(min_wal, max_val)
#ROI_image = ROI(canny_blur_gray)
Below are the various errors that come -
packages\numpy\lib\function_base.py:380: RuntimeWarning: Mean of empty slice.
avg = a.mean(axis)
RuntimeWarning: invalid value encountered in double_scalars
ret = ret.dtype.type(ret / rcount)
Traceback (most recent call last):
linesImage = displayLines(lane_image, averagedLines)
in displayLines
cv2.line(line_image, (x1, y1), (x2, y2), (0, 255, 0), 5)
OverflowError: Python int too large to convert to C long
https://youtu.be/LfnBPeoyGBg
https://youtu.be/SCXg2fDbkCg
You need the array elements to have a larger data type, like double. Try this. I think that would be enough:
line_image = np.zeros_like(image,dtype=np.longdouble)