I have followed this tutorial for face tracking with servo motors:
website: https://embeditelectronics.com/blog/project/face-tracker/
github: https://github.com/embeditelectronics/Face-Tracker/blob/master/python-face-tracker/face.py
The catch is that the hardware used in the tutorial is different from mine: I'm driving my servos from a Raspberry Pi through an Adafruit PCA9685. I have tried adapting the code to the Adafruit board using the example provided on GitHub.
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# from pisoc import *
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
position = 90

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def Track(pan, tilt, center, target = Point(160, 120), threshold = Point(16, 24), delta = Point(4, 3)):
    global position
    position = 90
    if (center.x > target.x + threshold.x):
        position = position - delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() - delta.x)
    elif (center.x < target.x - threshold.x):
        position = position + delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() + delta.x)
    if (center.y > target.y + threshold.y):
        position = position + delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() + delta.y)
    elif (center.y < target.y - threshold.y):
        position = position - delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() - delta.y)

if __name__ == "__main__":
    # PiSoC(log_level = 'debug')
    pan = pwm.set_pwm(0, 0, position)
    tilt = pwm.set_pwm(1, 0, position)
    # pan = Servo(0, max_angle = 320)
    # tilt = Servo(1, max_angle = 240)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size = camera.resolution)
    face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/lbpcascade_frontalface.xml')
    scale = (camera.resolution[0]/320.0, camera.resolution[1]/240.0)
    time.sleep(0.1)
    # pan.Start()
    # tilt.Start()
    for frame in camera.capture_continuous(rawCapture, format = 'bgr', use_video_port = True):
        image = frame.array
        resized = cv2.resize(image, (320, 240))
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        if len(faces) > 0:
            for (x, y, w, h) in faces:
                Track(pan, tilt, Point(x + w/2.0, y + h/2.0))
                break
        faces_resized = [(int(scale[0]*x), int(scale[1]*y), int(scale[0]*w), int(scale[1]*h)) for (x, y, w, h) in faces]
        for (x, y, w, h) in faces_resized:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
        cv2.imshow("Result", image)
        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)
        if key == ord('q') or key == 27:
            break
    # pan.Stop()
    # tilt.Stop()
That is the complete code. The thing I'm stuck on: the Pi camera can detect my face, but the servo motors don't behave as expected. I also don't understand the connection between the servo motors and the part of the code that detects my face; I know there is a missing link somewhere, but I'm not sure exactly where. I'm not even sure this is the best way to do face tracking; I have tried many other approaches and ended up with lots of errors. If you have a better version of this code or any tutorial, please do suggest it.
******* UPDATED *******
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# from pisoc import *
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
position = 90
FRAME_W = 180
FRAME_H = 100
cam_pan = 90
cam_tilt = 60
pwm.set_pwm_freq(50)
pwm.set_pwm(0, 0, 120)
pwm.set_pwm(1, 0, 120)

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def Track(pan, tilt, center, target = Point(160, 120), threshold = Point(16, 24), delta = Point(4, 3)):
    global position
    position = 90
    if (center.x > target.x + threshold.x):
        position = position - delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() - delta.x)
    elif (center.x < target.x - threshold.x):
        position = position + delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() + delta.x)
    if (center.y > target.y + threshold.y):
        position = position + delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() + delta.y)
    elif (center.y < target.y - threshold.y):
        position = position - delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() - delta.y)

if __name__ == "__main__":
    # PiSoC(log_level = 'debug')
    pan = pwm.set_pwm(0, 0, position)
    tilt = pwm.set_pwm(1, 0, position)
    # pan = Servo(0, max_angle = 320)
    # tilt = Servo(1, max_angle = 240)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size = camera.resolution)
    face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/lbpcascade_frontalface.xml')
    scale = (camera.resolution[0]/320.0, camera.resolution[1]/240.0)
    time.sleep(0.1)
    # pan.Start()
    # tilt.Start()
    for frame in camera.capture_continuous(rawCapture, format = 'bgr', use_video_port = True):
        image = frame.array
        resized = cv2.resize(image, (320, 240))
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        if len(faces) > 0:
            for (x, y, w, h) in faces:
                Track(pan, tilt, Point(x + w/2.0, y + h/2.0))
                break
        faces_resized = [(int(scale[0]*x), int(scale[1]*y), int(scale[0]*w), int(scale[1]*h)) for (x, y, w, h) in faces]
        for (x, y, w, h) in faces_resized:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
        cv2.imshow("Result", image)
        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)
        if key == ord('q') or key == 27:
            break
    # pan.Stop()
    # tilt.Stop()
Now the servo motors are moving, but only about 0.5 to the right / 0.5 to the left depending on the face direction.
Not sure if you spotted it yet, but you are setting position to 90 every time the function runs, so it is never going to get past one step: it is always reset to 90 on the next call.
def Track(pan, tilt, center, target = Point(160, 120), threshold = Point(16, 24), delta = Point(4, 3)):
    global position
    position = 90
    if (center.x > target.x + threshold.x):
        position = position - delta.x
You should move the initialization of position outside the function, as in the sketch below.
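A minimal version of that change might look like this (an untested sketch, keeping the rest of your logic as-is; note also that set_pwm's last argument is a tick count out of 4096 per PWM period, not an angle in degrees):

# position is initialized once at module level, so it persists between Track() calls
position = 90

def Track(pan, tilt, center, target=Point(160, 120), threshold=Point(16, 24), delta=Point(4, 3)):
    global position
    # no "position = 90" here: each call adjusts from wherever the servo last was
    if center.x > target.x + threshold.x:
        position = position - delta.x
        pwm.set_pwm(0, 0, position)  # set_pwm takes a tick count (0-4095), not degrees
    elif center.x < target.x - threshold.x:
        position = position + delta.x
        pwm.set_pwm(0, 0, position)
    # ...same pattern for the tilt axis on channel 1...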
Hope it helps.
TIP: If you get few or no responses when you post issues, it's usually because the answer is staring at you and you need to either do more research or check your code again.
Related
I want to convert my Python file main.py into an executable. I'm using py2exe to do this.
This is my code.
Setup for the conversion
from distutils.core import setup
import py2exe

setup(
    options = {'py2exe': {'bundle_files': 1}},
    windows = [{'script': "main.py"}],
    zipfile = None,
)
Main
import cv2
import mediapipe as mp
import pyautogui

def show_webcam():
    scale = 10
    camera = cv2.VideoCapture(0)
    face_mesh = mp.solutions.face_mesh.FaceMesh(refine_landmarks=True)
    screen_w, screen_h = pyautogui.size()
    while True:
        _, frame = camera.read()
        frame = cv2.flip(frame, 1)
        height, width, channels = frame.shape
        # cropping
        centerX, centerY = int(height/2), int(width/2)
        radiusX, radiusY = int(scale*height/100), int(scale*width/100)
        minX, maxX = centerX - radiusX, centerX + radiusX
        minY, maxY = centerY - radiusY, centerY + radiusY
        cropped = frame[minX:maxX, minY:maxY]
        resized_cropped = cv2.resize(cropped, (width, height))
        rgb_frame = cv2.cvtColor(resized_cropped, cv2.COLOR_BGR2RGB)
        output = face_mesh.process(rgb_frame)
        landmark_points = output.multi_face_landmarks
        # print(landmark_points)
        frame_h, frame_w, _ = resized_cropped.shape
        if landmark_points:
            landmarks = landmark_points[0].landmark
            for id, landmark in enumerate(landmarks[473:478]):
                x = int(landmark.x * frame_w)
                y = int(landmark.y * frame_h)
                cv2.circle(resized_cropped, (x, y), 3, (0, 255, 0))
                if id == 1:
                    screen_x = screen_w / frame_w * x
                    screen_y = screen_h / frame_h * y
                    try:
                        pyautogui.moveTo(screen_x, screen_y)
                    except pyautogui.FailSafeException:
                        print('Running code before exiting.')
                        break
                    pyautogui.moveTo(screen_x, screen_y)
            # LEFT CLICK
            left = [landmarks[145], landmarks[159]]
            for landmark in left:
                x = int(landmark.x * frame_w)
                y = int(landmark.y * frame_h)
                cv2.circle(resized_cropped, (x, y), 3, (0, 255, 255))
            if (left[0].y - left[1].y) < 0.04:
                # print('click')
                pyautogui.click()
                pyautogui.sleep(1)
        cv2.imshow('Eyes Controlled Mouse', resized_cropped)
        key = cv2.waitKey(10)
        # if the Esc key is pressed, break out of the loop
        if key == 27:
            break
    camera.release()
    cv2.destroyAllWindows()

def main():
    show_webcam()

if __name__ == '__main__':
    main()
If I run python main.py py2exe, I get this error:
? zmq.libzmq imported from - Building 'dist\main.exe'. error: [WinError 87] the parameter is incorrect.
Could anyone help me?
I have prepared some code to lock my computer when I get up and walk away from it.
But with a naive algorithm it locks immediately, because it loses my face during some movements. What I want is this: when no face is detected, wait 3 seconds, check again, and only lock if there is still no face. However, when I use the time.sleep method, the webcam video freezes and the program acts as if there were no face even though my face is at the camera. What kind of algorithm would you suggest for this?
from multiprocessing.connection import wait
import cv2
import time
import pyautogui
import ctypes
from math import sin, cos, radians

camera = cv2.VideoCapture(0)
face = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")

settings = {
    'scaleFactor': 1.3,
    'minNeighbors': 3,
    'minSize': (50, 50),
    'flags': cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH
}

def rotate_image(image, angle):
    if angle == 0: return image
    height, width = image.shape[:2]
    rot_mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 0.9)
    result = cv2.warpAffine(image, rot_mat, (width, height), flags=cv2.INTER_LINEAR)
    return result

def rotate_point(pos, img, angle):
    if angle == 0: return pos
    x = pos[0] - img.shape[1]*0.4
    y = pos[1] - img.shape[0]*0.4
    newx = x*cos(radians(angle)) + y*sin(radians(angle)) + img.shape[1]*0.4
    newy = -x*sin(radians(angle)) + y*cos(radians(angle)) + img.shape[0]*0.4
    return int(newx), int(newy), pos[2], pos[3]

while True:
    ret, img = camera.read()
    for angle in [0, -25, 25]:
        rimg = rotate_image(img, angle)
        detected = face.detectMultiScale(rimg, **settings)
        if len(detected):
            detected = [rotate_point(detected[-1], img, -angle)]
            break
    for x, y, w, h in detected[-1:]:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('facedetect', img)
    if cv2.waitKey(5) != -1:
        break
    if 0 == len(detected):
        time.sleep(3)
        if 1 == len(detected):
            pass
        else:
            ctypes.windll.user32.LockWorkStation()

cv2.destroyWindow("facedetect")
Set a variable to the timestamp at which you first stopped detecting a face. On every loop iteration, if you detect your face again, reset this variable to None; if the variable is not None and variable + 3 seconds <= current timestamp, lock your station.
import time

unseen_from = None  # timestamp of the first pass with no face; None while a face is visible

while True:
    # etc etc
    detected = bool(detected)  # empty list == False, True otherwise
    if unseen_from is None:
        unseen_from = None if detected else time.time()
    elif detected:
        unseen_from = None
    elif unseen_from + 3 < time.time():
        ctypes.windll.user32.LockWorkStation()
Live coding; I don't have a Windows machine to test this on, but the idea is there.
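Folded into the detection loop from the question, the same idea reads like this (equally untested; rotate_image, face, settings and camera are the names defined above):

import time

unseen_since = None  # set when the face first disappears, None while a face is visible

while True:
    ret, img = camera.read()
    for angle in [0, -25, 25]:
        rimg = rotate_image(img, angle)
        detected = face.detectMultiScale(rimg, **settings)
        if len(detected):
            break
    if len(detected):
        unseen_since = None                # face is back: disarm the timer
    elif unseen_since is None:
        unseen_since = time.time()         # first faceless frame: arm the timer
    elif time.time() - unseen_since > 3:
        ctypes.windll.user32.LockWorkStation()
        unseen_since = None                # avoid re-locking on every following frame
    cv2.imshow('facedetect', img)
    if cv2.waitKey(5) != -1:
        break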
I'm working on a real-time face recognition program that analyzes an IP camera video stream and triggers a GPIO signal when a face is recognized. After a face is first recognized, I need the GPIO not to be activated again for a certain amount of time (e.g. 45 seconds).
I tried to insert time.sleep(45) after the GPIO signal is triggered, which seems to work, BUT after the 45-second pause the video stream being analyzed is no longer live. It resumes with the very frame that came after the one where the face was recognized, in other words with a 45-second delay.
How can I pause the GPIO output for 45 seconds and then get back to analyzing a live video stream?
import cv2
import numpy as np
import os
import time
import RPi.GPIO as GPIO

relay = 23
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, 1)

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

font = cv2.FONT_HERSHEY_SIMPLEX

# initiate id counter
id = 0
# names related to ids: example ==> Jenifer: id=1, etc
names = ['None', 'Jenifer', 'Jenifer', 'Luciola']

# Initialize and start realtime video capture
cam = cv2.VideoCapture('ipcamera')
frame_rate = 1
prev = 0

# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)

while True:
    time_elapsed = time.time() - prev
    res, image = cam.read()
    if time_elapsed > 1./frame_rate:
        prev = time.time()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor = 1.2,
            minNeighbors = 5,
            minSize = (int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
            id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if (confidence < 85):
                id = names[id]
                confidence = " {0}%".format(round(100 - confidence))
                GPIO.output(relay, 0)
                print("Ouverture du portail")  # "Opening the gate"
                time.sleep(1)
                GPIO.output(relay, 1)
            else:
                GPIO.output(relay, 1)
            cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
            cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)
        cv2.imshow('camera', img)
        k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video
        if k == 27:
            break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
Possum's solution works well.
Line 66:
                GPIO.output(relay, 1)
                cam.release()
                time.sleep(45)
                cam = cv2.VideoCapture('ipcamera')
            else:
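In context, that branch of the loop then reads like this (a sketch pieced together from the question's code; 'ipcamera' is the question's placeholder for the actual stream URL):

if confidence < 85:
    id = names[id]
    confidence = " {0}%".format(round(100 - confidence))
    GPIO.output(relay, 0)
    print("Ouverture du portail")
    time.sleep(1)
    GPIO.output(relay, 1)
    cam.release()                        # close the stream so frames stop queueing up
    time.sleep(45)                       # 45-second hold-off with no stream open
    cam = cv2.VideoCapture('ipcamera')   # reconnect: the next frame read is live again
else:
    GPIO.output(relay, 1)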
You could use threading.Thread to create a thread that will run the camera stream in the background, so it will be unaffected by the sleep function. You could do something like this:
from threading import Thread
import cv2

gray = None
minW = None
minH = None

def camera_stream():
    global gray, minW, minH
    cam = cv2.VideoCapture('ipcamera')  # open the stream once, not on every frame
    minW = 0.1 * cam.get(3)
    minH = 0.1 * cam.get(4)

    def get_frames():
        while True:
            res, image = cam.read()
            gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            yield gray_frame

    frames = get_frames()  # instantiate the generator once
    while True:
        gray = next(frames)

thread = Thread(target=camera_stream, daemon=True)
thread.start()
...
Now wherever you used the variable gray, it should use the next frame in the camera stream that is constantly running in the background.
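For example, the recognition loop might then consume the shared frame instead of calling cam.read() itself (a sketch under the same untested caveat; gray can be None until the thread delivers its first frame):

while True:
    if gray is None:
        continue  # wait until the background thread has produced a frame
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    # ...recognition and GPIO logic as before; a time.sleep(45) here no longer
    # stalls the capture, because frames keep flowing in the daemon thread...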
You could also simply define a function that checks whether the delay has passed.
I amended your code; I haven't tested it, but I think it should work.
import numpy as np
import os
import time
import RPi.GPIO as GPIO
import cv2

relay = 23
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, 1)

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

delay = 45
last_trigger = int(time.time()) - delay  # start "expired" so the first trigger is allowed

def check_delay():
    global last_trigger, delay
    current_time = int(time.time())
    current_delay = current_time - last_trigger
    if current_delay < delay:
        return False
    else:
        return True
font = cv2.FONT_HERSHEY_SIMPLEX

# initiate id counter
id = 0
# names related to ids: example ==> Jenifer: id=1, etc
names = ['None', 'Jenifer', 'Jenifer', 'Luciola']

# Initialize and start realtime video capture
cam = cv2.VideoCapture('ipcamera')
frame_rate = 1
prev = 0

# Define min window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

while True:
    time_elapsed = time.time() - prev
    res, image = cam.read()
    if time_elapsed > 1. / frame_rate:
        prev = time.time()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if (confidence < 85):
                id = names[id]
                confidence = " {0}%".format(round(100 - confidence))
                GPIO.output(relay, 0)
                print("Ouverture du portail")
                time.sleep(1)
                if check_delay():
                    GPIO.output(relay, 1)
                    last_trigger = int(time.time())
            else:
                if check_delay():
                    GPIO.output(relay, 1)
                    last_trigger = int(time.time())
            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
        cv2.imshow('camera', img)
        k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video
        if k == 27:
            break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
(Sorry about formatting)
I am trying to write an OpenCV program to track the color red.
So far it works okay, but the servo is jittery: even when the object is still and centred, the servo moves back and forth. I have a 470 µF capacitor and an external power supply. Any help to make it smoother would be a godsend!
'''
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
import RPi.GPIO as GPIO

### SERVO SETUP ###
servoPIN_x = 17
servoPIN_y = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(servoPIN_x, GPIO.OUT)
GPIO.setup(servoPIN_y, GPIO.OUT)
px = GPIO.PWM(servoPIN_x, 50)
py = GPIO.PWM(servoPIN_y, 50)
position_x = 7.5
position_y = 7.5
px.start(position_x)
py.start(position_y)
x_gain = 0.1  # 0.01 - 5.00
y_gain = 0.3  # 0.01 - 5.00
xon = True
yon = True

### CAMERA SETUP ###
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
raw_capture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)

### VIDEO CAPTURE LOOP ###
for frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
    # cv2 video init
    image = frame.array
    hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # color parameters
    low_red = np.array([161, 155, 84])
    high_red = np.array([179, 255, 255])
    # find color
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)  # create black/white mask for all reds
    contours, _ = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find different red contours
    contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)  # sort contours from largest to smallest
    # set line x/y var to center
    x_medium = 320
    y_medium = 240
    # loop sets x/y_medium
    for cnt in contours:
        (x, y, w, h) = cv2.boundingRect(cnt)
        y_medium = int((y + y + h) / 2)
        x_medium = int((x + x + w) / 2)
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
        break
    # draw lines
    cv2.line(image, (x_medium, 0), (x_medium, 480), (0, 255, 0), 2)
    cv2.line(image, (0, y_medium), (640, y_medium), (0, 255, 0), 2)
    # cv2.imshow('mask', red_mask)
    # show regular frame
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF
    raw_capture.truncate(0)
    # quit
    if key == ord("q"):
        cv2.waitKey(1)
        px.stop()
        py.stop()
        break
    # servoX travels to place position_x in center frame
    if not xon and x_medium != 320:
        px.start(position_x)
    if x_medium > 320:
        position_x = position_x - x_gain
    elif x_medium < 320:
        position_x = position_x + x_gain
    elif x_medium == 320:
        position_x = position_x
        xon = False
    else:
        position_x = 7.5
    if xon:
        px.ChangeDutyCycle(position_x)
    else:
        px.stop()

px.stop()
py.stop()
cv2.destroyAllWindows()
exit(0)
'''
I had the same issue with this kind of servo motor while building a robot. I solved it by using another library called pigpio to control the Raspberry Pi's pins; its pulses are hardware-timed, and since then the servos no longer move for no reason. The commands are very similar. Have a look at:
http://abyz.me.uk/rpi/pigpio/
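For servos, the switch might look like this (a rough sketch, not the original poster's code; pigpio's set_servo_pulsewidth takes a pulse width in microseconds, roughly 500 to 2500, with 0 meaning pulses off, and the pigpio daemon must be running, e.g. via sudo pigpiod):

import pigpio

SERVO_X = 17  # same BCM pins as in the question
SERVO_Y = 18

pi = pigpio.pi()  # connect to the local pigpio daemon

# centre both servos; pulse widths are in microseconds (~500-2500, 1500 = centre)
pi.set_servo_pulsewidth(SERVO_X, 1500)
pi.set_servo_pulsewidth(SERVO_Y, 1500)

# ...inside the tracking loop, nudge the pulse width instead of the duty cycle,
# e.g. pulse_x += 10 when x_medium < 320 and pulse_x -= 10 when x_medium > 320...

pi.set_servo_pulsewidth(SERVO_X, 0)  # 0 stops the pulses (servo released)
pi.set_servo_pulsewidth(SERVO_Y, 0)
pi.stop()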
I have written code to control a game using face detection. I find the centre of the face, and based on its movement I press keys with the pynput library. The code works fine except for one small issue: whenever it detects a movement of the point, it presses the keyboard key more than once. I want to limit it to a single key press.
'''
import cv2
import numpy as np
from pynput.keyboard import Key, Controller
import time

keyboard = Controller()
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')  # load the cascade before it is first used

wc = cv2.VideoCapture(0)
time.sleep(2)

# calibration: the face position over the first frames sets the reference centre
for i in range(40):
    ret, img = wc.read()
    img = cv2.flip(img, 1)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        centre = [int((x + w + x)/2), int((y + h + y)/2)]

# Read until video is completed
while wc.isOpened():
    # Capture frame-by-frame
    ret, img = wc.read()
    img = cv2.flip(img, 1)
    if ret == True:
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            centre_new = [int((x + w + x)/2), int((y + h + y)/2)]
            cv2.circle(img, (centre_new[0], centre_new[1]), 0, (0, 0, 255), 5)
            if centre_new[0] - centre[0] > 100:
                keyboard.press(Key.right)
                keyboard.release(Key.right)
                print('right')
            if centre_new[0] - centre[0] < -100:
                keyboard.press(Key.left)
                keyboard.release(Key.left)
                print('left')
            if centre_new[1] - centre[1] < -100:
                keyboard.press(Key.up)
                keyboard.release(Key.up)
                print('up')
            if centre_new[1] - centre[1] > 100:
                keyboard = Controller()
                keyboard.press(Key.down)
                keyboard.release(Key.down)
                print('down')
        # Display the resulting frame
        cv2.imshow('Face', img)
        # Press Q on keyboard to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break

# When everything is done, release the video capture object
wc.release()
# Close all the frames
cv2.destroyAllWindows()
'''
I get this kind of output:
'''
up
up
up
up
up
up
up
right
right
right
right
right
right
right
right
right
up
up
up
up
up
'''
Define a home zone the face has to return to, to separate one keystroke from the next, and use a flag to track it. Is this what you are looking for?
keystroke_zone = 100
home_zone = keystroke_zone - 10  # or whatever is smaller than keystroke_zone

is_home = True
while wc.isOpened():
    ...
    if is_home:
        if centre_new[0] - centre[0] > keystroke_zone:
            keyboard.press(Key.right)
            keyboard.release(Key.right)
            print('right')
            is_home = False
        if centre_new[0] - centre[0] < -keystroke_zone:
            keyboard.press(Key.left)
            keyboard.release(Key.left)
            print('left')
            is_home = False
        if centre_new[1] - centre[1] < -keystroke_zone:
            keyboard.press(Key.up)
            keyboard.release(Key.up)
            print('up')
            is_home = False
        if centre_new[1] - centre[1] > keystroke_zone:
            keyboard = Controller()
            keyboard.press(Key.down)
            keyboard.release(Key.down)
            print('down')
            is_home = False
    else:
        if abs(centre_new[0] - centre[0]) < home_zone or \
           abs(centre_new[1] - centre[1]) < home_zone:
            is_home = True