Pausing GPIO.output without affecting the rest of the code - python

I'm working on a real-time face recognition program on an IP camera video stream that triggers a GPIO signal when a face is recognized. After a face is first recognized, I need the GPIO not to be activated again for a certain amount of time (e.g. 45 seconds).
I tried inserting time.sleep(45) after the GPIO signal is triggered, which seems to work, BUT after the 45-second pause the video stream being analyzed is no longer live: it resumes with the very frame that followed the one where the face was recognized, in other words with a 45-second delay.
How can I pause the GPIO output for 45 seconds and get back to analyzing a live video stream afterwards?
import cv2
import numpy as np
import os
import time
import RPi.GPIO as GPIO

relay = 23
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, 1)

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX

# initiate id counter
id = 0
# names related to ids: example ==> Jenifer: id=1, etc
names = ['None', 'Jenifer', 'Jenifer', 'Luciola']

# Initialize and start realtime video capture
cam = cv2.VideoCapture('ipcamera')
frame_rate = 1
prev = 0

# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)

while True:
    time_elapsed = time.time() - prev
    res, image = cam.read()
    if time_elapsed > 1./frame_rate:
        prev = time.time()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor = 1.2,
            minNeighbors = 5,
            minSize = (int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
            id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if confidence < 85:
                id = names[id]
                confidence = " {0}%".format(round(100 - confidence))
                GPIO.output(relay, 0)
                print("Ouverture du portail")  # "Opening the gate"
                time.sleep(1)
                GPIO.output(relay, 1)
            else:
                GPIO.output(relay, 1)
            cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
            cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)
        cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
EDIT: Possum's solution works well. Line 66 becomes:

                GPIO.output(relay, 1)
                cam.release()
                time.sleep(45)
                cam = cv2.VideoCapture('ipcamera')
            else:

You could use threading.Thread to create a thread that will run the camera stream in the background, so it will be unaffected by the sleep function. You could do something like this:
from threading import Thread
import cv2

gray = None
minW = None
minH = None

def camera_stream():
    def get_frames():
        global minW, minH
        cam = cv2.VideoCapture('ipcamera')  # open the stream once, not per frame
        minW = 0.1*cam.get(3)
        minH = 0.1*cam.get(4)
        while True:
            res, image = cam.read()
            gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            yield gray_frame

    frames = get_frames()  # create the generator once
    global gray
    while True:
        gray = next(frames)

thread = Thread(target=camera_stream, daemon=True)
thread.start()
...
Now, wherever you use the variable gray, it will hold the latest frame from the camera stream, which keeps running in the background even while the main thread sleeps.
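For instance, the recognition loop can then read the shared gray frame and sleep after a trigger without falling behind. A minimal sketch, assuming the thread above has been started and that recognizer, faceCascade, minW/minH, relay and the GPIO setup are initialized as in the question:

import time

while True:
    if gray is None:
        continue  # the background thread hasn't produced a frame yet
    frame = gray  # snapshot of the most recent frame
    faces = faceCascade.detectMultiScale(frame, scaleFactor=1.2, minNeighbors=5,
                                         minSize=(int(minW), int(minH)))
    for (x, y, w, h) in faces:
        id, confidence = recognizer.predict(frame[y:y+h, x:x+w])
        if confidence < 85:
            GPIO.output(relay, 0)  # trigger the relay
            time.sleep(1)
            GPIO.output(relay, 1)
            time.sleep(45)  # safe now: frames keep flowing in the background thread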

You could also simply define a function that checks whether the delay has passed.
I amended your code; I haven't tested it, but I think it should work.
import numpy as np
import os
import time
import RPi.GPIO as GPIO
import cv2

relay = 23
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, 1)

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

delay = 45
# Pretend the last trigger happened `delay` seconds ago so the first face fires immediately
last_trigger = int(time.time()) - delay

def check_delay():
    # last_trigger and delay are module-level globals; reading them needs no declaration
    current_time = int(time.time())
    current_delay = current_time - last_trigger
    if current_delay < delay:
        return False
    else:
        return True

font = cv2.FONT_HERSHEY_SIMPLEX
# initiate id counter
id = 0
# names related to ids: example ==> Jenifer: id=1, etc
names = ['None', 'Jenifer', 'Jenifer', 'Luciola']

# Initialize and start realtime video capture
cam = cv2.VideoCapture('ipcamera')
frame_rate = 1
prev = 0

# Define min window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

while True:
    time_elapsed = time.time() - prev
    res, image = cam.read()
    if time_elapsed > 1. / frame_rate:
        prev = time.time()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if confidence < 85:
                id = names[id]
                confidence = " {0}%".format(round(100 - confidence))
                # Only fire the relay if the cooldown has passed
                if check_delay():
                    GPIO.output(relay, 0)
                    print("Ouverture du portail")  # "Opening the gate"
                    time.sleep(1)
                    GPIO.output(relay, 1)
                    last_trigger = int(time.time())
            else:
                GPIO.output(relay, 1)
            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
        cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()

Related

DNN OpenCV Python using RTSP always crashes after a few minutes

Description:
I want to create a people counter using a DNN. The model I'm using is MobileNetSSD. The camera I use is an IPCam from Hikvision; Python communicates with the IPCam over the RTSP protocol.
The program I made works well and has no bugs: when running on the sample video it does its job. But when I replaced the source with the IPCam, an unknown error appeared.
Error:
Sometimes the error is:
[h264 # 000001949f7adfc0] error while decoding MB 13 4, bytestream -6
[h264 # 000001949f825ac0] left block unavailable for requested intra4x4 mode -1
[h264 # 000001949f825ac0] error while decoding MB 0 17, bytestream 762
Sometimes the error does not appear and the program is killed.
Updated Error
After revising the code, I caught the error. The error found is:
[h264 # 0000019289b3fa80] error while decoding MB 4 5, bytestream -25
Now I don't know what to do, because I cannot find this error on Google.
Source Code:
Old Code
This is my very earliest code, before getting suggestions from the comments section.
import time
import cv2
import numpy as np
import math
import threading

print("Load MobileNetSSD model")

prototxt = "MobileNetSSD_deploy.prototxt"
model = "MobileNetSSD_deploy.caffemodel"

CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]

net = cv2.dnn.readNetFromCaffe(prototxt, model)

pos_line = 0
offset = 50
car = 0
detected = False
check = 0
prev_frame_time = 0

def detect():
    global check, car, detected
    check = 0
    if detected == False:
        car += 1
        detected = True

def center_object(x, y, w, h):
    cx = x + int(w / 2)
    cy = y + int(h / 2)
    return cx, cy

def process_frame_MobileNetSSD(next_frame):
    global car, check, detected
    rgb = cv2.cvtColor(next_frame, cv2.COLOR_BGR2RGB)
    (H, W) = next_frame.shape[:2]
    blob = cv2.dnn.blobFromImage(next_frame, size=(300, 300), ddepth=cv2.CV_8U)
    net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5, 127.5, 127.5])
    detections = net.forward()
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.5:
            idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] != "person":
                continue
            label = CLASSES[idx]
            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
            (startX, startY, endX, endY) = box.astype("int")
            center_ob = center_object(startX, startY, endX-startX, endY-startY)
            cv2.circle(next_frame, center_ob, 4, (0, 0, 255), -1)
            if center_ob[0] < (pos_line+offset) and center_ob[0] > (pos_line-offset):
                # car += 1
                detect()
            else:
                check += 1
                if check >= 5:
                    detected = False
            cv2.putText(next_frame, label+' '+str(round(confidence, 2)),
                        (startX, startY-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.rectangle(next_frame, (startX, startY),
                          (endX, endY), (0, 255, 0), 3)
    return next_frame

def PersonDetection_UsingMobileNetSSD():
    cap = cv2.VideoCapture()
    cap.open("rtsp://admin:Admin12345#192.168.100.20:554/Streaming/channels/2/")
    global car, pos_line, prev_frame_time
    frame_count = 0
    while True:
        try:
            time.sleep(0.1)
            new_frame_time = time.time()
            fps = int(1/(new_frame_time-prev_frame_time))
            prev_frame_time = new_frame_time
            ret, next_frame = cap.read()
            w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            pos_line = int(h_video/2)-50
            if ret == False:
                break
            frame_count += 1
            cv2.line(next_frame, (int(h_video/2), 0),
                     (int(h_video/2), int(h_video)), (255, 127, 0), 3)
            next_frame = process_frame_MobileNetSSD(next_frame)
            cv2.rectangle(next_frame, (248, 22), (342, 8), (0, 0, 0), -1)
            cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imshow("Video Original", next_frame)
            # print(car)
        except Exception as e:
            print(str(e))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print("/MobileNetSSD Person Detector")
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    t1 = threading.Thread(target=PersonDetection_UsingMobileNetSSD)
    t1.start()
New Code
I have revised my code, and the program still stops taking frames. I only revised the PersonDetection_UsingMobileNetSSD() function, and I've also removed the multithreading I was using. The code ran for about 30 minutes, but after a broken frame it never re-enters the program block under if ret:.
def PersonDetection_UsingMobileNetSSD():
    cap = cv2.VideoCapture()
    cap.open("rtsp://admin:Admin12345#192.168.100.20:554/Streaming/channels/2/")
    global car, pos_line, prev_frame_time
    frame_count = 0
    while True:
        try:
            if cap.isOpened():
                ret, next_frame = cap.read()
                if ret:
                    new_frame_time = time.time()
                    fps = int(1/(new_frame_time-prev_frame_time))
                    prev_frame_time = new_frame_time
                    w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                    h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                    pos_line = int(h_video/2)-50
                    # next_frame = cv2.resize(next_frame, (720, 480), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
                    frame_count += 1
                    cv2.line(next_frame, (int(h_video/2), 0),
                             (int(h_video/2), int(h_video)), (255, 127, 0), 3)
                    next_frame = process_frame_MobileNetSSD(next_frame)
                    cv2.rectangle(next_frame, (248, 22), (342, 8), (0, 0, 0), -1)
                    cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    cv2.imshow("Video Original", next_frame)
                    # print(car)
                else:
                    print("Crashed Frame")
            else:
                print("Cap is not open")
        except Exception as e:
            print(str(e))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print("/MobileNetSSD Person Detector")
    cap.release()
    cv2.destroyAllWindows()
Requirements:
Hardware: Intel i5-1035G1, 8 GB RAM, NVIDIA GeForce MX330
Software: Python 3.6.2, OpenCV 4.5.1, Numpy 1.16.0
Questions:
What should I do to fix this error?
What causes it to happen?
Best Regards,
Thanks
The main problem here is that an RTSP stream always contains some corrupted frames. The solution is to run the video capture on thread 1 and the video processing on thread 2.
As an example:
import cv2
import threading
import queue

q = queue.Queue()

def this_receive(q):
    cap = cv2.VideoCapture("rtsp://admin:Admin12345#192.168.10.20:554/Streaming/channels/2/")
    cap.set(cv2.CAP_PROP_FPS, 5)
    ret, next_frame = cap.read()
    q.put(next_frame)
    while ret:
        ret, next_frame = cap.read()
        w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        q.put(next_frame)

def main_program(q):
    while True:
        try:
            if q.empty() != True:
                next_frame = q.get()
        except Exception as e:
            print(str(e))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == "__main__":
    print("Main Program")
    p2 = threading.Thread(target=this_receive, args=(q,))
    p2.start()
    p1 = threading.Thread(target=main_program, args=(q,))
    p1.start()
This example should work for the case you are experiencing: damage to individual frames will not affect the quality of the data processing. The trade-off is that this method can introduce processing delay; the time shown in the video can lag real time by up to 10 minutes. Want to know what kind of delay? Just try it!
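If that delay matters, a common variation (my sketch, not part of the answer above) is to bound the queue to a single slot and drop stale frames in the receiver thread, so the processing thread always sees the newest frame:

import queue

q = queue.Queue(maxsize=1)  # hold only the newest frame

def this_receive_latest(q):
    cap = cv2.VideoCapture("rtsp://...")  # same stream URL as above
    ret = True
    while ret:
        ret, next_frame = cap.read()
        if not ret:
            break
        if q.full():
            try:
                q.get_nowait()  # discard the stale frame
            except queue.Empty:
                pass
        q.put(next_frame)

The trade-off flips: frames are skipped rather than queued, which is usually what you want for live counting.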

Python CV2 video writer doesn't save video

I have a simple Python program using OpenCV and Keras that performs some detections on frames (a follow-up to my previous question here). But when I want to record and save the frames as a video using video_writer, the generated video is empty.
What is wrong with the video_writer?
#........some code
# start the webcam feed
cap = cv2.VideoCapture(1)

canvasImageOriginal = cv2.imread("fg2.png")
canvasImage = cv2.imread("fg2.png")
canvasHappy = cv2.imread("fg2happy.png")
canvasSad = cv2.imread("fg2sad.png")

x0, x1 = 330, 1290
y0, y1 = 155, 700

#=========
w = 960  # int(cap.get(cv2.CV_CAP_PROP_FRAME_WIDTH))
h = 540  # int(cap.get(cv2.CV_CAP_PROP_FRAME_HEIGHT))
# video recorder
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))
#=========

prediction_history = []
LOOKBACK = 5  # how far you want to look back
counter = 0

while True:
    # Find haar cascade to draw bounding box around face
    ret, frame = cap.read()
    frame = cv2.flip(frame, 3)
    if not ret:
        break
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        text = emotion_dict[maxindex]
        print(prediction[0][3])
        prediction_history.append(maxindex)
        most_common_index = max(set(prediction_history[-LOOKBACK:][::-1]), key=prediction_history.count)
        text = emotion_dict[most_common_index]
        # if ("Sad" in text) or ("Angry" in text) or ("Disgusted" in text):
        #     text = "Sad"
        if ("Happy" in text) or ("Sad" in text):
            cv2.putText(frame, text, (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        if ("Happy" in text):
            counter = counter + 1
            if counter == 10:
                # print("Happy!")
                canvasImage = canvasHappy
        else:
            counter = 0
            canvasImage = canvasImageOriginal
    dim = (800, 480)
    frame_shrunk = cv2.resize(frame, (x1 - x0, y1 - y0))
    canvasImage[y0:y1, x0:x1] = frame_shrunk
    # cv2.imshow('Video', cv2.resize(frame, dim, interpolation=cv2.INTER_CUBIC))
    cv2.imshow('Demo', canvasImage)
    video_writer.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
video_writer.release()
cv2.destroyAllWindows()
As mentioned above, check print(frame.shape).
When I did, I saw (300, 450, 3); I changed the VideoWriter resolution to (450, 300) and it worked for me. In short, frame.shape is (height, width, channels), but the VideoWriter resolution is (width, height).
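A minimal sketch of that fix: size the writer from the first captured frame instead of hard-coding w and h (the capture index and output file name here are placeholders):

import cv2

cap = cv2.VideoCapture(1)
ret, frame = cap.read()
assert ret, "could not read a frame from the capture"

h, w = frame.shape[:2]  # frame.shape is (height, width, channels)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))  # VideoWriter expects (width, height)

video_writer.write(frame)  # every written frame must match (w, h), or the output stays empty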

OpenCv facial recognition app detection problem

This is a facial recognition app I created using Python, OpenCV and a Haar cascade classifier. The app works well at classifying trained persons, but it has a problem: it detects unknown persons as a known, previously trained person. How can I fix this?
This is the dataset creation code:
import cv2
import os
import time

cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height

face_detector = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')

# For each person, enter one numeric face id
face_id = input('\n enter user id end press <return> ==> ')
print("\n [INFO] Initializing face capture. Look the camera and wait ...")

# Initialize individual sampling face count
count = 0
while True:
    ret, img = cam.read()
    img = cv2.flip(img, 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    width_d, height_d = 150, 150
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        count += 1
        # Save the captured image into the datasets folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg",
                    cv2.resize(gray[y:y+h, x:x+w], (width_d, height_d)))
        cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break
    elif count >= 400:  # Take 400 face samples and stop video
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
This is the training phase
import cv2
import numpy as np
from PIL import Image
import os

# Path for face image database
path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("Cascades/haarcascade_frontalface_default.xml")

# function to get the images and label data
def getImagesAndLabels(path):
    width_d, height_d = 150, 150  # Declare your own width and height
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
        img_numpy = np.array(PIL_img, 'uint8')
        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(cv2.resize(img_numpy[y:y+h, x:x+w], (width_d, height_d)))
            ids.append(id)
    return faceSamples, ids

print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces, ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))

# Save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml')  # recognizer.save() worked on Mac, but not on Pi

# Print the number of faces trained and end program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
This is the recognizing phase
import cv2
import numpy as np
import os

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "Cascades/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX

# initiate id counter
id = 0
# names related to ids: example ==> Marcelo: id=1, etc
names = ['Mamdouh Alaa', 'Dr.Ahmed Seddawy', 'Dr.Ismail Abdulghaffar']

# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 1366)  # set video width
cam.set(4, 768)   # set video height

# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)

while True:
    ret, img = cam.read()
    img = cv2.flip(img, 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    width_d, height_d = 150, 150
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor = 1.2,
        minNeighbors = 5,
        minSize = (int(minW), int(minH)),
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        id, confidence = recognizer.predict(cv2.resize(gray[y:y+h, x:x+w], (width_d, height_d)))
        # Check if confidence is less than 100 ==> "0" is perfect match
        if confidence < 100:
            id = names[id]
            confidence = " {0}%".format(round(100 - confidence))
        else:
            id = "unknown person"
            confidence = " {0}%".format(round(100 - confidence))
        cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)
    cv2.imshow('IFR', img)
    k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()

How to limit the keys pressed using pynput

I have made a program to simulate a game using face detection. I find the centre of the face and, based on its movement, I press keys with the pynput library. The code works fine, but there is one small issue: whenever it detects a movement of the point, it presses the keyboard key more than once. I want to limit it to one key press per movement.
import cv2
import numpy as np
from pynput.keyboard import Key, Controller
import time

keyboard = Controller()
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

wc = cv2.VideoCapture(0)
time.sleep(2)

# Calibrate: take the face centre over the first 40 frames as the reference point
for i in range(40):
    ret, img = wc.read()
    img = cv2.flip(img, 1)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        centre = [int((x + w + x)/2), int((y + h + y)/2)]

# Read until video is completed
while wc.isOpened():
    # Capture frame-by-frame
    ret, img = wc.read()
    img = cv2.flip(img, 1)
    if ret == True:
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            centre_new = [int((x + w + x)/2), int((y + h + y)/2)]
            cv2.circle(img, (centre_new[0], centre_new[1]), 0, (0, 0, 255), 5)
            if centre_new[0] - centre[0] > 100:
                keyboard.press(Key.right)
                keyboard.release(Key.right)
                print('right')
            if centre_new[0] - centre[0] < -100:
                keyboard.press(Key.left)
                keyboard.release(Key.left)
                print('left')
            if centre_new[1] - centre[1] < -100:
                keyboard.press(Key.up)
                keyboard.release(Key.up)
                print('up')
            if centre_new[1] - centre[1] > 100:
                keyboard.press(Key.down)
                keyboard.release(Key.down)
                print('down')
        # Display the resulting frame
        cv2.imshow('Face', img)
        # Press Q on keyboard to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break

# When everything done, release the video capture object
wc.release()
# Closes all the frames
cv2.destroyAllWindows()
I get this kind of output:
up
up
up
up
up
up
up
right
right
right
right
right
right
right
right
right
up
up
up
up
up
Define a "home zone" that the face must return to before another keystroke can fire, and use a flag to track it. Is this what you are looking for?
keystroke_zone = 100
home_zone = keystroke_zone - 10  # or anything smaller than keystroke_zone
is_home = True

while wc.isOpened():
    ...
    if is_home:
        if centre_new[0] - centre[0] > keystroke_zone:
            keyboard.press(Key.right)
            keyboard.release(Key.right)
            print('right')
            is_home = False
        if centre_new[0] - centre[0] < -keystroke_zone:
            keyboard.press(Key.left)
            keyboard.release(Key.left)
            print('left')
            is_home = False
        if centre_new[1] - centre[1] < -keystroke_zone:
            keyboard.press(Key.up)
            keyboard.release(Key.up)
            print('up')
            is_home = False
        if centre_new[1] - centre[1] > keystroke_zone:
            keyboard.press(Key.down)
            keyboard.release(Key.down)
            print('down')
            is_home = False
    else:
        if abs(centre_new[0] - centre[0]) < home_zone or \
           abs(centre_new[1] - centre[1]) < home_zone:
            is_home = True

Face Tracker with raspberry pi

I have followed this tutorial for face tracking using servo motors:
website: https://embeditelectronics.com/blog/project/face-tracker/
github: https://github.com/embeditelectronics/Face-Tracker/blob/master/python-face-tracker/face.py
But the hardware used in the tutorial is different from the hardware I have.
Right now I'm using an Adafruit PCA9685 to connect my servos to my Raspberry Pi.
I have tried changing the code to match my Adafruit board, using the example provided on GitHub:
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# from pisoc import *
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
position = 90

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def Track(pan, tilt, center, target = Point(160, 120), threshold = Point(16, 24), delta = Point(4, 3)):
    global position
    position = 90
    if center.x > target.x + threshold.x:
        position = position - delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() - delta.x)
    elif center.x < target.x - threshold.x:
        position = position + delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() + delta.x)
    if center.y > target.y + threshold.y:
        position = position + delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() + delta.y)
    elif center.y < target.y - threshold.y:
        position = position - delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() - delta.y)

if __name__ == "__main__":
    # PiSoC(log_level = 'debug')
    pan = pwm.set_pwm(0, 0, position)
    tilt = pwm.set_pwm(1, 0, position)
    # pan = Servo(0, max_angle = 320)
    # tilt = Servo(1, max_angle = 240)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size = camera.resolution)
    face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/lbpcascade_frontalface.xml')
    scale = (camera.resolution[0]/320.0, camera.resolution[1]/240.0)
    time.sleep(0.1)
    # pan.Start()
    # tilt.Start()
    for frame in camera.capture_continuous(rawCapture, format = 'bgr', use_video_port = True):
        image = frame.array
        resized = cv2.resize(image, (320, 240))
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        if len(faces) > 0:
            for (x, y, w, h) in faces:
                Track(pan, tilt, Point(x + w/2.0, y + h/2.0))
                break
        faces_resized = [(int(scale[0]*x), int(scale[1]*y), int(scale[0]*w), int(scale[1]*h)) for (x, y, w, h) in faces]
        for (x, y, w, h) in faces_resized:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
        cv2.imshow("Result", image)
        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)
        if key == ord('q') or key == 27:
            break
    # pan.Stop()
    # tilt.Stop()
That is the complete code. The thing I'm stuck on: the Pi camera can detect my face, but the servo motors don't behave as expected. I don't understand the connection between the servo motors and the part of the code that detects my face; I know there is a missing link somewhere, but I'm not sure where exactly. I'm not even sure this is the best way to do face tracking; I have tried a lot of other approaches and ended up with many errors. If you have a better version of this code or any tutorial, please do suggest it.
******* UPDATED *******
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# from pisoc import *
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
position = 90

FRAME_W = 180
FRAME_H = 100
cam_pan = 90
cam_tilt = 60

pwm.set_pwm_freq(50)
pwm.set_pwm(0, 0, 120)
pwm.set_pwm(1, 0, 120)

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def Track(pan, tilt, center, target = Point(160, 120), threshold = Point(16, 24), delta = Point(4, 3)):
    global position
    position = 90
    if center.x > target.x + threshold.x:
        position = position - delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() - delta.x)
    elif center.x < target.x - threshold.x:
        position = position + delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() + delta.x)
    if center.y > target.y + threshold.y:
        position = position + delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() + delta.y)
    elif center.y < target.y - threshold.y:
        position = position - delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() - delta.y)

if __name__ == "__main__":
    # PiSoC(log_level = 'debug')
    pan = pwm.set_pwm(0, 0, position)
    tilt = pwm.set_pwm(1, 0, position)
    # pan = Servo(0, max_angle = 320)
    # tilt = Servo(1, max_angle = 240)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size = camera.resolution)
    face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/lbpcascade_frontalface.xml')
    scale = (camera.resolution[0]/320.0, camera.resolution[1]/240.0)
    time.sleep(0.1)
    # pan.Start()
    # tilt.Start()
    for frame in camera.capture_continuous(rawCapture, format = 'bgr', use_video_port = True):
        image = frame.array
        resized = cv2.resize(image, (320, 240))
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        if len(faces) > 0:
            for (x, y, w, h) in faces:
                Track(pan, tilt, Point(x + w/2.0, y + h/2.0))
                break
        faces_resized = [(int(scale[0]*x), int(scale[1]*y), int(scale[0]*w), int(scale[1]*h)) for (x, y, w, h) in faces]
        for (x, y, w, h) in faces_resized:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
        cv2.imshow("Result", image)
        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)
        if key == ord('q') or key == 27:
            break
    # pan.Stop()
    # tilt.Stop()
Now the servo motors are moving, but only about 0.5 right / 0.5 left, depending on the face direction.
Not sure if you spotted it yet, but you are setting position to 90 every time the function runs, so it will never get past one step: it is always reset to 90 when called again.
def Track(pan, tilt, center, target = Point(160, 120), threshold = Point(16, 24), delta = Point(4, 3)):
    global position
    position = 90  # <-- resets the position on every call
    if center.x > target.x + threshold.x:
        position = position - delta.x
You should move the initialization of position outside the function.
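For example, a sketch of the pan axis only, keeping the same pwm.set_pwm usage as your code (the 0-180 clamp is an assumption; adjust it to your servo's usable range):

position = 90  # initialize once, at module level

def Track(pan, tilt, center, target=Point(160, 120), threshold=Point(16, 24), delta=Point(4, 3)):
    global position
    if center.x > target.x + threshold.x:
        position -= delta.x  # step from the last position, not from 90
    elif center.x < target.x - threshold.x:
        position += delta.x
    position = max(0, min(180, position))  # clamp to the servo's usable range
    pwm.set_pwm(0, 0, position)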
Hope it helps.
TIP: If you fail to get many/any responses when you post issues, it's usually because the answer is staring at you: do more research or check your code again.
