DNN OpenCV Python using RTSP always crashes after a few minutes - python

Description:
I want to create a people counter using DNN. The model I'm using is MobileNetSSD. The camera I use is an IPCam from Hikvision. Python communicates with the IPCam using the RTSP protocol.
The program I made works and has no bugs: when running on a sample video it does its job well. But when I switch to the IPCam, an unknown error appears.
Error:
Sometimes the error is:
[h264 @ 000001949f7adfc0] error while decoding MB 13 4, bytestream -6
[h264 @ 000001949f825ac0] left block unavailable for requested intra4x4 mode -1
[h264 @ 000001949f825ac0] error while decoding MB 0 17, bytestream 762
Sometimes the error does not appear and the program is killed.
Update Error
After revising the code, I caught the error. The error found is
[h264 @ 0000019289b3fa80] error while decoding MB 4 5, bytestream -25
Now I don't know what to do, because I can't find anything about this error on Google.
Source Code:
Old Code
This is my earliest code, before I got suggestions in the comments.
import time
import cv2
import numpy as np
import math
import threading

print("Load MobileNetSSD model")
prototxt = "MobileNetSSD_deploy.prototxt"
model = "MobileNetSSD_deploy.caffemodel"
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
net = cv2.dnn.readNetFromCaffe(prototxt, model)

pos_line = 0
offset = 50
car = 0
detected = False
check = 0
prev_frame_time = 0

def detect():
    global check, car, detected
    check = 0
    if(detected == False):
        car += 1
        detected = True

def center_object(x, y, w, h):
    cx = x + int(w / 2)
    cy = y + int(h / 2)
    return cx, cy

def process_frame_MobileNetSSD(next_frame):
    global car, check, detected
    rgb = cv2.cvtColor(next_frame, cv2.COLOR_BGR2RGB)
    (H, W) = next_frame.shape[:2]
    blob = cv2.dnn.blobFromImage(next_frame, size=(300, 300), ddepth=cv2.CV_8U)
    net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5, 127.5, 127.5])
    detections = net.forward()
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.5:
            idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] != "person":
                continue
            label = CLASSES[idx]
            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
            (startX, startY, endX, endY) = box.astype("int")
            center_ob = center_object(startX, startY, endX-startX, endY-startY)
            cv2.circle(next_frame, center_ob, 4, (0, 0, 255), -1)
            if center_ob[0] < (pos_line+offset) and center_ob[0] > (pos_line-offset):
                # car+=1
                detect()
            else:
                check += 1
                if(check >= 5):
                    detected = False
            cv2.putText(next_frame, label+' '+str(round(confidence, 2)),
                        (startX, startY-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.rectangle(next_frame, (startX, startY),
                          (endX, endY), (0, 255, 0), 3)
    return next_frame

def PersonDetection_UsingMobileNetSSD():
    cap = cv2.VideoCapture()
    cap.open("rtsp://admin:Admin12345@192.168.100.20:554/Streaming/channels/2/")
    global car, pos_line, prev_frame_time
    frame_count = 0
    while True:
        try:
            time.sleep(0.1)
            new_frame_time = time.time()
            fps = int(1/(new_frame_time-prev_frame_time))
            prev_frame_time = new_frame_time
            ret, next_frame = cap.read()
            w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            pos_line = int(h_video/2)-50
            if ret == False: break
            frame_count += 1
            cv2.line(next_frame, (int(h_video/2), 0),
                     (int(h_video/2), int(h_video)), (255, 127, 0), 3)
            next_frame = process_frame_MobileNetSSD(next_frame)
            cv2.rectangle(next_frame, (248, 22), (342, 8), (0, 0, 0), -1)
            cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imshow("Video Original", next_frame)
            # print(car)
        except Exception as e:
            print(str(e))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print("/MobileNetSSD Person Detector")
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    t1 = threading.Thread(PersonDetection_UsingMobileNetSSD())
    t1.start()
New Code
I have revised my code and the program still stops grabbing frames. I only revised the PersonDetection_UsingMobileNetSSD() function, and I removed the multithreading I was using. The code ran for about 30 minutes, but after one broken frame it never re-enters the if ret: block.
def PersonDetection_UsingMobileNetSSD():
    cap = cv2.VideoCapture()
    cap.open("rtsp://admin:Admin12345@192.168.100.20:554/Streaming/channels/2/")
    global car, pos_line, prev_frame_time
    frame_count = 0
    while True:
        try:
            if cap.isOpened():
                ret, next_frame = cap.read()
                if ret:
                    new_frame_time = time.time()
                    fps = int(1/(new_frame_time-prev_frame_time))
                    prev_frame_time = new_frame_time
                    w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                    h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                    pos_line = int(h_video/2)-50
                    # next_frame = cv2.resize(next_frame, (720, 480), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
                    if ret == False: break
                    frame_count += 1
                    cv2.line(next_frame, (int(h_video/2), 0),
                             (int(h_video/2), int(h_video)), (255, 127, 0), 3)
                    next_frame = process_frame_MobileNetSSD(next_frame)
                    cv2.rectangle(next_frame, (248, 22), (342, 8), (0, 0, 0), -1)
                    cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    cv2.imshow("Video Original", next_frame)
                    # print(car)
                else:
                    print("Crashed Frame")
            else:
                print("Cap is not open")
        except Exception as e:
            print(str(e))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print("/MobileNetSSD Person Detector")
    cap.release()
    cv2.destroyAllWindows()
Requirement:
Hardware : Intel i5-1035G1, RAM 8 GB, NVIDIA GeForce MX330
Software : Python 3.6.2, OpenCV 4.5.1, NumPy 1.16.0
Question:
What should I do to fix this error?
What causes this to happen?
Best Regards,
Thanks

The main problem here is that an RTSP stream always contains some corrupted frames. The solution is to run the video capture on one thread and the frame processing on a second thread.
As an example:
import cv2
import threading
import queue

q = queue.Queue()

def this_receive(q):
    cap = cv2.VideoCapture("rtsp://admin:Admin12345@192.168.10.20:554/Streaming/channels/2/")
    cap.set(cv2.CAP_PROP_FPS, 5)
    ret, next_frame = cap.read()
    q.put(next_frame)
    while ret:
        ret, next_frame = cap.read()
        w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        q.put(next_frame)

def main_program(q):
    while True:
        try:
            if q.empty() != True:
                next_frame = q.get()
        except Exception as e:
            print(str(e))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == "__main__":
    print("Main Program")
    p2 = threading.Thread(target=this_receive, args=(q,))
    p2.start()
    p1 = threading.Thread(target=main_program, args=(q,))
    p1.start()
This example fits the case you are experiencing. A damaged frame will not affect the quality of the data processing. The drawback is that this method can introduce delay: because frames queue up faster than they are processed, the time shown on the video can lag real time by up to 10 minutes. Try it and see.
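If that lag is a problem, one common variant (a sketch of mine, not part of the original answer) is to bound the queue at a single frame and have the receiver thread drop the stale frame before putting the new one, so the processing thread always sees the most recent frame:

import queue
import threading
import cv2

q = queue.Queue(maxsize=1)  # hold only the newest frame

def this_receive(q):
    # same RTSP URL as in the answer above
    cap = cv2.VideoCapture("rtsp://admin:Admin12345@192.168.10.20:554/Streaming/channels/2/")
    while True:
        ret, next_frame = cap.read()
        if not ret:
            continue  # skip corrupted/failed reads instead of stopping
        if q.full():
            try:
                q.get_nowait()  # discard the stale frame
            except queue.Empty:
                pass  # the consumer emptied it first; nothing to drop
        q.put(next_frame)

The trade-off is that frames arriving while the consumer is busy are simply dropped, which is usually acceptable for live counting.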

Related

cv2.findContours returning None in Motion Detector App

I followed a video online about motion detection using OpenCV; however, I came across the problem that the findContours function is not returning a value. Any help is appreciated.
Here is the code:
import cv2
import time
import datetime
import imutils

def motion_detection():
    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    time.sleep(2)
    first_frame = None
    while True:
        frame = video_capture.read()[1]
        text = 'Unoccupied'
        greyscale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gaussian_frame = cv2.GaussianBlur(greyscale_frame, (21, 21), 0)
        blur_frame = cv2.blur(gaussian_frame, (5, 5))
        greyscale_image = blur_frame
        if first_frame is None:
            first_frame = greyscale_image
        else:
            pass
        frame = imutils.resize(frame, width=500)
        frame_delta = cv2.absdiff(first_frame, greyscale_image)
        # edit the ** thresh ** depending on the light/dark in room,
        # change the 100 (any pixel value over 100 will become 255 (white))
        thresh = cv2.threshold(frame_delta, 100, 255, cv2.THRESH_BINARY)[1]
        dilate_image = cv2.dilate(thresh, None, iterations=2)
        cnt = cv2.findContours(dilate_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
        for c in cnt:
            if cv2.contourArea(c) > 800:
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                text = 'Occupied'
                # text that appears when there is motion in video feed
            else:
                pass
        ''' now draw text and timestamp on security feed '''
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, '{+} Room Status: %s' % text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, datetime.datetime.now().strftime('%A %d %B %Y %I:%M:%S%p'),
                    (10, frame.shape[0] - 10), font, 0.35, (0, 0, 255), 1)
        cv2.imshow('Security Feed', frame)
        cv2.imshow('Threshold(foreground mask)', dilate_image)
        cv2.imshow('Frame_delta', frame_delta)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            cv2.destroyAllWindows()
            break

if __name__ == '__main__':
    motion_detection()
I have tried to debug and find the problem; the code is exactly what the video said to write, and I have had no luck.
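A likely cause, for what it's worth (this note is mine, not part of the original post): in OpenCV 4.x, cv2.findContours returns a 2-tuple (contours, hierarchy), so indexing [1] selects the hierarchy instead of the contour list. Since the code already imports imutils, a version-safe fix is:

# returns (contours, hierarchy) in OpenCV 4.x and (image, contours, hierarchy) in 3.x
cnts = cv2.findContours(dilate_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = imutils.grab_contours(cnts)  # picks the contour list for either version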

how to stop printing repeated values in this code

import numpy as np
import cv2
from pyzbar import pyzbar

def read_barcodes(frame):
    processed = []
    barcodes = pyzbar.decode(frame)
    for barcode in barcodes:
        x, y, w, h = barcode.rect
        barcode_info = barcode.data.decode('utf-8')
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, barcode_info, (x + 6, y - 6), font, 2.0, (255, 255, 255), 1)
        #my_list = [barcode_info]
        #myFinalList = np.unique(my_list).tolist()
        #print(myFinalList)
        #print(barcode_info)
        with open("barcode_result.txt", mode='w') as file:
            file.write("Recognized Barcode:" + barcode_info)
    return frame

def main():
    camera = cv2.VideoCapture(0)
    ret, frame = camera.read()
    while ret:
        ret, frame = camera.read()
        frame = read_barcodes(frame)
        cv2.imshow('Barcode/QR code reader', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
    camera.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
How can I stop the loop from printing repeated values?
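One common approach (a sketch of mine, not from the post; drawing code omitted) is to keep a set of barcode values already seen and only report values that are new:

from pyzbar import pyzbar
import cv2

seen = set()  # barcode values already reported

def read_barcodes(frame):
    for barcode in pyzbar.decode(frame):
        barcode_info = barcode.data.decode('utf-8')
        if barcode_info not in seen:
            seen.add(barcode_info)
            print(barcode_info)  # printed only the first time this value appears
            # mode 'a' appends; the original mode 'w' overwrote the file on every frame
            with open("barcode_result.txt", mode='a') as file:
                file.write("Recognized Barcode:" + barcode_info + "\n")
    return frame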

Print only once in a live detection model

import cv2
from tensorflow.keras.preprocessing.image import img_to_array
import os
import numpy as np
from tensorflow.keras.models import model_from_json

root_dir = os.getcwd()
# Load Face Detection Model
face_cascade = cv2.CascadeClassifier("models/haarcascade_frontalface_default.xml")
# Load Anti-Spoofing Model graph
json_file = open('antispoofing_models/finalyearproject_antispoofing_model_mobilenet.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load antispoofing model weights
model.load_weights('antispoofing_models/finalyearproject_antispoofing_model_99-0.976842.h5')
print("Model loaded from disk")
# video.open("http://192.168.1.101:8080/video")
# vs = VideoStream(src=0).start()
# time.sleep(2.0)
video = cv2.VideoCapture(0)
while True:
    try:
        ret, frame = video.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            face = frame[y-5:y+h+5, x-5:x+w+5]
            resized_face = cv2.resize(face, (160, 160))
            resized_face = resized_face.astype("float") / 255.0
            # resized_face = img_to_array(resized_face)
            resized_face = np.expand_dims(resized_face, axis=0)
            # pass the face ROI through the trained liveness detector
            # model to determine if the face is "real" or "fake"
            preds = model.predict(resized_face)[0]
            # print(preds)
            if preds > 0.45:
                label = 'spoof'
                cv2.putText(frame, label, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                cv2.rectangle(frame, (x, y), (x+w, y+h),
                              (0, 0, 255), 2)
                print("SPOOFED FACE DETECTED")
                break
            else:
                label = 'real'
                cv2.putText(frame, label, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.rectangle(frame, (x, y), (x+w, y+h),
                              (0, 255, 0), 2)
        cv2.imshow('frame', frame)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    except Exception as e:
        pass
video.release()
cv2.destroyAllWindows()
The above code detects real and spoofed faces, but I want print("SPOOFED FACE DETECTED") to run only once per minute when preds > 0.45; currently it prints continuously.
How can I add a time delay so that a spoofed face is reported at most once every minute while the program keeps executing?
It will print whenever preds > 0.45 while the while loop is running. If you want to print only once per minute without interrupting the while loop, you'll need to use a timer and adjust your condition.
I'm not familiar with the OpenCV library, so I'm not sure if there's a better way to track the elapsed time of the capture. But, using the built-in Python time library, you could do something like:
start = time.time()  # CHANGE: start time counter, initialized once before the loop
delta = 60
while True:
    try:
        ret, frame = video.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            face = frame[y-5:y+h+5, x-5:x+w+5]
            resized_face = cv2.resize(face, (160, 160))
            resized_face = resized_face.astype("float") / 255.0
            # resized_face = img_to_array(resized_face)
            resized_face = np.expand_dims(resized_face, axis=0)
            # pass the face ROI through the trained liveness detector
            # model to determine if the face is "real" or "fake"
            preds = model.predict(resized_face)[0]
            # print(preds)
            if preds > 0.45:
                label = 'spoof'
                cv2.putText(frame, label, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                cv2.rectangle(frame, (x, y), (x+w, y+h),
                              (0, 0, 255), 2)
                if (time.time() - start) >= delta:  # CHANGE: compute elapsed time and compare with threshold
                    print("SPOOFED FACE DETECTED")
                    start = time.time()
                break
            else:
                label = 'real'
                cv2.putText(frame, label, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.rectangle(frame, (x, y), (x+w, y+h),
                              (0, 255, 0), 2)
        cv2.imshow('frame', frame)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    except Exception as e:
        pass
video.release()
cv2.destroyAllWindows()
I've omitted the bits outside the loop and marked the changes for readability.
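A small note of mine (not in the original answer): time.monotonic() is generally preferable to time.time() for measuring elapsed intervals, because it cannot jump backwards if the system clock changes. A minimal sketch of the same rate-limiting idea:

import time

delta = 60
last_print = 0.0  # monotonic timestamp of the last report

def should_report():
    # returns True at most once per `delta` seconds
    global last_print
    now = time.monotonic()
    if now - last_print >= delta:
        last_print = now
        return True
    return False

Inside the detection branch you would then write: if should_report(): print("SPOOFED FACE DETECTED").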

Python CV2 video writer doesn't save video

I have a simple Python script using OpenCV and Keras that performs some detections on frames (follow-up from my previous question here). But when I want to record and save the frames as a video using video_writer, the generated video is empty.
What is wrong in the video_writer?
#........some code
# start the webcam feed
cap = cv2.VideoCapture(1)
canvasImageOriginal = cv2.imread("fg2.png")
canvasImage = cv2.imread("fg2.png")
canvasHappy = cv2.imread("fg2happy.png")
canvasSad = cv2.imread("fg2sad.png")
x0, x1 = 330, 1290
y0, y1 = 155, 700
#=========
w = 960  # int(cap.get(cv2.CV_CAP_PROP_FRAME_WIDTH))
h = 540  # int(cap.get(cv2.CV_CAP_PROP_FRAME_HEIGHT))
# video recorder
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))
#=========
prediction_history = []
LOOKBACK = 5  # how far you want to look back
counter = 0
while True:
    # Find haar cascade to draw bounding box around face
    ret, frame = cap.read()
    frame = cv2.flip(frame, 3)
    if not ret:
        break
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        text = emotion_dict[maxindex]
        print(prediction[0][3])
        prediction_history.append(maxindex)
        most_common_index = max(set(prediction_history[-LOOKBACK:][::-1]), key=prediction_history.count)
        text = emotion_dict[most_common_index]
        # if ("Sad" in text) or ("Angry" in text) or ("Disgusted" in text):
        #     text = "Sad"
        if ("Happy" in text) or ("Sad" in text):
            cv2.putText(frame, text, (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        if ("Happy" in text):
            counter = counter + 1
            if counter == 10:
                # print("Happy!")
                canvasImage = canvasHappy
        else:
            counter = 0
            canvasImage = canvasImageOriginal
    dim = (800, 480)
    frame_shrunk = cv2.resize(frame, (x1 - x0, y1 - y0))
    canvasImage[y0:y1, x0:x1] = frame_shrunk
    # cv2.imshow('Video', cv2.resize(frame, dim, interpolation=cv2.INTER_CUBIC))
    cv2.imshow('Demo', canvasImage)
    video_writer.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
video_writer.release()
cv2.destroyAllWindows()
As mentioned above, check print(frame.shape).
When I did, I saw (300, 450, 3), and after changing the VideoWriter resolution to (450, 300) it worked for me. In other words, frame.shape is (height, width, channels), but the VideoWriter resolution is (width, height).
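In other words (a sketch of mine, untested against the original setup), you can derive the writer size from an actual frame instead of hard-coding it:

ret, frame = cap.read()   # grab one frame to learn the real capture size
h, w = frame.shape[:2]    # frame.shape is (height, width, channels)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))  # VideoWriter expects (width, height)

If the size passed to VideoWriter does not match the frames you write, OpenCV silently writes nothing, which is why the output file ends up empty.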

Pausing GPIO.output without affecting the rest of the code

I'm working on a real-time face recognition program that analyzes an IP camera video stream and triggers a GPIO signal when a face is recognized. After a face is first recognized, the GPIO must not be activated again for a certain amount of time (e.g. 45 seconds).
I tried inserting time.sleep(45) after the GPIO signal is triggered, which seems to work, BUT after the 45-second pause the video stream being analyzed is no longer live. It resumes at the very frame that followed the one where the face was recognized, in other words with a 45-second delay.
How can I pause the GPIO output for 45 seconds and get back to analyzing a live video stream afterwards?
import cv2
import numpy as np
import os
import time
import RPi.GPIO as GPIO

relay = 23
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, 1)
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX
# initiate id counter
id = 0
# names related to ids: example ==> Jenifer: id=1, etc
names = ['None', 'Jenifer', 'Jenifer', 'Luciola']
# Initialize and start realtime video capture
cam = cv2.VideoCapture('ipcamera')
frame_rate = 1
prev = 0
# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)
while True:
    time_elapsed = time.time() - prev
    res, image = cam.read()
    if time_elapsed > 1./frame_rate:
        prev = time.time()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
            id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if (confidence < 85):
                id = names[id]
                confidence = " {0}%".format(round(100 - confidence))
                GPIO.output(relay, 0)
                print("Ouverture du portail")  # "Opening the gate"
                time.sleep(1)
                GPIO.output(relay, 1)
            else:
                GPIO.output(relay, 1)
            cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
            cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)
        cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
Possum's solution works well.
Line 66:
                GPIO.output(relay, 1)
                cam.release()
                time.sleep(45)
                cam = cv2.VideoCapture('ipcamera')
            else:
You could use threading.Thread to create a thread that will run the camera stream in the background, so it will be unaffected by the sleep function. You could do something like this:
from threading import Thread
import cv2

gray = None
minW = None
minH = None

def camera_stream():
    global gray

    def get_frames():
        global minW, minH
        cam = cv2.VideoCapture('ipcamera')  # open the capture once, not on every frame
        minW = 0.1*cam.get(3)
        minH = 0.1*cam.get(4)
        while True:
            res, image = cam.read()
            gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            yield gray_frame

    frames = get_frames()  # create the generator once
    while True:
        gray = next(frames)

thread = Thread(target=camera_stream, daemon=True)
thread.start()
...
Now wherever you used the variable gray, it should use the next frame in the camera stream that is constantly running in the background.
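For example (hypothetical usage, reusing the names above), the recognition loop would then poll gray instead of calling cam.read() itself:

while True:
    if gray is None or minW is None:
        continue  # the stream thread has not produced a frame yet
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    # ...recognition and GPIO logic as before, now free to time.sleep(45)
    # without freezing the stream, since frames keep arriving in the background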
You could also simply define a function that checks whether the delay has passed.
I amended your code; I haven't tested it, but I think it should work.
import numpy as np
import os
import time
import RPi.GPIO as GPIO
import cv2

relay = 23
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, 1)
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

delay = 45
last_trigger = int(time.time()) - delay

def check_delay():
    global last_trigger, delay
    current_time = int(time.time())
    current_delay = current_time - last_trigger
    if current_delay < delay:
        return False
    else:
        return True

font = cv2.FONT_HERSHEY_SIMPLEX
# initiate id counter
id = 0
# names related to ids: example ==> Jenifer: id=1, etc
names = ['None', 'Jenifer', 'Jenifer', 'Luciola']
# Initialize and start realtime video capture
cam = cv2.VideoCapture('ipcamera')
frame_rate = 1
prev = 0
# Define min window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)
while True:
    time_elapsed = time.time() - prev
    res, image = cam.read()
    if time_elapsed > 1. / frame_rate:
        prev = time.time()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if (confidence < 85):
                id = names[id]
                confidence = " {0}%".format(round(100 - confidence))
                GPIO.output(relay, 0)
                print("Ouverture du portail")  # "Opening the gate"
                time.sleep(1)
                if check_delay():
                    GPIO.output(relay, 1)
                    last_trigger = int(time.time())
            else:
                if check_delay():
                    GPIO.output(relay, 1)
                    last_trigger = int(time.time())
            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
        cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
