I took this code from GitHub and have installed all the dependencies. When I try to run the project I get the errors below. What could be a possible fix for this issue?
Traceback (most recent call last):
File "c:\Project\Drowsiness-Detection-System-for-Drivers\driver_drowsiness.py", line 102, in <module>
cv2.imshow("Result of detector", face_frame)
NameError: name 'face_frame' is not defined
[ WARN:0@19.631] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (539) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
# Importing OpenCV Library for basic image processing functions
import cv2
# Numpy for array related functions
import numpy as np
# Dlib for deep learning based Modules and face landmark detection
import dlib
# face_utils for basic operations of conversion
from imutils import face_utils

# Initializing the camera and taking the instance
cap = cv2.VideoCapture(0)

# Initializing the face detector and landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# status marking for current state
sleep = 0
drowsy = 0
active = 0
status = ""
color = (0, 0, 0)


def compute(ptA, ptB):
    # Euclidean distance between two landmark points
    dist = np.linalg.norm(ptA - ptB)
    return dist


def blinked(a, b, c, d, e, f):
    # eye aspect ratio: vertical eye distances over the horizontal one
    up = compute(b, d) + compute(c, e)
    down = compute(a, f)
    ratio = up / (2.0 * down)

    # Checking if it is blinked
    if ratio > 0.25:
        return 2
    elif 0.21 < ratio <= 0.25:
        return 1
    else:
        return 0


while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    # detected face in faces array
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()

        # face_frame is only ever assigned inside this loop (this is the bug)
        face_frame = frame.copy()
        cv2.rectangle(face_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

        landmarks = predictor(gray, face)
        landmarks = face_utils.shape_to_np(landmarks)

        # The numbers are actually the landmarks which outline the eyes
        left_blink = blinked(landmarks[36], landmarks[37],
                             landmarks[38], landmarks[41], landmarks[40], landmarks[39])
        right_blink = blinked(landmarks[42], landmarks[43],
                              landmarks[44], landmarks[47], landmarks[46], landmarks[45])

        # Now judge what to do for the eye blinks
        if left_blink == 0 or right_blink == 0:
            sleep += 1
            drowsy = 0
            active = 0
            if sleep > 6:
                status = "SLEEPING !!!"
                color = (255, 0, 0)
        elif left_blink == 1 or right_blink == 1:
            sleep = 0
            active = 0
            drowsy += 1
            if drowsy > 6:
                status = "Drowsy !"
                color = (0, 0, 255)
        else:
            drowsy = 0
            sleep = 0
            active += 1
            if active > 6:
                status = "Active :)"
                color = (0, 255, 0)

        cv2.putText(frame, status, (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)

        for n in range(0, 68):
            (x, y) = landmarks[n]
            cv2.circle(face_frame, (x, y), 1, (255, 255, 255), -1)

    cv2.imshow("Frame", frame)
    cv2.imshow("Result of detector", face_frame)  # NameError if no face was ever detected

    key = cv2.waitKey(1)
    if key == 27:
        break
There is an issue in the script itself with how the face_frame variable is used, and it has already been reported on GitHub (see the open issue for face_frame). face_frame is only assigned inside the for face in faces: loop, so if the detector finds no face in the very first frame, cv2.imshow("Result of detector", face_frame) runs before the variable ever exists, which raises the NameError. Moving face_frame = frame.copy() out of the loop, so it is initialized once per frame, fixes it.
Working code:
# Importing OpenCV Library for basic image processing functions
import cv2
# Numpy for array related functions
import numpy as np
# Dlib for deep learning based Modules and face landmark detection
import dlib
# face_utils for basic operations of conversion
from imutils import face_utils

# Initializing the camera and taking the instance
cap = cv2.VideoCapture(0)

# Initializing the face detector and landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# status marking for current state
sleep = 0
drowsy = 0
active = 0
status = ""
color = (0, 0, 0)


def compute(ptA, ptB):
    dist = np.linalg.norm(ptA - ptB)
    return dist


def blinked(a, b, c, d, e, f):
    up = compute(b, d) + compute(c, e)
    down = compute(a, f)
    ratio = up / (2.0 * down)

    # Checking if it is blinked
    if ratio > 0.25:
        return 2
    elif 0.21 < ratio <= 0.25:
        return 1
    else:
        return 0


while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    face_frame = frame.copy()
    # detected face in faces array
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()

        cv2.rectangle(face_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

        landmarks = predictor(gray, face)
        landmarks = face_utils.shape_to_np(landmarks)

        # The numbers are actually the landmarks which outline the eyes
        left_blink = blinked(landmarks[36], landmarks[37],
                             landmarks[38], landmarks[41], landmarks[40], landmarks[39])
        right_blink = blinked(landmarks[42], landmarks[43],
                              landmarks[44], landmarks[47], landmarks[46], landmarks[45])

        # Now judge what to do for the eye blinks
        if left_blink == 0 or right_blink == 0:
            sleep += 1
            drowsy = 0
            active = 0
            if sleep > 6:
                status = "SLEEPING !!!"
                color = (255, 0, 0)
        elif left_blink == 1 or right_blink == 1:
            sleep = 0
            active = 0
            drowsy += 1
            if drowsy > 6:
                status = "Drowsy !"
                color = (0, 0, 255)
        else:
            drowsy = 0
            sleep = 0
            active += 1
            if active > 6:
                status = "Active :)"
                color = (0, 255, 0)

        cv2.putText(frame, status, (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)

        for n in range(0, 68):
            (x, y) = landmarks[n]
            cv2.circle(face_frame, (x, y), 1, (255, 255, 255), -1)

    cv2.imshow("Frame", frame)
    cv2.imshow("Result of detector", face_frame)

    key = cv2.waitKey(1)
    if key == 27:
        break
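An alternative fix, if you would rather not copy every frame (a sketch, equivalent in effect to the code above): initialize face_frame = None before the while loop and only show the detector window once a face has actually been drawn:

face_frame = None
while True:
    _, frame = cap.read()
    # ... detection and drawing as above ...
    if face_frame is not None:
        cv2.imshow("Result of detector", face_frame)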
I'm trying to save the imgCanny output as an image/screenshot. How do I incorporate this so that a screenshot of that window is captured when a certain key is pressed? I am taking the live feed from the webcam and processing it, and I would like to be able to press a key on the keyboard to capture and save a screenshot of the imgCanny window.
import cv2
import numpy as np

frameWidth, frameHeight = 640, 480  # assumed values; these were not defined in the original snippet
font = cv2.FONT_HERSHEY_SIMPLEX     # assumed; `font` is used in getContours but was never defined

cap = cv2.VideoCapture(1)
cv2.waitKey(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)


def empty(a):
    pass


cv2.namedWindow("Parameters")
cv2.resizeWindow("Parameters", 640, 240)
cv2.createTrackbar("Threshold1", "Parameters", 150, 500, empty)
cv2.createTrackbar("Threshold2", "Parameters", 255, 500, empty)


def getContours(img, imgContour):
    # retrieval method: RETR_EXTERNAL keeps only the extreme outer contours
    # (change CHAIN_APPROX to SIMPLE for fewer points)
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    # remove small bits of noise
    for cnt in contours:
        area = cv2.contourArea(cnt)
        # Used to flatten the array containing
        # the co-ordinates of the vertices.
        approx = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True),
                                  True)
        n = approx.ravel()
        i = 0
        for j in n:
            if i % 2 == 0:
                x = n[i]
                y = n[i + 1]
                # String containing the co-ordinates.
                string = str(int(x / 3.5)) + " " + str(int(y / 3.5))
                print(string)
                if i == 0:
                    # text on topmost co-ordinate.
                    cv2.putText(imgContour, "", (x, y),
                                font, 0.5, (255, 0, 0))
                else:
                    # text on remaining co-ordinates.
                    cv2.putText(imgContour, string, (x, y),
                                font, 0.5, (0, 255, 0))
            i = i + 1
        if area > 3000:
            cv2.drawContours(imgContour, cnt, -1, (255, 0, 255), 1)
            peri = cv2.arcLength(cnt, True)  # True means the contour is closed


while True:
    success, img = cap.read()
    imgContour = img.copy()
    imgBlur = cv2.GaussianBlur(img, (31, 31), 1)
    imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
    threshold1 = cv2.getTrackbarPos("Threshold1", "Parameters")
    threshold2 = cv2.getTrackbarPos("Threshold2", "Parameters")
    imgCanny = cv2.Canny(imgGray, threshold1, threshold2)
    kernel = np.ones((5, 5))
    imgDil = cv2.dilate(imgCanny, kernel, iterations=1)
    getContours(imgDil, imgContour)
    # cv2.imshow("Results", img)
    # cv2.imshow("Mask", imgGray)
    cv2.imshow("Canny", imgCanny)
    cv2.imshow("Dilated", imgContour)
    cv2.imshow("Test", imgDil)
    key = cv2.waitKey(100)
    if key == 27:  # kills with Esc
        break

cap.release()
cv2.destroyAllWindows()
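One way to get the screenshot behaviour described above (a sketch, not part of the original post): inspect the key code that cv2.waitKey already returns each frame, and call cv2.imwrite on imgCanny when your chosen key is pressed. The 's' key and the screenshot_%d.png naming scheme are assumptions; initialize screenshot_count = 0 before the loop.

    key = cv2.waitKey(100)
    if key == 27:           # Esc still kills the loop
        break
    elif key == ord('s'):   # 's' is an assumed choice of capture key
        # save the current contents of the "Canny" window to disk
        cv2.imwrite("screenshot_%d.png" % screenshot_count, imgCanny)
        screenshot_count += 1

cv2.imwrite saves whatever array you pass it, so this captures exactly the frame currently shown in the Canny window.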
I am stuck on the code below. Please help me solve the "[ WARN:0] terminating async callback" error in cv2.
# this is a client program which runs on the client side
import cv2
import socket
import numpy as np
import math
import time

try:
    start_time = time.time()
    state1 = "off"
    state2 = "off"
    state3 = "off"
    mode = "on"
    host = "192.168.0.106"  # must match the server socket; the IP address of the Raspberry Pi in our case
    port = 9345
    mySocket = socket.socket()
    mySocket.connect((host, port))

    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        # read image
        ret, img = cap.read()

        # get hand data from the rectangle sub window on the screen
        cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0), 0)
        crop_img = img[100:300, 100:300]

        # convert to grayscale
        grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)

        # applying gaussian blur
        value = (35, 35)
        blurred = cv2.GaussianBlur(grey, value, 0)

        # thresholding: Otsu's Binarization method
        _, thresh1 = cv2.threshold(blurred, 127, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

        # show thresholded image
        cv2.imshow('Thresholded', thresh1)

        # check OpenCV version to avoid unpacking error
        (version, _, _) = cv2.__version__.split('.')
        if version == '3':
            image, contours, hierarchy = cv2.findContours(thresh1.copy(),
                                                          cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        elif version == '2':
            contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_NONE)

        # find contour with max area
        cnt = max(contours, key=lambda x: cv2.contourArea(x))

        # create bounding rectangle around the contour (can skip below two lines)
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 0)

        # finding convex hull
        hull = cv2.convexHull(cnt)

        # drawing contours
        drawing = np.zeros(crop_img.shape, np.uint8)
        cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
        cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)

        # finding convex hull
        hull = cv2.convexHull(cnt, returnPoints=False)

        # finding convexity defects
        defects = cv2.convexityDefects(cnt, hull)
        count_defects = 0
        cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

        # applying Cosine Rule to find angle for all defects (between fingers)
        # with angle > 90 degrees and ignore defects
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])

            # find length of all sides of triangle
            a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
            b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
            c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)

            # apply cosine rule here
            angle = math.acos((b**2 + c**2 - a**2) / (2 * b * c)) * 57

            # ignore angles > 90 and highlight rest with red dots
            if angle <= 90:
                count_defects += 1
                cv2.circle(crop_img, far, 1, [0, 0, 255], -1)
            # dist = cv2.pointPolygonTest(cnt, far, True)

            # draw a line from start to end i.e. the convex points (finger tips)
            # (can skip this part)
            cv2.line(crop_img, start, end, [0, 255, 0], 2)
            # cv2.circle(crop_img, far, 5, [0, 0, 255], -1)

        # define actions required
        if count_defects == 1:
            if time.time() > start_time + 2:
                if mode == "on":
                    mySocket.send("on_led1".encode())
                    state1 = "on"
                    print("led 1 is on")
                else:
                    mySocket.send("off_led1".encode())
                    state1 = "off"
                    print("led 1 is off")
                start_time = time.time()
            cv2.putText(img, "led 1 is " + state1, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
        elif count_defects == 2:
            if time.time() > start_time + 2:
                if mode == "on":
                    mySocket.send("on_led2".encode())
                    state2 = "on"
                    print("led 2 is on")
                else:
                    mySocket.send("off_led2".encode())
                    state2 = "off"
                    print("led 2 is off")
                start_time = time.time()
            cv2.putText(img, "led 2 is " + state2, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
        elif count_defects == 3:
            if time.time() > start_time + 2:
                if mode == "on":
                    mySocket.send("on_led3".encode())
                    state3 = "on"
                    print("led 3 is on")
                else:
                    mySocket.send("off_led3".encode())
                    state3 = "off"
                    print("led 3 is off")
                start_time = time.time()
            cv2.putText(img, "led 3 is " + state3, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
        elif count_defects == 4:
            cv2.putText(img, "mode is " + mode, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
            if time.time() > start_time + 2:
                if mode == "on":
                    mode = "off"
                else:
                    mode = "on"
                start_time = time.time()
                print(mode)
        else:
            cv2.putText(img, "use your fingers to turn the lights on/off; current mode is " + mode,
                        (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)

        # show appropriate images in windows
        cv2.imshow('Gesture', img)
        all_img = np.hstack((drawing, crop_img))
        cv2.imshow('Contours', all_img)

        k = cv2.waitKey(10)
        if k == 27:
            break

    mySocket.close()
except:
    mySocket.send("close_all".encode())
    mySocket.close()
The above is a program I created for controlling an embedded device with cv2. Everything works, but I still get the "[ WARN:0] terminating async callback" error. I also call camera.release() and cv2.destroyAllWindows(), but that does not help. Any help would be appreciated.
I also tried this suggestion, but it still does not work. I am using the Windows 10 operating system.
The full code, with an explanation, is linked here.
This warning comes from the MSMF backend on Windows and is generally harmless; it appears when the capture device is torn down. Try forcing the DirectShow backend instead:
cap = cv2.VideoCapture(cv2.CAP_DSHOW)
or
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
I found a problem in your code. Maybe you updated your cv2 library to version 4; that is why neither of the two conditions below is satisfied:

if version == '3':
    image, contours, hierarchy = cv2.findContours(thresh1.copy(),
                                                  cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
elif version == '2':
    contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)

You should remove the elif version == '2': branch and simply use else:. That might help.
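A version-agnostic variant is also possible (a sketch; it relies on the fact that cv2.findContours returns three values in OpenCV 3.x but two in 2.x and 4.x):

# take the last two returned values, whatever the OpenCV version
result = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours, hierarchy = result[-2:]

This way the version check can be dropped entirely.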
I'm trying to create a motion detection alarm using OpenCV, a webcam, and Python as a project. I created the motion detection and wanted to add features like mail alerts and SMS alerts, but when I call my mailing code the screen lags for about 5 seconds. I need to alert the user as soon as motion is detected.
Here's my code for the motion detection:
import cv2
import datetime


def detect(img, feed):
    motiondetect = False
    temp = img.copy()
    # count contours in the thresholded difference image
    contours, hierarchy = cv2.findContours(temp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 20:
        motiondetect = True
    else:
        motiondetect = False
    return motiondetect


cap = cv2.VideoCapture(0)
pr = False
font = cv2.FONT_HERSHEY_SIMPLEX
recording = False
startr = True
send = True
# cv2.cv.CV_FOURCC is the OpenCV 2.x API; on newer versions use cv2.VideoWriter_fourcc(*'IYUV')
fourcc = cv2.cv.CV_FOURCC(*'IYUV')
count = 0
out = cv2.VideoWriter()

while True:
    ret, img = cap.read()
    if startr == True:
        out = cv2.VideoWriter("E:/vid" + str(count) + ".avi", fourcc, 6, (640, 480), 1)
        startr = False
    img = cv2.resize(img, (640, 480))
    grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, img2 = cap.read()
    grayimg2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # difference of two consecutive frames, thresholded and blurred
    diffimg = cv2.absdiff(grayimg, grayimg2)
    m, thresimg = cv2.threshold(diffimg, 20, 255, cv2.THRESH_BINARY)
    thresimg = cv2.blur(thresimg, (10, 10))
    n, thresimg = cv2.threshold(thresimg, 20, 255, cv2.THRESH_BINARY)
    motionn = detect(thresimg, img)

    # build a dd-mm-yyyy hh:mm:ss timestamp, zero-padding each field
    now = datetime.datetime.now()
    mm = str(now.month)
    dd = str(now.day)
    yy = str(now.year)
    hour = str(now.hour)
    if int(hour) < 10:
        hour = "0" + str(hour)
    mi = str(now.minute)
    if int(mi) < 10:
        mi = "0" + str(mi)
    ss = str(now.second)
    if int(ss) < 10:
        ss = "0" + str(ss)
    pdt = dd + "-" + mm + "-" + yy + " " + hour + ":" + mi + ":" + ss

    cv2.rectangle(img, (0, 460), (200, 480), (255, 255, 255), -1)
    cv2.putText(img, str(pdt), (10, 473), font, 0.5, (0, 0, 0), 2)

    if motionn == True:
        recording = True
    else:
        recording = False
    if recording == True:
        out.write(img)
        cv2.putText(img, "Motion Detected", (0, 420), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    cv2.imshow("Window", img)
    k = cv2.waitKey(20)
    if k == 27:
        break
    elif k == 110:  # 'n': start a new output file
        count = count + 1
        startr = True
    elif k == 114:  # 'r': restart recording
        if startr == False:
            startr = True

cap.release()
cv2.destroyAllWindows()
Where should I call the mailing function?
Thanks for the help.
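Not part of the original post, but one common way to avoid the multi-second freeze is to run the mailing code on a background thread so the capture loop never blocks on SMTP. A minimal sketch, assuming you already have a blocking send_alert_email() function (the name and the 60-second cooldown are assumptions):

import threading
import time

last_alert = 0.0
ALERT_COOLDOWN = 60  # assumed: minimum seconds between alert mails

def alert_async():
    # send_alert_email is assumed to be your existing (blocking) mailing function;
    # a daemon thread lets the video loop keep drawing frames while SMTP runs
    threading.Thread(target=send_alert_email, daemon=True).start()

# inside the while loop, right after motionn is computed:
# if motionn and time.time() - last_alert > ALERT_COOLDOWN:
#     alert_async()
#     last_alert = time.time()

The cooldown matters: without it, every frame with motion would spawn another mail thread.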
I have this code that I downloaded from GitHub to use in an OpenCV project. Everything worked fine the first time, but after that it won't open, and it keeps showing me the following error at line 8:
Traceback (most recent call last):
File "hand.py", line 8, in <module>
crop_img = img[100:300,100:300]
TypeError: 'NoneType' object is not subscriptable
Here is the code:
import cv2
import numpy as np
import math

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, img = cap.read()
    cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0), 0)
    crop_img = img[100:300, 100:300]
    grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
    value = (35, 35)
    blurred = cv2.GaussianBlur(grey, value, 0)
    _, thresh1 = cv2.threshold(blurred, 127, 255,
                               cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    cv2.imshow('Thresholded', thresh1)
    contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_NONE)
    max_area = -1
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if area > max_area:
            max_area = area
            ci = i
    cnt = contours[ci]
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 0)
    hull = cv2.convexHull(cnt)
    drawing = np.zeros(crop_img.shape, np.uint8)
    cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)
    hull = cv2.convexHull(cnt, returnPoints=False)
    defects = cv2.convexityDefects(cnt, hull)
    count_defects = 0
    cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
    for i in range(defects.shape[0]):
        s, e, f, d = defects[i, 0]
        start = tuple(cnt[s][0])
        end = tuple(cnt[e][0])
        far = tuple(cnt[f][0])
        a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
        b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
        c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
        angle = math.acos((b**2 + c**2 - a**2) / (2 * b * c)) * 57
        if angle <= 90:
            count_defects += 1
            cv2.circle(crop_img, far, 1, [0, 0, 255], -1)
        # dist = cv2.pointPolygonTest(cnt, far, True)
        cv2.line(crop_img, start, end, [0, 255, 0], 2)
        # cv2.circle(crop_img, far, 5, [0, 0, 255], -1)
    if count_defects == 1:
        cv2.putText(img, "this is 2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
    elif count_defects == 2:
        str = "this is 3 !!!"
        cv2.putText(img, str, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    elif count_defects == 3:
        cv2.putText(img, "This is 4 :P", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
    elif count_defects == 4:
        cv2.putText(img, "this is 5 !!!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
    else:
        cv2.putText(img, "this is 0 !!!", (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
    # cv2.imshow('drawing', drawing)
    # cv2.imshow('end', crop_img)
    cv2.imshow('Gesture', img)
    all_img = np.hstack((drawing, crop_img))
    cv2.imshow('Contours', all_img)
    k = cv2.waitKey(10)
    if k == 27:
        break
I found the solution. It appears that some flaky video drivers return an invalid first frame. All I did was check ret and continue when it is false, and it is working fine now.
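In loop form, that check looks something like this (a sketch of what is described above, not the exact code used):

while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        continue  # skip invalid frames from the driver instead of indexing None
    crop_img = img[100:300, 100:300]
    # ... rest of the processing loop unchanged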
The exception indicates that cap.read returned None for img. You should look out for that:

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break

The documentation says:
If no frames has been grabbed (camera has been disconnected, or there are no more frames in video file), the methods return false and the functions return NULL pointer.
So if you are using a USB webcam, make sure that your USB connection is stable; in particular, check whether you are going through any USB hubs or extension cables, or whether the port itself is in bad shape.