This code has been taken from GitHub, and I have installed all of its dependencies. When I try to run the project I get the errors below. What could be a possible fix for this issue?
Traceback (most recent call last):
File "c:\Project\Drowsiness-Detection-System-for-Drivers\driver_drowsiness.py", line 102, in <module>
cv2.imshow("Result of detector", face_frame)
NameError: name 'face_frame' is not defined
[ WARN:0#19.631] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (539) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
# Importing OpenCV Library for basic image processing functions
import cv2
# Numpy for array related functions
import numpy as np
# Dlib for deep learning based Modules and face landmark detection
import dlib
# face_utils for basic operations of conversion
from imutils import face_utils

# Initializing the camera and taking the instance
cap = cv2.VideoCapture(0)

# Initializing the face detector and landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# status marking for current state
sleep = 0
drowsy = 0
active = 0
status = ""
color = (0, 0, 0)

def compute(ptA, ptB):
    dist = np.linalg.norm(ptA - ptB)
    return dist

def blinked(a, b, c, d, e, f):
    up = compute(b, d) + compute(c, e)
    down = compute(a, f)
    ratio = up/(2.0*down)
    # Checking if it is blinked
    if(ratio > 0.25):
        return 2
    elif(ratio > 0.21 and ratio <= 0.25):
        return 1
    else:
        return 0

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    # detected face in faces array
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        face_frame = frame.copy()
        cv2.rectangle(face_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        landmarks = predictor(gray, face)
        landmarks = face_utils.shape_to_np(landmarks)
        # The numbers are actually the landmarks which will show eye
        left_blink = blinked(landmarks[36], landmarks[37],
                             landmarks[38], landmarks[41], landmarks[40], landmarks[39])
        right_blink = blinked(landmarks[42], landmarks[43],
                              landmarks[44], landmarks[47], landmarks[46], landmarks[45])
        # Now judge what to do for the eye blinks
        if(left_blink == 0 or right_blink == 0):
            sleep += 1
            drowsy = 0
            active = 0
            if(sleep > 6):
                status = "SLEEPING !!!"
                color = (255, 0, 0)
        elif(left_blink == 1 or right_blink == 1):
            sleep = 0
            active = 0
            drowsy += 1
            if(drowsy > 6):
                status = "Drowsy !"
                color = (0, 0, 255)
        else:
            drowsy = 0
            sleep = 0
            active += 1
            if(active > 6):
                status = "Active :)"
                color = (0, 255, 0)
        cv2.putText(frame, status, (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)
        for n in range(0, 68):
            (x, y) = landmarks[n]
            cv2.circle(face_frame, (x, y), 1, (255, 255, 255), -1)
    cv2.imshow("Frame", frame)
    cv2.imshow("Result of detector", face_frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
There is an issue in the repository itself with the way the face_frame variable is used, and it has already been reported as an issue on the project's GitHub page: face_frame is only assigned inside the for face in faces: loop, so on any frame where no face is detected the name is never defined by the time cv2.imshow("Result of detector", face_frame) runs, which raises the NameError.
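The essential change, excerpted from the corrected script below: move the frame copy out of the face loop, so face_frame is assigned on every iteration of the while loop whether or not a face was detected:

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    face_frame = frame.copy()  # moved out of the for loop: defined even when no face is found
    for face in faces:
        # ... rest of the loop body unchanged ...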
Working code:
# Importing OpenCV Library for basic image processing functions
import cv2
# Numpy for array related functions
import numpy as np
# Dlib for deep learning based Modules and face landmark detection
import dlib
# face_utils for basic operations of conversion
from imutils import face_utils

# Initializing the camera and taking the instance
cap = cv2.VideoCapture(0)

# Initializing the face detector and landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# status marking for current state
sleep = 0
drowsy = 0
active = 0
status = ""
color = (0, 0, 0)

def compute(ptA, ptB):
    dist = np.linalg.norm(ptA - ptB)
    return dist

def blinked(a, b, c, d, e, f):
    up = compute(b, d) + compute(c, e)
    down = compute(a, f)
    ratio = up/(2.0*down)
    # Checking if it is blinked
    if(ratio > 0.25):
        return 2
    elif(ratio > 0.21 and ratio <= 0.25):
        return 1
    else:
        return 0

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    face_frame = frame.copy()
    # detected face in faces array
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        cv2.rectangle(face_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        landmarks = predictor(gray, face)
        landmarks = face_utils.shape_to_np(landmarks)
        # The numbers are actually the landmarks which will show eye
        left_blink = blinked(landmarks[36], landmarks[37],
                             landmarks[38], landmarks[41], landmarks[40], landmarks[39])
        right_blink = blinked(landmarks[42], landmarks[43],
                              landmarks[44], landmarks[47], landmarks[46], landmarks[45])
        # Now judge what to do for the eye blinks
        if(left_blink == 0 or right_blink == 0):
            sleep += 1
            drowsy = 0
            active = 0
            if(sleep > 6):
                status = "SLEEPING !!!"
                color = (255, 0, 0)
        elif(left_blink == 1 or right_blink == 1):
            sleep = 0
            active = 0
            drowsy += 1
            if(drowsy > 6):
                status = "Drowsy !"
                color = (0, 0, 255)
        else:
            drowsy = 0
            sleep = 0
            active += 1
            if(active > 6):
                status = "Active :)"
                color = (0, 255, 0)
        cv2.putText(frame, status, (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)
        for n in range(0, 68):
            (x, y) = landmarks[n]
            cv2.circle(face_frame, (x, y), 1, (255, 255, 255), -1)
    cv2.imshow("Frame", frame)
    cv2.imshow("Result of detector", face_frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
Related
Hi, I'm making a project about detecting bullet holes in shooting targets. My original idea was to use the Hough circle algorithm to detect both the target rings, which works quite well for photos taken straight in front of the target, and the bullet holes, where it works less well. So I was wondering if anyone could suggest a better way of finding the holes, or help me improve this code.
import cv2 as cv
import numpy as np
import math
import sys
from PIL import Image
import matplotlib.pyplot as plt

MAX_POINTS = 10

def main(argv):
    default_file = 'tarczamala.jpg'
    default_size = 600, 600
    im = Image.open(default_file)
    im = im.resize(default_size, Image.ANTIALIAS)
    im.save('600' + default_file)
    filename = argv[0] if len(argv) > 0 else '600' + default_file
    # Loads an image
    src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_COLOR)
    # Check if image is loaded fine
    if src is None:
        print ('Error opening image!')
        print ('Usage: hough_circle.py [image_name -- default ' + default_file + '] \n')
        return -1
    # grayscale
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    cv.imshow('gray', gray)
    # Bilateral
    bilateral = cv.bilateralFilter(gray, 7, 15, 10)
    cv.imshow('bilateral', bilateral)
    blank = np.zeros(bilateral.shape[:2], dtype='uint8')
    cv.imshow('blank', blank)
    # mask = cv.circle(blank, (bilateral.shape[1] // 2, bilateral.shape[0] // 2), 320, 255, -1)
    # cv.imshow('Mask', mask)
    #
    # masked = cv.bitwise_and(bilateral, bilateral, mask=mask)
    # cv.imshow('masked', masked)
    # Edge Cascade
    canny = cv.Canny(bilateral, 50, 175)
    cv.imshow('canny1', canny)
    # ret, tresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
    # cv.imshow('tresch', tresh)
    contours, hierarchies = cv.findContours(canny, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
    print(f'{len(contours)} contour(s) found')
    # cv.drawContours(blank, contours, -1, (255,0,0), 1)
    # cv.imshow('contours drawn', blank)
    rows = canny.shape[0]
    # Target
    circles = cv.HoughCircles(canny, cv.HOUGH_GRADIENT, 1, 0.01,
                              param1=100, param2=50,
                              minRadius=7, maxRadius=300)
    # print(f'{circles}"')
    biggestCircle = findBiggestCircle(circles)
    # print(f'{biggestCircle} biggest circle')
    mask = cv.circle(blank, (math.floor(biggestCircle[0]), math.floor(biggestCircle[1])), math.floor(biggestCircle[2]), 255, -1)
    cv.imshow('rysowanie granicy', mask)
    masked = cv.bitwise_and(bilateral, bilateral, mask=mask)
    cv.imshow('granice', masked)
    # Edge Cascade
    canny = cv.Canny(masked, 50, 175)
    cv.imshow('canny2', canny)
    if biggestCircle is not None:
        circles = np.uint16(np.around(circles))
        # print(f'{biggestCircle} biggest circle')
        delta_r = biggestCircle[2] / 10
        biggest_circle_center = [biggestCircle[0], biggestCircle[1]]
        center = (math.floor(biggestCircle[0]), math.floor(biggestCircle[1]))
        # print(f'{center} center')
        # circle center
        cv.circle(src, center, 1, (255, 0, 0), 3)
        # circle outline
        radius = math.floor(biggestCircle[2])
        cv.circle(src, center, radius, (0, 0, 255), 3)
        # bullet holes
        hits = cv.HoughCircles(canny, cv.HOUGH_GRADIENT, 1, 10,
                               param1=300, param2=10,
                               minRadius=7, maxRadius=10)
        # print(f'{hits}"')
        score = countHitScore(hits.tolist(), delta_r, biggest_circle_center)
        print(f'The score is: {score}"')
        if hits is not None:
            hits = np.uint16(np.around(hits))
            for i in hits[0, :]:
                # print(f'promien trafienia {i[2]}"')
                center = (i[0], i[1])
                # circle center
                cv.circle(src, center, 1, (0, 100, 100), 3)
                # circle outline
                radius = i[2]
                cv.circle(src, center, radius, (255, 0, 255), 3)
    cv.imshow("detected circles", src)
    cv.waitKey(0)
    return 0

def findBiggestCircle(circles):
    # print(f'{circles}')
    listOfCircles = circles[0]
    biggestCircle = listOfCircles[0]
    for circle in listOfCircles:
        # print(f'{circle} circle')
        # print(f'2 {circle}')
        # print(f'3 {biggestCircle}')
        if circle[2] > biggestCircle[2]:
            # print('4')
            biggestCircle = circle
    print(biggestCircle)
    return biggestCircle.tolist()

def countHitScore(hits, delta_r, target_center):
    score = 0
    print(f'{hits} hits')
    for hit in hits[0]:
        # print(f'{hit} hit')
        # print(f'{(target_center)} center')
        x_dist = hit[0] - target_center[0] if hit[0] > target_center[0] else target_center[0] - hit[0]
        y_dist = hit[1] - target_center[1] if hit[1] > target_center[1] else target_center[1] - hit[1]
        total_dist = math.hypot(x_dist, y_dist) - hit[2]
        punkty = math.ceil(total_dist / delta_r)
        if punkty < 1:
            punkty = 1
        score += 11 - punkty
        # print(f'{total_dist / delta_r} math')
        # print(f'{total_dist / delta_r} total_dist / delta_r')
        print(f'{11 - punkty} zdobyte punkty')
        # print(f'{x_dist} x {y_dist} y')
    return score

if __name__ == "__main__":
    main(sys.argv[1:])
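One possible direction for the bullet holes, instead of a second Hough pass: threshold for the dark holes and fit minimum enclosing circles to the resulting contours, which tends to be more tolerant of ragged hole edges than HoughCircles. A sketch under the assumption that the holes are clearly darker than the paper; the threshold value 60 and the radius range are guesses to tune for your photos:

import cv2 as cv

src = cv.imread('600tarczamala.jpg')
gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# dark holes become white blobs
_, holes = cv.threshold(gray, 60, 255, cv.THRESH_BINARY_INV)
contours, _ = cv.findContours(holes, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
for cont in contours:
    (cx, cy), r = cv.minEnclosingCircle(cont)
    if 3 < r < 12:  # plausible hole radius in the 600x600 image; adjust as needed
        cv.circle(src, (int(cx), int(cy)), int(r), (255, 0, 255), 2)
cv.imshow('holes by threshold + minEnclosingCircle', src)
cv.waitKey(0)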
Hello, I have code which draws the outlines of shapes/objects that have 3 or 4 vertices. So far everything works. But now I only want to recognize shapes/objects that lie inside a square. My plan was to read an image of a square with cv2.imread, then use cv2.matchShapes to check how well each detected contour matches the contour of that square; if the match is good enough, the objects/shapes within that square should be recognized.
Following is the code I have written so far:
import math
import cv2
import numpy as np
from graphics import *

cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

rect = cv2.imread("Rectangle.png")
gray = cv2.cvtColor(rect, cv2.COLOR_BGR2GRAY)
rectcontour, _ = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

while True:
    _ , img = cam.read()
    img = cv2.resize(img, None, fx = 0.5, fy = 0.5, interpolation=cv2.INTER_AREA)
    gray1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #edges = cv2.Canny(gray1, 250, 300)
    _ , binary1 = cv2.threshold(gray1, 130, 255, cv2.THRESH_BINARY)
    inverted_binary1 = ~binary1
    contours1, _ = cv2.findContours(inverted_binary1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    a = 0
    for a in contours1:
        x = cv2.matchShapes(contours1[a], rectcontour[0], cv2.CONTOURS_MATCH_I1, 0.0)
        print(x)
        a = a + 1
    for contour in contours1:
        area = cv2.contourArea(contour)
        if area > 100 and area < 20000:
            approx = cv2.approxPolyDP(contour, 0.06 * cv2.arcLength(contour, True), True)
            print(len(approx))
            if 2 < len(approx) < 5:
                moment = cv2.moments(contour)
                cx = int(moment['m10'] / moment['m00'])
                cy = int(moment['m01'] / moment['m00'])
                #print(x, y)
                print(cx, cy)
                cv2.drawContours(img, [contour], -1, (0, 0, 255), 3)
                n = approx.ravel()
                print(n)
                i = 0
                for j in n:
                    if (i % 2 == 0):
                        x = n[i]
                        y = n[i + 1]
                        # String containing the co-ordinates.
                        string = str(x) + " " + str(y)
                        if (i == 0):
                            # text on topmost co-ordinate.
                            cv2.putText(img, string, (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0))
                        else:
                            # text on remaining co-ordinates.
                            cv2.putText(img, string, (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0))
                    i = i + 1
    cv2.imshow("schwarz, weiß", inverted_binary1)
    cv2.imshow('my webcam', img)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
At the moment I get the following error:
Traceback (most recent call last):
File "C:\Users\Paul\PycharmProjects\Opencv\test3.py", line 27, in <module>
x = cv2.matchShapes(contours1[a], rectcontour[0], cv2.CONTOURS_MATCH_I1, 0.0)
TypeError: only integer scalar arrays can be converted to a scalar index
[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
And I don't know exactly how I can fix that.
The square image I am using:
And another picture of the shape I actually want to detect:
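For what it's worth, the TypeError comes from the first loop: for a in contours1 binds a to a contour (a NumPy array), not an index, so contours1[a] tries to index the list with an array. A sketch of that loop with the index handled by enumerate (nothing else needs to change):

for i, cont in enumerate(contours1):
    x = cv2.matchShapes(cont, rectcontour[0], cv2.CONTOURS_MATCH_I1, 0.0)
    print(i, x)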
I am making an object detection project.
I have written my code by following a tutorial. In the tutorial, the author drew a rectangle in OpenCV for every single object that is detected.
But I want to change the rectangle to a triangle or an arrow. Let me explain with code. In my function I detect objects, and here I draw a rectangle for each detected object:
cv2.rectangle(img, (x, y), (x+w,y+h), (255, 0 , 255), 2)
But I want to change this rectangle to a triangle, positioned above the object, just like in these images:
This is the object detection with a triangle.
This is what I want to draw instead of the rectangle.
How can I make a triangle/arrow at the positions of my detected objects?
All of my code is here:
from os.path import sep
import cv2 as cv2
import numpy as np
import json

# Camera feed
cap_cam = cv2.VideoCapture(0)
ret, frame_cam = cap_cam.read()
hey = 0
print(cv2. __version__)
whT = 320
confThreshold =0.5
nmsThreshold= 0.2
classesFile = "coco.names"
classNames = []
with open(classesFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')
print(classNames)

## Model Files
modelConfiguration = "custom-yolov4-tiny-detector.cfg"
modelWeights = "custom-yolov4-tiny-detector_last.weights"
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

def findObjects(outputs,img):
    global hey
    global previousHey
    hT, wT, cT = img.shape
    bbox = []
    classIds = []
    confs = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                w,h = int(det[2]*wT) , int(det[3]*hT)
                x,y = int((det[0]*wT)-w/2) , int((det[1]*hT)-h/2)
                bbox.append([x,y,w,h])
                classIds.append(classId)
                confs.append(float(confidence))
    global indicates
    indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    hey = 0
    for i in indices:
        i = i[0]
        box = bbox[i]
        x, y, w, h = box[0], box[1], box[2], box[3]
        # print(x,y,w,h)
        cv2.rectangle(img, (x, y), (x+w,y+h), (255, 0 , 255), 2)
        #cv2.line(img, (350,400), (x, y), (255,0,0), 4)
        #cv2.line(img, (400,400), (x + 50 , y), (255,0,0), 4)
        #cv.putText(img,f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%',
        #(x, y-10), cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
        print('success')
        hey = 1

video_frame_counter = 0
while cap_cam.isOpened():
    img = cv2.imread('photos' + sep + 'lutfen.jpg')
    # THE ARROW VIDEO WILL BE PLAYED HERE
    # try with "if not detection" tomorrow.
    blob = cv2.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    layersNames = net.getLayerNames()
    outputNames = [(layersNames[i[0] - 1]) for i in net.getUnconnectedOutLayers()]
    outputs = net.forward(outputNames)
    findObjects(outputs,img)
    cv2.imshow('Image', img)
    # Video feed
    if hey == 1:
        filename = 'photos' + sep + 'Baslksz-3.mp4'
        cap_vid = cv2.VideoCapture(filename)
    if hey == 0:
        filename = 'photos' + sep + 'vid2.mp4'
        cap_vid = cv2.VideoCapture(filename)
    print(hey)
    ret, frame_vid = cap_vid.read()
    #cap_cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    #cap_cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    # Resize the camera frame to the size of the video
    height = int(cap_vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap_vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    # Capture the next frame from camera
    ret, frame_cam = cap_cam.read()
    video_frame_counter += 1
    if video_frame_counter == cap_vid.get(cv2.CAP_PROP_FRAME_COUNT):
        video_frame_counter = 0
        cap_vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
    frame_cam = cv2.resize(frame_cam, (width, height), interpolation = cv2.INTER_AREA)
    #ret = cap_vid.set(cv2.CAP_PROP_POS_MSEC, time_passed)
    ret, frame_vid = cap_vid.read()
    if not ret:
        print('Cannot read from video stream')
        break
    # Blend the two images and show the result
    tr = 0.4 # transparency between 0-1, show camera if 0
    frame = ((1-tr) * frame_cam.astype(np.float) + tr * frame_vid.astype(np.float)).astype(np.uint8)
    cv2.imshow('Transparent result', frame)
    if cv2.waitKey(1) == 27: # ESC is pressed
        break

cap_cam.release()
cap_vid.release()
cv2.destroyAllWindows()
The easy way
You can use the cv2.arrowedLine() function, which will draw something similar to what you want. For example, to draw a red arrow above your rectangle:
center_x = x + w//2
cv2.arrowedLine(img, (center_x, y-50), (center_x, y-5), (0,0,255), 2, 8, 0, 0.5)
which should give a result similar to the image below. Take a look at the OpenCV documentation for the description of the parameters of the function. You can change its size, thickness, color, etc.
Custom arrow shape
If you want more control over the shape of your arrow, you can define a contour (vertex by vertex) and use cv2.drawContours() to render it. For example:
# define the arrow shape
shape = np.array([[[0,0],[-25,-25],[-10,-25],[-10,-50],
                   [10,-50],[10,-25],[25,-25]]])
# move it to the desired position
cx = x + w // 2
cy = y - 5
shape[:,:,0] += cx
shape[:,:,1] += cy
# draw it
cv2.drawContours(img, shape, -1, (0, 255, 0), -1)
This snippet will give you the image below. You can adjust the shape by altering the vertices in the shape array, or look at the documentation to change the way OpenCV draws it.
So I have been following this tutorial for centroid tracking
https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/
and have built the centroid tracking class as it is described in the tutorial.
Now when I try to use background subtraction for the detection instead of the CNN that he is using, it does not work and gives me this error from CentroidTracker.py:
for i in range(0, inputCentroids):
TypeError: only integer scalar arrays can be converted to a scalar index
Here is the code that I am using:
for i in range(0, num_frames):
    rects = []
    #Get the very first image from the video
    if (first_iteration == 1):
        ret, frame = cap.read()
        frame = cv2.resize(frame, (imageHight,imageWidth))
        first_frame = copy.deepcopy(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = frame.shape[:2]
        print("shape:", height,width)
        first_iteration = 0
    else:
        ret, frame = cap.read()
        frame = cv2.resize(frame, (imageHight,imageWidth))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    forgroundMask = backgroundSub.apply(frame)
    #Get contour for each person
    _, contours, _ = cv2.findContours(forgroundMask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    contours = filter(lambda cont: cv2.contourArea(cont) > 20, contours)
    #Get bbox from the contours
    for c in contours:
        (x, y, w, h) = cv2.boundingRect(c)
        rectangle = [x, y, (x + w), (y + h)]
        rects.append(rectangle)
        cv2.rectangle(frame, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]),
                      (0, 255, 0), 2)
    objects = ct.update(rects)
    for (objectID, centroid) in objects.items():
        text = "ID:{}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
    '''Display Windows'''
    cv2.imshow('FGMask', forgroundMask)
    frame1 = frame.copy()
    cv2.imshow('MOG', frame1)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
The code breaks at the objects = ct.update(rects) line.
Here is the implementation of the CentroidTracker from the tutorial:
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np

# Makes the next unique object ID with
# 2 ordered dictionaries
class CentroidTracker():
    def __init__(self, maxDisappeared = 50):
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        if len(rects) == 0:
            for objectID in self.disappeared.keys():
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
            inputCentroids[i] = (cX, cY)
        if len(self.objects) == 0:
            for i in range(0, inputCentroids):
                self.register(inputCentroids[i])
        else:
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())
            D = dist.cdist(np.array(objectCentroids), inputCentroids)
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]
            usedRows = set()
            usedCols = set()
            for (row, col) in zip(rows, cols):
                if row in usedRows or col in usedCols:
                    continue
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0
                usedRows.add(row)
                usedCols.add(col)
            # compute both the row and column index we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)
            if D.shape[0] >= D.shape[1]:
                # loop over the unused row indexes
                for row in unusedRows:
                    # grab the object ID for the corresponding row
                    # index and increment the disappeared counter
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1
                    # check to see if the number of consecutive
                    # frames the object has been marked "disappeared"
                    # for warrants deregistering the object
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)
            else:
                for col in unusedCols:
                    self.register(inputCentroids[col])
        # return the set of trackable objects
        return self.objects
I am kind of lost on what I am doing wrong here. All I should have to do is pass bounding boxes (x, y, x+w, y+h) into the rects[] list and that should give similar results, correct? Or am I wrong and don't understand how this works? Any help will be appreciated.
You have forgotten the len function: for i in range(0, len(inputCentroids)):
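In context, the registration branch of update() then reads (nothing else changes):

if len(self.objects) == 0:
    for i in range(0, len(inputCentroids)):
        self.register(inputCentroids[i])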
By doing what Axel Puig said and then adding these lines to the main method:
objects = ct.update(rects)
if objects is not None:
    for (objectID, centroid) in objects.items():
        text = "ID:{}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
That fixed the issue. What I think was happening is that the first frame didn't initialize the tracker, so I needed to make sure the result was not None; after that it worked.
Hi, in the code below I'm computing the frame difference, and then I want to find the contours that appear in the difference image and draw them whenever they appear.
The differencing part works well, but my problem is with the contours: I can't draw them when they appear. When I run it, this error message appears:
cnt = contours[0]
IndexError: list index out of range
I have used cnt = contours[0] in previous examples and it worked, but I don't know what the problem is here. Thanks for the help.
import cv2
import sys
import time
import numpy as np

BLUR_SIZE = 3
NOISE_CUTOFF = 12

cam = cv2.VideoCapture(0)
cam.set(3,640)
cam.set(4,480)
window_name = "delta view"
window_name_now = "now view"

frame_now = cam.read()[1]
frame_now = cam.read()[1]
frame_now = cv2.cvtColor(frame_now, cv2.COLOR_RGB2GRAY)
frame_now = cv2.blur(frame_now, (BLUR_SIZE, BLUR_SIZE))
frame_prior = frame_now
delta_count_last = 1

while True:
    frame_delta = cv2.absdiff(frame_prior, frame_now)
    frame_delta = cv2.threshold(frame_delta, NOISE_CUTOFF, 255, 3)[1]
    delta_count = cv2.countNonZero(frame_delta)
    cv2.normalize(frame_delta, frame_delta, 0, 255, cv2.NORM_MINMAX)
    frame_delta = cv2.flip(frame_delta, 1)
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(frame_delta, cv2.MORPH_OPEN, kernel)
    cont_img = opening.copy()
    image, contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = contours[0]
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 2000 or area > 4000:
            continue
        if len(cnt) < 5:
            continue
        ellipse = cv2.fitEllipse(cnt)
        cv2.ellipse(roi, ellipse, (0,255,0), 2)
        cv2.imshow('Contours', roi)
    cv2.putText(frame_delta, "DELTA: %d" % (delta_count),
                (5, 15), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 255))
    cv2.imshow(window_name, opening)
    #frame_delta = cv2.threshold(frame_delta, 92, 255, 0)[1]
    dst = cv2.flip(frame_now, 1)
    dst = cv2.addWeighted(dst,1.0, frame_delta,0.9,0)
    cv2.imshow(window_name_now, dst)
    if (delta_count_last == 0 and delta_count != 0):
        sys.stdout.write("MOVEMENT %f\n" % time.time())
        sys.stdout.flush()
    elif delta_count_last != 0 and delta_count == 0:
        sys.stdout.write("STILL %f\n" % time.time())
        sys.stdout.flush()
    delta_count_last = delta_count
    frame_prior = frame_now
    frame_now = cam.read()[1]
    frame_now = cv2.cvtColor(frame_now, cv2.COLOR_RGB2GRAY)
    frame_now = cv2.blur(frame_now, (BLUR_SIZE, BLUR_SIZE))
    key = cv2.waitKey(10)
    if key == 0x1b or key == ord('q'):
        cv2.destroyWindow(window_name)
        break
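For what it's worth, cv2.findContours returns an empty list whenever the thresholded difference image contains no contours (for example on the first frames, when frame_prior and frame_now are identical because no motion has happened yet), and contours[0] then raises the IndexError. Since the for cnt in contours: loop already visits every contour (and simply runs zero times when the list is empty), a sketch of the fix is to drop the bare indexing or guard it; note also that roi is never defined in the code above, so the ellipse drawing would be the next failure:

if len(contours) > 0:
    cnt = contours[0]  # only index when at least one contour exists
for cnt in contours:
    area = cv2.contourArea(cnt)
    # ... rest of the loop body unchanged ...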