Streaming to a website instead of an OpenCV window - Python

Human Recognition Program
# Imports needed by the snippet below
import cv2
import imutils
import numpy as np
from imutils.object_detection import non_max_suppression
from sklearn.cluster import DBSCAN

class PeopleTracker:

    hog = cv2.HOGDescriptor()
    caps = cv2.VideoCapture(r'C:/Users/Emyr/Documents/Jupyter/pedestrian-detection/video/Ped4.MOV')
    count = int(caps.get(cv2.CAP_PROP_FRAME_COUNT))
    center = []
    recCount = 0
    pick = 0
    # Red Yellow Blue Green Purple
    colors = [(255,0,0),(255,255,0),(0,0,255),(0,128,0),(128,0,128)]

    def BBoxes(self, frame):
        #frame = imutils.resize(frame, width = min(frame.shape[0], frame.shape[1]))
        frame = imutils.resize(frame, width=1000, height=1000)

        # detect people in the image
        (rects, weights) = self.hog.detectMultiScale(frame, winStride=(1,1), padding=(3, 3), scale=0.5)

        # apply non-maxima suppression to the bounding boxes using a
        # fairly large overlap threshold to try to maintain overlapping
        # boxes that are still people
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        self.pick = non_max_suppression(rects, probs=None, overlapThresh=0.7)

        # draw the final bounding boxes
        self.recCount = 0
        for (xA, yA, xB, yB) in self.pick:
            #cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
            CentxPos = int((xA + xB)/2)
            CentyPos = int((yA + yB)/2)
            cv2.circle(frame, (CentxPos, CentyPos), 5, (0,255,0), -1)
            self.recCount += 1
            if len(rects) > 1:
                self.center.append([CentxPos, CentyPos])
        return frame
    def Clustering(self, frame):
        db = DBSCAN(eps=70, min_samples=2).fit(self.center)
        labels = db.labels_

        # Number of clusters in labels, ignoring noise if present.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        n_noise_ = list(labels).count(-1)
        #print("Labels: ", labels)

        # Black removed and is used for noise instead.
        unique_labels = set(labels)
        #print("Unique Labels: ", unique_labels)
        #colors = plt.cm.rainbow(np.linspace(0, 255, len(unique_labels)))
        #colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for k in range(len(unique_labels))]
        #print(self.colors)

        i = 0
        for (xA, yA, xB, yB) in self.pick:
            if labels[i] == -1:
                cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 0, 0), 2)
                i += 1
            else:
                cv2.rectangle(frame, (xA, yA), (xB, yB), (self.colors[labels[i]][0], self.colors[labels[i]][1], self.colors[labels[i]][2]), 2)
                i += 1

        #print("Colours: ", colors)
        center = np.asarray(self.center)
        #fig, ax = plt.subplots()
        #ax.set_xlim(0, frame.shape[1])
        #ax.set_ylim(frame.shape[0], 0)
        #for k, col in zip(unique_labels, colors):
        #    if k == -1:
        #        # Black used for noise.
        #        col = [0, 0, 0, 1]
        #    class_member_mask = (labels == k)
        #    xy = center[class_member_mask]
        #    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col), markeredgecolor='k', markersize=8)
def main():
    PT = PeopleTracker()
    PT.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    while PT.count > 1:
        PT.center = []
        ret, frame = PT.caps.read()
        frame = PT.BBoxes(frame)

        if PT.recCount >= 2:
            PT.Clustering(frame)
            #plt.title('Estimated number of clusters: %d' % n_clusters_)
            #plt.show()
            cv2.imshow("Tracker", frame)
            cv2.waitKey(1)
            #cv2.destroyAllWindows()
            PT.count = PT.count - 1
        else:
            cv2.imshow("Tracker", frame)
            cv2.waitKey(1)
            #cv2.destroyAllWindows()
            PT.count = PT.count - 1

if __name__ == "__main__":
    main()
The code I currently have displays the stream of an existing human-recognition video in a window (as shown in the picture in the link). If possible, is there a way I can send that video feed to a website that I'm developing instead of using a window?
Thank you in advance :)

I have it semi-working: I ended up using Flask, but the problem is that I'm displaying the original video rather than the one produced by OpenCV. Does anyone have ideas on how I could plug the earlier code into this and use the "frame" variable for the video feed? (One possible approach is sketched after the HTML below.)
from flask import Flask, render_template, Response
import cv2
import sys
import numpy

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

def gen():
    i = 1
    while i < 10:
        yield (b'--frame\r\n'b'Content-Type: text/plain\r\n\r\n' + str(i) + b'\r\n')
        i += 1

def get_frame():
    ramp_frames = 100
    camera = cv2.VideoCapture('IMG_2649.MOV')
    i = 1
    while True:
        retval, im = camera.read()
        imgencode = cv2.imencode('.jpg', im)[1]
        stringData = imgencode.tostring()
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n' + stringData + b'\r\n')
        i += 1
    del(camera)

@app.route('/calc')
def calc():
    return Response(get_frame(), mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='localhost', debug=True, threaded=True)
HTML Code
<html>
<head>
<title>Video Streaming Demonstration</title>
</head>
<body>
<h1>Video Streaming Demonstration</h1>
<img src="{{ url_for('calc') }}">
<!-- <h1>{{ url_for('calc') }}</h1> -->
</body>
</html>
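Since the processed frame already comes back from PT.BBoxes() and PT.Clustering(), one way to show it on the page is to run that processing inside the generator and yield each annotated frame as a JPEG, rather than reading the raw file in get_frame(). The sketch below merges the two snippets above under that assumption; the names gen_processed and video_feed are made up, and the part Content-Type is image/jpeg rather than text/plain so browsers render it as an MJPEG stream.

# Sketch only: merges the PeopleTracker snippet with the Flask app above.
# Assumes the PeopleTracker class (and its imports) is defined in this module.
from flask import Flask, Response
import cv2

app = Flask(__name__)

PT = PeopleTracker()
PT.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

def gen_processed():
    # Same per-frame work as main(), but each annotated frame is JPEG-encoded
    # and yielded as one part of a multipart (MJPEG) HTTP response.
    while True:
        ret, frame = PT.caps.read()
        if not ret:
            break
        PT.center = []
        frame = PT.BBoxes(frame)
        if PT.recCount >= 2:
            PT.Clustering(frame)
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

@app.route('/video_feed')   # made-up route name
def video_feed():
    return Response(gen_processed(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='localhost', debug=True, threaded=True)

With a route like this, the img tag in index.html would point at {{ url_for('video_feed') }}; alternatively, the generator could simply be dropped into the existing calc route so the HTML stays unchanged.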

Related

CNN computer vision with Keras and OpenCV

I'm trying to detect a label on a bottle live. I already have my model and weights, and they work fine with images, but the problem comes when I try to detect the label live using OpenCV's cv2.read(): it won't detect correctly.
What I have noticed is that when I use load_img from Keras it works well, but when I use cv2.read() it does not. So is there a way to process live images with Keras instead of using cv2.read()?
The code below does not work well:
while True:
    success, img = cap.read()
    rect = cv2.rectangle(img, start_point, end_point, color, thickness)
    cropImg = img[yMin:yMax, xMin:xMax]  # this is all there is to cropping
    cv2.imshow("Original", rect)
    cv2.imshow("Cropped", cropImg)

    x = cv2.resize(cropImg, (altura, longitud))
    x = img_to_array(x)
    x = np.expand_dims(x, axis=0)
    val = cnn.predict(x)
    #resultado = arreglo[0]
    #respuesta = np.argmax(resultado)
    if val == 0:
        color = (46, 242, 79)
    else:
        color = (255, 0, 0)
    print(val)
And the following code works well, but it is not live:
while True:
    success, img = cap.read()
    rect = cv2.rectangle(img, start_point, end_point, color, thickness)
    cropImg = img[yMin:yMax, xMin:xMax]  # this is all there is to cropping
    cv2.imshow("Original", rect)
    cv2.imshow("Cropped", cropImg)

    if cv2.waitKey(1) & 0xFF == ord('t'):
        photo = cap.read()
        #cropImg2 = photo[yMin:yMax,xMin:xMax]
        cv2.imwrite("pic.png", cropImg)
        x = load_img("pic.png", target_size=(longitud, altura))
        #x = cv2.resize(cropImg, (altura, longitud))
        x = img_to_array(x)
        x = np.expand_dims(x, axis=0)
        val = cnn.predict(x)
        #resultado = arreglo[0]
        #respuesta = np.argmax(resultado)
        if val == 0:
            color = (46, 242, 79)
        else:
            color = (255, 0, 0)
        print(val)
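One difference between the two paths that may explain this: load_img() hands Keras an RGB image, while cv2.read() returns frames in BGR channel order, so the live pixels reach the model with swapped channels. A minimal sketch of the live branch under that assumption (reusing cropImg, altura, longitud, img_to_array and cnn from the snippets above) would be:

# Sketch (assumption): convert the OpenCV BGR frame to RGB before prediction,
# so the live path matches what load_img() produces from the saved PNG.
rgb = cv2.cvtColor(cropImg, cv2.COLOR_BGR2RGB)   # cv2.read() gives BGR
rgb = cv2.resize(rgb, (altura, longitud))        # (width, height), as in the commented-out line
x = img_to_array(rgb)
x = np.expand_dims(x, axis=0)
val = cnn.predict(x)

If that is the cause, the prediction should match the save-to-disk path without writing pic.png at all.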

OpenCV Python: Using multithreading with VideoCapture and VideoWriter

I am labeling some recordings with quite a few objects, and I notice that when I do this in the naive OpenCV manner, the video export takes roughly real time (an hour-long video takes an hour to export). I'd like to accelerate this process, so I was looking at some forums and code examples about how to use a separate thread to read the frames and then use the main thread to do the frame processing. When I try this, however, the exported video is way too fast, and my added labels/annotations do not sync with the actual recording. Furthermore, even though I set the export frame rate to be the same as the input video (30 fps), it seems that only my added annotations follow it. Here's my code; the threaded capture class was adapted from https://github.com/gilbertfrancois/video-capture-async. I took their thread-lock idea, but it did not do anything for me.
class Threaded_VidCapture(object):

    def __init__(self, videoSrc):
        #Constructor parameters
        self.VidIn = cv2.VideoCapture(videoSrc)
        self.Terminate = False
        self.Ret, self.Frame = self.VidIn.read()

        #Video Parameters
        self.FPS = int(self.VidIn.get(cv2.CAP_PROP_FPS))
        self.Resolution = (
            int(self.VidIn.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(self.VidIn.get(cv2.CAP_PROP_FRAME_HEIGHT))
        )
        self.FrameCount = int(self.VidIn.get(cv2.CAP_PROP_FRAME_COUNT))
        self.ThreadLock = threading.Lock()

    def startProcess(self):
        self.ThreadStart = Thread(target=self.retrieveFrames, args=())
        self.ThreadStart.daemon = True
        self.ThreadStart.start()
        return(self)

    def retrieveFrames(self):
        while self.Terminate is False:
            Ret, Frame = self.VidIn.read()
            with self.ThreadLock:
                self.Ret = Ret
                self.Frame = Frame

    def read(self):
        with self.ThreadLock:
            return(self.Frame.copy(), self.Ret)

    def TerminateProgram(self):
        self.Terminate = True
        self.ThreadStart.join()
        self.VidIn.release()
def annotateVideo_wrapper(RotationalCounts, Coords_FromLabel, Coords_ToLabel,
                          videoFile, videoExport, **kwargs):

    GetFrames = Threaded_VidCapture(videoSrc=videoFile).startProcess()
    Write = cv2.VideoWriter_fourcc(*"XVID")
    Export = cv2.VideoWriter(videoExport, Write, GetFrames.FPS, GetFrames.Resolution)

    def labelAnnotation(Frame, Text, Position, Color_3ChannelRGB, **kwargs):
        ScaleKwargs = dict([(key, vals) for key, vals in kwargs.items()])
        #Base parameters from CV2 website
        Font = cv2.FONT_HERSHEY_PLAIN
        if len(ScaleKwargs) == 0:
            Scale = 2
        else:
            Scale = ScaleKwargs["Scale"]
        Color = (0,0,0)  #Black
        Thickness = cv2.FILLED
        Margin = 5
        TextSize = cv2.getTextSize(text=str(Text), fontFace=Font, fontScale=Scale, thickness=Thickness)
        EndX = Position[0] + TextSize[0][0] + Margin
        EndY = Position[1] - TextSize[0][1] - Margin
        cv2.rectangle(Frame, Position, (EndX, EndY), Color_3ChannelRGB, Thickness)
        cv2.putText(Frame, str(Text), Position, Font, Scale, Color, 1, cv2.LINE_AA)

    if kwargs:
        HashMap = {keys: Val for keys, Val in kwargs.items()}
        try:
            Graphs = Graph.Live_movementTrace(
                DataFrame=HashMap["DataFrame"],
                Label=HashMap["Label"],
                RotationMetaData=HashMap["RotationMetaData"],
                IndexLimit=HashMap["IndexLimit"]
            )
        except KeyError:
            raise(KeyError("Missing arguments, re-run the program with the correct input or leave this function blank"))
        fig = mp.figure()
        fig.suptitle("Live Movement Plot", fontweight="bold", fontsize=14)
        Hash = {"CW_x":[], "CW_y":[], "CCW_x":[], "CCW_y":[]}
        line1, = mp.plot(Hash["CW_x"], Hash["CW_y"], color="blue")
        line2, = mp.plot(Hash["CCW_x"], Hash["CCW_y"], color="red")
        mp.xlim(0, GetFrames.Resolution[0])
        mp.ylim(0, GetFrames.Resolution[1])
        ax = mp.gca()
        ax.invert_yaxis()
    else:
        Graphs = []

    Ind = 0
    ProgressBar = tqdm(total=len(Coords_FromLabel))
    while(Ind < GetFrames.FrameCount):
        Frame = GetFrames.read()[0]

        ####################
        #Add Coordinate System
        ####################
        cv2.line(Frame, (int(Coords_FromLabel[Ind][0]), int(Coords_FromLabel[Ind][1])), (int(Coords_FromLabel[Ind][0]), 0), (0, 0, 0), 5, 8, 0)  #MinY
        cv2.line(Frame, (int(Coords_FromLabel[Ind][0]), int(Coords_FromLabel[Ind][1])), (int(Coords_FromLabel[Ind][0]), GetFrames.Resolution[1]), (255, 0, 0), 5, 8, 0)  #MaxY
        cv2.line(Frame, (int(Coords_FromLabel[Ind][0]), int(Coords_FromLabel[Ind][1])), (0, int(Coords_FromLabel[Ind][1])), (0, 255, 0), 5, 8, 0)  #MinX
        cv2.line(Frame, (int(Coords_FromLabel[Ind][0]), int(Coords_FromLabel[Ind][1])), (GetFrames.Resolution[0], int(Coords_FromLabel[Ind][1])), (255, 255, 255), 5, 8, 0)  #MaxX

        ####################
        #Add Counters
        ####################
        labelAnnotation(Frame, f"CW: {round(RotationalCounts[0][Ind], 3)}", (20, 100), (255, 255, 255))
        labelAnnotation(Frame, f"CCW: {round(RotationalCounts[1][Ind], 3)}", (20, 130), (0, 0, 255))
        if Ind < 10000:
            labelAnnotation(Frame, f"Frame Num: {Ind}", (20, 160), (100, 100, 100))
        elif Ind >= 10000:
            labelAnnotation(Frame, f"Frame Num: {Ind}", (20, 160), (100, 100, 100), Scale=1.5)

        ####################
        #Add Vectors
        ####################
        cv2.line(Frame, (int(Coords_FromLabel[Ind][0]), int(Coords_FromLabel[Ind][1])), (int(Coords_ToLabel[Ind][0]), int(Coords_ToLabel[Ind][1])), (0, 0, 255), 5, 8, 0)

        if len(Graphs) != 0:
            if ((Ind in Graphs[3]) and (Ind != 0)):
                for keys in Hash:
                    Hash[keys].clear()
            Hash["CW_x"].append(Graphs[0][f'{HashMap["Label"]}_x'][Ind])
            Hash["CW_y"].append(Graphs[0][f'{HashMap["Label"]}_y'][Ind])
            line1.set_xdata(Hash["CW_x"])
            line1.set_ydata(Hash["CW_y"])
            Hash["CCW_x"].append(Graphs[1][Ind])
            Hash["CCW_y"].append(Graphs[2][Ind])
            line2.set_xdata(Hash["CCW_x"])
            line2.set_ydata(Hash["CCW_y"])
            fig.canvas.draw()
            img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
            img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            #Needs to be a variable offset
            x_offset = GetFrames.Resolution[0] - 450
            y_offset = GetFrames.Resolution[1] - 340
            # print(Frame.shape)
            x_end = x_offset + img.shape[1]
            y_end = y_offset + img.shape[0]
            Frame[y_offset:y_end, x_offset:x_end] = img

        Export.write(Frame)
        #if HashMap["ShowFrame"] is True:
        #    cv2.imshow("Frame", Frame)
        cv2.waitKey(1) & 0xFF
        # if ((cv2.waitKey(1) & 0xFF == ord("q")) or (GetFrames.Terminate is True)):
        #     GetFrames.TerminateProgram()
        #     break
        ProgressBar.update(1)
        Ind += 1

    Export.release()
    cv2.destroyAllWindows()
    GetFrames.TerminateProgram()
There's a lot that I'm adding to each frame, and again this works fine with the naive approach: I get about 23 it/s as measured with tqdm, which is close to the FPS mark. I'd like to be above 30 it/s, but I don't know if that's possible.
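A likely reason for the speed-up and the lost sync is that retrieveFrames() decodes frames as fast as it can while the export loop just grabs whatever self.Frame happens to hold, so frames get skipped or duplicated and the annotation index Ind no longer matches the frame being written. Below is a minimal sketch of a bounded-queue hand-off that consumes every frame exactly once; reader, process and frame_queue are made-up names, not part of the original code, and the annotation work is only indicated by a comment.

# Sketch (assumption): pass frames from the reader thread to the writer through
# a bounded queue so every decoded frame is processed exactly once and the
# annotation index stays in step with the video.
import queue
import threading
import cv2

def reader(video_src, frame_queue):
    cap = cv2.VideoCapture(video_src)
    while True:
        ret, frame = cap.read()
        if not ret:
            frame_queue.put(None)      # sentinel: end of video
            break
        frame_queue.put(frame)         # blocks when the queue is full
    cap.release()

def process(video_src, writer):
    # `writer` is a cv2.VideoWriter created by the caller with the source FPS/size
    frame_queue = queue.Queue(maxsize=128)
    t = threading.Thread(target=reader, args=(video_src, frame_queue), daemon=True)
    t.start()
    ind = 0
    while True:
        frame = frame_queue.get()
        if frame is None:
            break
        # ... annotate `frame` using index `ind`, as in annotateVideo_wrapper ...
        writer.write(frame)
        ind += 1
    t.join()

The queue applies back-pressure: when the writer falls behind, put() blocks instead of letting the reader race ahead, so the output keeps the source's frame order and count (the export is still only as fast as the slowest stage, though).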

How To Draw a Triangle-Arrow With The Positions of Detected Objects

I am making an object detection project.
I have my code, which I wrote by following a tutorial. In the tutorial, the author drew a rectangle in OpenCV for every detected object.
But I want to change the rectangle to a triangle or an arrow.
Let me explain with code ===>
In my function, I detect objects.
And here I draw a rectangle for the detected objects ==>
cv2.rectangle(img, (x, y), (x+w,y+h), (255, 0 , 255), 2)
But I want to change this rectangle to a triangle, and I want to set the position of the triangle to be above the object.
Just like in these images:
This is the object detection with a triangle:
This is what I want to make instead of the rectangle:
How can I make a triangle/arrow at the positions of my detected objects?
All of my code is here ==>
from os.path import sep
import cv2 as cv2
import numpy as np
import json

# Camera feed
cap_cam = cv2.VideoCapture(0)
ret, frame_cam = cap_cam.read()
hey = 0
print(cv2.__version__)
whT = 320
confThreshold = 0.5
nmsThreshold = 0.2
classesFile = "coco.names"
classNames = []
with open(classesFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')
print(classNames)

## Model Files
modelConfiguration = "custom-yolov4-tiny-detector.cfg"
modelWeights = "custom-yolov4-tiny-detector_last.weights"
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
def findObjects(outputs, img):
    global hey
    global previousHey
    hT, wT, cT = img.shape
    bbox = []
    classIds = []
    confs = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                w, h = int(det[2]*wT), int(det[3]*hT)
                x, y = int((det[0]*wT) - w/2), int((det[1]*hT) - h/2)
                bbox.append([x, y, w, h])
                classIds.append(classId)
                confs.append(float(confidence))
    global indicates
    indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    hey = 0
    for i in indices:
        i = i[0]
        box = bbox[i]
        x, y, w, h = box[0], box[1], box[2], box[3]
        # print(x,y,w,h)
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 255), 2)
        #cv2.line(img, (350,400), (x, y), (255,0,0), 4)
        #cv2.line(img, (400,400), (x + 50 , y), (255,0,0), 4)
        #cv.putText(img,f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%',
        #           (x, y-10), cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
        print('success')
        hey = 1
video_frame_counter = 0
while cap_cam.isOpened():
    img = cv2.imread('photos' + sep + 'lutfen.jpg')
    # The arrow video will be played here
    # If there is no detection, try it with "if not" tomorrow.
    blob = cv2.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    layersNames = net.getLayerNames()
    outputNames = [(layersNames[i[0] - 1]) for i in net.getUnconnectedOutLayers()]
    outputs = net.forward(outputNames)
    findObjects(outputs, img)
    cv2.imshow('Image', img)

    # Video feed
    if hey == 1:
        filename = 'photos' + sep + 'Baslksz-3.mp4'
        cap_vid = cv2.VideoCapture(filename)
    if hey == 0:
        filename = 'photos' + sep + 'vid2.mp4'
        cap_vid = cv2.VideoCapture(filename)
    print(hey)
    ret, frame_vid = cap_vid.read()

    #cap_cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    #cap_cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    # Resize the camera frame to the size of the video
    height = int(cap_vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap_vid.get(cv2.CAP_PROP_FRAME_WIDTH))

    # Capture the next frame from camera
    ret, frame_cam = cap_cam.read()
    video_frame_counter += 1
    if video_frame_counter == cap_vid.get(cv2.CAP_PROP_FRAME_COUNT):
        video_frame_counter = 0
        cap_vid.set(cv2.CAP_PROP_POS_FRAMES, 0)

    frame_cam = cv2.resize(frame_cam, (width, height), interpolation=cv2.INTER_AREA)
    #ret = cap_vid.set(cv2.CAP_PROP_POS_MSEC, time_passed)
    ret, frame_vid = cap_vid.read()
    if not ret:
        print('Cannot read from video stream')
        break

    # Blend the two images and show the result
    tr = 0.4  # transparency between 0-1, show camera if 0
    frame = ((1-tr) * frame_cam.astype(np.float) + tr * frame_vid.astype(np.float)).astype(np.uint8)
    cv2.imshow('Transparent result', frame)
    if cv2.waitKey(1) == 27:  # ESC is pressed
        break

cap_cam.release()
cap_vid.release()
cv2.destroyAllWindows()
The easy way
You can use the cv2.arrowedLine() function, which will draw something close to what you want. For example, to draw a red arrow above your rectangle:
center_x = x + w//2
cv2.arrowedLine(img, (center_x, y-50), (center_x, y-5), (0,0,255), 2, 8, 0, 0.5)
which should give a result similar to the image below. Take a look at the OpenCV documentation for the description of the parameters of the function. You can change its size, thickness, color, etc.
Custom arrow shape
If you want more control over the shape of your arrow, you can define a contour (vertex by vertex) and use cv2.drawContours() to render it. For example:
# define the arrow shape
shape = np.array([[[0,0],[-25,-25],[-10,-25],[-10,-50],
                   [10,-50],[10,-25],[25,-25]]])
# move it to the desired position
cx = x + w // 2
cy = y - 5
shape[:,:,0] += cx
shape[:,:,1] += cy
# draw it
cv2.drawContours(img, shape, -1, (0, 255, 0), -1)
This snippet will give you the image below. You can adjust the shape by altering the vertices in the shape array, or look at the documentation to change the way OpenCV draws it.

Centroid tracking by using background subtraction in Python

So I have been following this tutorial for centroid tracking:
https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/
and have built the centroid tracking class as it is described in the tutorial.
Now when I try to use background subtraction for the detection instead of the CNN that he is using, it does not work and gives me this issue from CentroidTracker.py:
for i in range(0, inputCentroids):
TypeError: only integer scalar arrays can be converted to a scalar index
Here is my code that I am using
for i in range(0, num_frames):
    rects = []
    # Get the very first image from the video
    if (first_iteration == 1):
        ret, frame = cap.read()
        frame = cv2.resize(frame, (imageHight, imageWidth))
        first_frame = copy.deepcopy(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = frame.shape[:2]
        print("shape:", height, width)
        first_iteration = 0
    else:
        ret, frame = cap.read()
        frame = cv2.resize(frame, (imageHight, imageWidth))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        forgroundMask = backgroundSub.apply(frame)

        # Get contour for each person
        _, contours, _ = cv2.findContours(forgroundMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = filter(lambda cont: cv2.contourArea(cont) > 20, contours)

        # Get bbox from the contours
        for c in contours:
            (x, y, w, h) = cv2.boundingRect(c)
            rectangle = [x, y, (x + w), (y + h)]
            rects.append(rectangle)
            cv2.rectangle(frame, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]),
                          (0, 255, 0), 2)

        objects = ct.update(rects)
        for (objectID, centroid) in objects.items():
            text = "ID:{}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        '''Display Windows'''
        cv2.imshow('FGMask', forgroundMask)
        frame1 = frame.copy()
        cv2.imshow('MOG', frame1)
        cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
the code is breaking at the
objects = ct.update(rects)
line.
Here is the implementation of the CentroidTracker from the Tutorial:
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np

#Makes a the next unique object ID with
#2 ordered dictionaries
class CentroidTracker():
    def __init__(self, maxDisappeared=50):
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        if len(rects) == 0:
            for objectID in self.disappeared.keys():
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects

        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
            inputCentroids[i] = (cX, cY)

        if len(self.objects) == 0:
            for i in range(0, inputCentroids):
                self.register(inputCentroids[i])
        else:
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())
            D = dist.cdist(np.array(objectCentroids), inputCentroids)
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]
            usedRows = set()
            usedCols = set()
            for (row, col) in zip(rows, cols):
                if row in usedRows or col in usedCols:
                    continue
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0
                usedRows.add(row)
                usedCols.add(col)

            # compute both the row and column index we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)

            if D.shape[0] >= D.shape[1]:
                # loop over the unused row indexes
                for row in unusedRows:
                    # grab the object ID for the corresponding row
                    # index and increment the disappeared counter
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1
                    # check to see if the number of consecutive
                    # frames the object has been marked "disappeared"
                    # for warrants deregistering the object
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)
            else:
                for col in unusedCols:
                    self.register(inputCentroids[col])

        # return the set of trackable objects
        return self.objects
I am kind of lost on what I am doing wrong here. All I should have to do is pass a bounding box (x, y, x+w, y+h) into the rects[] list, correct, and that should give similar results? Or am I wrong and do not understand how this works? Any help will be appreciated.
You have forgotten the len function: for i in range(0, len(inputCentroids)):
By doing what Axel Puig said and then adding these lines to the main method:
objects = ct.update(rects)
if objects is not None:
    for (objectID, centroid) in objects.items():
        text = "ID:{}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
That fixed the issue. What I think was happening is that the first frame didn't initialize the tracker, so I needed to make sure it was not None; after that it worked.

Extract optical flow as data (numbers) from live feed (webcam) using Python openCV

First of all, I am new to programming, though I would like to learn, especially Python. My background is in animation and CGI.
I have Python 2.7 and OpenCV x64 installed on Windows. I tested the optical flow example they provide (opt_flow.py, the one with the green arrows). I like it, but I am trying to understand how I can get the data out as values. I am not interested in seeing the camera output or the green arrows; I just want the data out to use it later. Is there a way to do that?
For example: the value of x, y and the length of the green arrows.
Thank you all.
You can get the optical flow vectors (green arrows) in the draw_flow function of opt_flow.py. Here is how I would do it :
#!/usr/bin/env python
'''
example to show optical flow

USAGE: opt_flow.py [<video_source>]

Keys:
 1 - toggle HSV flow visualization
 2 - toggle glitch

Keys:
    ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import math
import cv2
import video


def draw_flow(img, flow, step=16):
    global arrows
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        arrows.append([x1, y1, math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))])
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis


def draw_hsv(flow):
    h, w = flow.shape[:2]
    fx, fy = flow[:,:,0], flow[:,:,1]
    ang = np.arctan2(fy, fx) + np.pi
    v = np.sqrt(fx*fx + fy*fy)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[...,0] = ang*(180/np.pi/2)
    hsv[...,1] = 255
    hsv[...,2] = np.minimum(v*4, 255)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return bgr


def warp_flow(img, flow):
    h, w = flow.shape[:2]
    flow = -flow
    flow[:,:,0] += np.arange(w)
    flow[:,:,1] += np.arange(h)[:,np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    return res


if __name__ == '__main__':
    import sys
    print(__doc__)
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0

    arrows = []
    cam = video.create_capture(fn)
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    show_hsv = False
    show_glitch = False
    cur_glitch = prev.copy()

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray

        arrows.clear()
        finalImg = draw_flow(gray, flow)
        print(arrows)
        cv2.imshow('flow', finalImg)

        if show_hsv:
            cv2.imshow('flow HSV', draw_hsv(flow))
        if show_glitch:
            cur_glitch = warp_flow(cur_glitch, flow)
            cv2.imshow('glitch', cur_glitch)

        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('1'):
            show_hsv = not show_hsv
            print('HSV flow visualization is', ['off', 'on'][show_hsv])
        if ch == ord('2'):
            show_glitch = not show_glitch
            if show_glitch:
                cur_glitch = img.copy()
            print('glitch is', ['off', 'on'][show_glitch])

    cv2.destroyAllWindows()
In the code above, I'm saving the optical flow vectors (start point coordinates and vector length) in the global variable arrows like so :
arrows.append([x1,y1, math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))])
with (x1, y1) the arrow's start point and (x2, y2) the arrow's end point.
Hope it helps.
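If you do not want any windows at all, a small variation on the same idea is to skip draw_flow()/imshow() entirely and sample the flow field directly. The sketch below is an assumption about what "just the data" means (the flow_vectors name is made up, not part of the original answer); it returns one (x, y, length) row per sampled grid point, mirroring what the arrows list holds.

# Sketch (assumption): extract flow samples as plain numbers, no drawing.
import numpy as np
import cv2

def flow_vectors(prev_gray, gray, step=16):
    # Dense Farneback flow between two consecutive grayscale frames
    flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    h, w = gray.shape[:2]
    # Sample the flow field on a regular grid, like draw_flow() does
    y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lengths = np.sqrt(fx * fx + fy * fy)
    # One row per sample point: x, y, flow length
    return np.column_stack([x, y, lengths])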
