Centroid tracking using background subtraction in Python

So I have been following this tutorial for centroid tracking
https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/
and have built the centroid tracking class as described in the tutorial.
Now, when I try to use background subtraction for the detection instead of the CNN he is using, it does not work and raises this error from CentroidTracker.py:
for i in range(0, inputCentroids):
TypeError: only integer scalar arrays can be converted to a scalar index
Here is the code I am using:
for i in range(0, num_frames):
    rects = []
    # Get the very first image from the video
    if (first_iteration == 1):
        ret, frame = cap.read()
        frame = cv2.resize(frame, (imageHight, imageWidth))
        first_frame = copy.deepcopy(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = frame.shape[:2]
        print("shape:", height, width)
        first_iteration = 0
    else:
        ret, frame = cap.read()
        frame = cv2.resize(frame, (imageHight, imageWidth))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        forgroundMask = backgroundSub.apply(frame)

        # Get a contour for each person
        _, contours, _ = cv2.findContours(forgroundMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = filter(lambda cont: cv2.contourArea(cont) > 20, contours)

        # Get a bbox from the contours
        for c in contours:
            (x, y, w, h) = cv2.boundingRect(c)
            rectangle = [x, y, (x + w), (y + h)]
            rects.append(rectangle)
            cv2.rectangle(frame, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]),
                          (0, 255, 0), 2)

        objects = ct.update(rects)
        for (objectID, centroid) in objects.items():
            text = "ID:{}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        '''Display Windows'''
        cv2.imshow('FGMask', forgroundMask)
        frame1 = frame.copy()
        cv2.imshow('MOG', frame1)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
The code breaks at the
objects = ct.update(rects)
line.
Here is the implementation of the CentroidTracker from the tutorial:
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np

# Assigns the next unique object ID using
# two ordered dictionaries
class CentroidTracker():
    def __init__(self, maxDisappeared=50):
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        if len(rects) == 0:
            for objectID in self.disappeared.keys():
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects

        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
            inputCentroids[i] = (cX, cY)

        if len(self.objects) == 0:
            for i in range(0, inputCentroids):
                self.register(inputCentroids[i])
        else:
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())
            D = dist.cdist(np.array(objectCentroids), inputCentroids)
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]
            usedRows = set()
            usedCols = set()
            for (row, col) in zip(rows, cols):
                if row in usedRows or col in usedCols:
                    continue
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0
                usedRows.add(row)
                usedCols.add(col)

            # compute both the row and column indexes we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)

            if D.shape[0] >= D.shape[1]:
                # loop over the unused row indexes
                for row in unusedRows:
                    # grab the object ID for the corresponding row
                    # index and increment the disappeared counter
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1
                    # check to see if the number of consecutive
                    # frames the object has been marked "disappeared"
                    # for warrants deregistering the object
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)
            else:
                for col in unusedCols:
                    self.register(inputCentroids[col])

        # return the set of trackable objects
        return self.objects
I am kind of lost on what I am doing wrong here. All I should have to do is pass bounding boxes in the form (x, y, x+w, y+h) into the rects[] list, correct? That should give similar results, or am I wrong about how this works? Any help will be appreciated.
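For what it's worth, the tracker fails even when I feed it hand-made boxes in that (startX, startY, endX, endY) form, which suggests the problem is inside update() rather than in how rects is built. A minimal repro, assuming the class above is available as CentroidTracker:

ct = CentroidTracker()
# two hand-made boxes in (startX, startY, endX, endY) form
ct.update([(10, 10, 50, 50), (100, 120, 160, 200)])
# -> TypeError: only integer scalar arrays can be converted to a scalar index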

You have forgotten the len function: for i in range(0, len(inputCentroids)):
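In other words, inputCentroids is a NumPy array, and passing an array as the stop value of range() is what raises the TypeError. A sketch of the corrected registration branch:

# inputCentroids is an (N, 2) NumPy array; range() needs an integer,
# so iterate over its length rather than the array itself
if len(self.objects) == 0:
    for i in range(0, len(inputCentroids)):
        self.register(inputCentroids[i])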

By doing what Axel Puig said and then adding these lines to the main method:
objects = ct.update(rects)
if objects is not None:
    for (objectID, centroid) in objects.items():
        text = "ID:{}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
That fixed the issue. What I think was happening is that the first frame didn't initialize the tracker, so I needed to make sure the result was not None; after that it worked.

Related

ValueError(f"x and y must have same first dimension, but " ValueError: x and y must have same first dimension, but have shapes (6,) and (1)

I'm detecting contours in a video and calculating the angular displacement of each one. Now I want to plot the angular velocities over time, but I keep getting this error: ValueError: x and y must have same first dimension, but have shapes (6,) and (1). Below is the code:
import cv2
import numpy as np
import skimage.morphology
import matplotlib.pyplot as plt

# Load video
cap = cv2.VideoCapture("test2.mp4")

# get frame rate and frame count
fps = int(cap.get(cv2.CAP_PROP_FPS))
totalFrames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# get total duration of the video
total_duration = totalFrames / fps

... calculate prev_angle

while True:
    ret, nFrame = cap.read()
    if not ret:
        break
    roi = nFrame[304:547, 685:1044]

    ... pre-process roi

    # find contours
    newContours, _ = cv2.findContours(roi_thresh, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)

    angular_velocities = []
    displacements = []
    time = []

    # Calculate angle of the contours
    for i in range(len(newContours)):
        for j in range(i, len(newContours)):
            new_c1 = newContours[i]
            new_c2 = newContours[j]
            x1, y1, w1, h1 = cv2.boundingRect(new_c1)
            x2, y2, w2, h2 = cv2.boundingRect(new_c2)

            # find the centroid of each contour
            newM1 = cv2.moments(new_c1)
            if newM1['m00'] != 0:
                new_cx1 = int(newM1['m10'] / newM1['m00'])
                new_cy1 = int(newM1['m01'] / newM1['m00'])
            else:
                new_cx1, new_cy1 = 0, 0

            # view the centre of contour
            cv2.circle(roi, (new_cx1, new_cy1), 3, (0, 0, 255), -1)

            newM2 = cv2.moments(new_c2)
            if newM2['m00'] != 0:
                new_cx2 = int(newM2['m10'] / newM2['m00'])
                new_cy2 = int(newM2['m01'] / newM2['m00'])
            else:
                new_cx2, new_cy2 = 0, 0

            # view the center of contour
            cv2.circle(roi, (new_cx2, new_cy2), 4, (0, 255, 0), 1)

            newAngle = np.arctan2(new_cy2 - new_cy1, new_cx2 - new_cx1)
            newAngle = (np.rad2deg(newAngle))
            # newAngle = (newAngle + 180) % 360 - 180
            # print("Angle between contour {} and {} is {:.2f} degrees".format(i+1, j, newAngle)) # working

            cv2.drawContours(roi, [new_c1, new_c2], 0, (0, 0, 255), 1)
            text = "C" + str(i+1)
            cv2.putText(roi, text, (new_cx1, new_cy1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

            # calculate angular displacement
            delta_angle = newAngle - prev_angle
            print("Angular displacement of contour {} is {:.2f} degrees".format(i+1, delta_angle)) # working
            prev_angle = newAngle
            displacements.append(delta_angle)
            time = [i/fps for i in range(len(displacements))]

            current_Frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
            delta_t = (current_Frame - prev_Frame) / fps
            angularVelocity = delta_angle / delta_t if delta_t != 0 else 0

            # skeletonize
            threshSkeleton = (roi_thresh / 255).astype(np.float64)
            skeleton = skimage.morphology.skeletonize(threshSkeleton)
            skeleton = (255 * skeleton).clip(0, 255).astype(np.uint8)

            # Thickness
            cntr = newContours[i]
            length = cv2.arcLength(cntr, True)
            # print("length of contour {}: {:.2f}".format(i+1, length))
            area = cv2.contourArea(newContours[i])
            if length == 0:
                continue
            thickness = (area / length) * 0.264
            # print("Thickness of contour {} is {:.2f} mm".format(i+1, thickness)) # working

            angular_velocities.append(angularVelocity)
            prev_Frame = current_Frame

    current_Frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
    # time.append(current_Frame / fps)
    # time = current_Frame/fps

    # cv2.imshow("ROI", roi)
    cv2.imshow("frame", nFrame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()

# plt.plot(angular_velocities)
for i in range(len(displacements)):
    plt.plot(time, displacements[i], label='Contour{}'.format(i + 1))
plt.xlabel("Time (s)")
plt.ylabel('Angular displacement (degrees per second)')
plt.title('Angular displacement over time')
plt.show()
Any help is highly appreciated. Also, if there are any other errors, please let me know. Thanks.
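One way to make the two arrays the same length, assuming the goal is one displacement sample per processed frame, is to accumulate the lists outside the while loop (so they are not reset on every frame) and plot them once at the end. A rough sketch, not the original code:

displacements = []   # created once, before the while loop
time = []

frame_index = 0
while True:
    ret, nFrame = cap.read()
    if not ret:
        break
    # ... contour processing that produces delta_angle for this frame ...
    displacements.append(delta_angle)
    time.append(frame_index / fps)
    frame_index += 1

cap.release()

# both sequences now have the same first dimension, so plot() accepts them
plt.plot(time, displacements, label='Angular displacement')
plt.xlabel('Time (s)')
plt.ylabel('Angular displacement (degrees)')
plt.legend()
plt.show()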

How do I clock the user action?

I am following a simple head pose estimation tutorial in Python. I tried to make some modifications to the code but I have been stuck for days now. I just want to know how long the user has been looking to the left or right. If the user is detected looking to the left or right for a long time, say 2-3 minutes, then the program should give a warning or print a simple message saying how long they have been looking to the left or right. How do I achieve this? Any ideas how to do this?
Sorry for my bad English.
Any help will be appreciated :)
Here's my code:
import cv2
import mediapipe as mp
import numpy as np
import time

mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

cap = cv2.VideoCapture(0)

while cap.isOpened():
    success, image = cap.read()
    image = cv2.resize(image, (780, 350))
    image = cv2.flip(image, 1)
    #start = time.time()

    # Flip the image horizontally for a later selfie-view display
    # Also convert the color space from BGR to RGB
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)

    # To improve performance
    image.flags.writeable = False

    # Get the result
    results = face_mesh.process(image)

    # To improve performance
    image.flags.writeable = True

    # Convert the color space from RGB to BGR
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    img_h, img_w, img_c = image.shape
    face_3d = []
    face_2d = []

    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            for idx, lm in enumerate(face_landmarks.landmark):
                if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:
                    if idx == 1:
                        nose_2d = (lm.x * img_w, lm.y * img_h)
                        nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 3000)
                    x, y = int(lm.x * img_w), int(lm.y * img_h)

                    # Get the 2D Coordinates
                    face_2d.append([x, y])

                    # Get the 3D Coordinates
                    face_3d.append([x, y, lm.z])

            # Convert it to the NumPy array
            face_2d = np.array(face_2d, dtype=np.float64)

            # Convert it to the NumPy array
            face_3d = np.array(face_3d, dtype=np.float64)

            # The camera matrix
            focal_length = 1 * img_w
            cam_matrix = np.array([[focal_length, 0, img_h / 2],
                                   [0, focal_length, img_w / 2],
                                   [0, 0, 1]])

            # The distortion parameters
            dist_matrix = np.zeros((4, 1), dtype=np.float64)

            # Solve PnP
            success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)

            # Get rotational matrix
            rmat, jac = cv2.Rodrigues(rot_vec)

            # Get angles
            angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)

            # Get the y rotation degree
            x = angles[0] * 360
            y = angles[1] * 360
            z = angles[2] * 360

            # See where the user's head tilting
            if y < -10:
                # i want to know how long this guy been looking to his left or right or up or down
                text = "Looking Left"
            elif y > 10:
                text = "Looking Right"
            elif x < -10:
                text = "Looking Down"
            elif x > 10:
                text = "Looking Up"
            else:
                text = "Forward"

            # Display the nose direction
            nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)
            p1 = (int(nose_2d[0]), int(nose_2d[1]))
            p2 = (int(nose_2d[0] + y * 10), int(nose_2d[1] - x * 10))
            cv2.line(image, p1, p2, (255, 0, 0), 3)

            # Add the text on the image
            cv2.putText(image, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(image, "x: " + str(np.round(x, 2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, "y: " + str(np.round(y, 2)), (500, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, "z: " + str(np.round(z, 2)), (500, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # need some fix here
            # end = time.time()
            # totalTime = end - start
            # fps = 1 / totalTime
            # print("FPS: ", fps)
            # cv2.putText(image, f'FPS: {int(fps)}', (20,450), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0,255,0), 2)

            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=drawing_spec,
                connection_drawing_spec=drawing_spec)

    cv2.imshow('Head Pose Estimation', image)
    if cv2.waitKey(5) & 0xFF == 27:
        break

cap.release()
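A common pattern for timing this, sketched below under the assumption that the loop above runs once per frame and that text already holds the current direction, is to remember when the current direction started and compare against time.time() every frame; the 120-second threshold is just an example value:

# before the while loop: state for the direction timer (sketch)
current_direction = None
direction_start = time.time()
WARN_AFTER = 120  # seconds; raise to 2-3 minutes as needed

# inside the while loop, right after text has been set:
if text != current_direction:
    current_direction = text
    direction_start = time.time()
elapsed = time.time() - direction_start
if current_direction in ("Looking Left", "Looking Right") and elapsed > WARN_AFTER:
    warning = "Warning: looking {} for {:.0f} s".format(current_direction.split()[-1], elapsed)
    cv2.putText(image, warning, (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)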

Only detect contours within a square

Hello, I have code that draws the outlines of shapes/objects which have 3 or 4 vertices. So far everything works. But now I want only shapes/objects that lie inside a square to be recognized. My idea was to read an image containing a square with cv2.imread and then use cv2.matchShapes to check how closely each detected contour matches the contour of the square. If the match is good enough, the corresponding objects/shapes within this square should be recognized.
Following is the code I have written so far:
import math
import cv2
import numpy as np
from graphics import *

cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

rect = cv2.imread("Rectangle.png")
gray = cv2.cvtColor(rect, cv2.COLOR_BGR2GRAY)
rectcontour, _ = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

while True:
    _, img = cam.read()
    img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
    gray1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #edges = cv2.Canny(gray1, 250, 300)
    _, binary1 = cv2.threshold(gray1, 130, 255, cv2.THRESH_BINARY)
    inverted_binary1 = ~binary1
    contours1, _ = cv2.findContours(inverted_binary1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    a = 0
    for a in contours1:
        x = cv2.matchShapes(contours1[a], rectcontour[0], cv2.CONTOURS_MATCH_I1, 0.0)
        print(x)
        a = a + 1

    for contour in contours1:
        area = cv2.contourArea(contour)
        if area > 100 and area < 20000:
            approx = cv2.approxPolyDP(contour, 0.06 * cv2.arcLength(contour, True), True)
            print(len(approx))
            if 2 < len(approx) < 5:
                moment = cv2.moments(contour)
                cx = int(moment['m10'] / moment['m00'])
                cy = int(moment['m01'] / moment['m00'])
                #print(x, y)
                print(cx, cy)
                cv2.drawContours(img, [contour], -1, (0, 0, 255), 3)
                n = approx.ravel()
                print(n)
                i = 0
                for j in n:
                    if (i % 2 == 0):
                        x = n[i]
                        y = n[i + 1]
                        # String containing the co-ordinates.
                        string = str(x) + " " + str(y)
                        if (i == 0):
                            # text on topmost co-ordinate.
                            cv2.putText(img, string, (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0))
                        else:
                            # text on remaining co-ordinates.
                            cv2.putText(img, string, (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0))
                    i = i + 1

    cv2.imshow("schwarz, weiß", inverted_binary1)
    cv2.imshow('my webcam', img)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
At the moment I get the following error:
Traceback (most recent call last):
File "C:\Users\Paul\PycharmProjects\Opencv\test3.py", line 27, in <module>
x = cv2.matchShapes(contours1[a], rectcontour[0], cv2.CONTOURS_MATCH_I1, 0.0)
TypeError: only integer scalar arrays can be converted to a scalar index
[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
And I don't know exactly how I can fix that.
The square image I am using:
And another picture of the shape I actually want to detect:
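The TypeError comes from the first for loop: iterating with "for a in contours1" makes a itself a contour (a NumPy array), so contours1[a] tries to index the list with an array. A sketch of the corrected comparison loop:

# iterate over the contours directly (enumerate gives an index if needed)
for i, cont in enumerate(contours1):
    match = cv2.matchShapes(cont, rectcontour[0], cv2.CONTOURS_MATCH_I1, 0.0)
    print(i, match)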

How to create a meter based on model's probability in Python OpenCV

I have this simple Python code that makes predictions on the emotions of the face (refer to here in case you need to run it), whether the person is happy, sad, etc. It uses cv2 and Keras. Now, I would like to visualize and place a meter on the frame based on the probability of each frame (the prob value below, which is a percentage). How can I do that?
Something like this. Don't worry about the colors for now.
cap = cv2.VideoCapture(1)
canvasImage = cv2.imread("fg2.png")

x0, x1 = 330, 1290
y0, y1 = 155, 700

prediction_history = []
LOOKBACK = 5  # how far you want to look back
counter = 0

while True:
    # Find haar cascade to draw bounding box around face
    ret, frame = cap.read()
    frame = cv2.flip(frame, 3)
    if not ret:
        break
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        text = emotion_dict[maxindex]
        prob = round(prediction[0][3]*100, 2)

        prediction_history.append(maxindex)
        most_common_index = max(set(prediction_history[-LOOKBACK:][::-1]), key=prediction_history.count)
        text = emotion_dict[most_common_index]

        #if ("Sad" in text) or ("Angry" in text) or ("Disgusted" in text):
        #    text = "Sad"
        if ("Happy" in text) or ("Sad" in text):
            cv2.putText(frame, text+": "+str(prob), (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

    dim = (800, 480)
    frame_shrunk = cv2.resize(frame, (x1 - x0, y1 - y0))
    canvasImage[y0:y1, x0:x1] = frame_shrunk
    #cv2.imshow('Video', cv2.resize(frame,dim,interpolation = cv2.INTER_CUBIC))
    cv2.imshow('Demo', canvasImage)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
There is no built-in function in OpenCV for drawing meters; here is a helper function that you can use to draw a meter over an image:
def draw_indicator(img, percentage):

    def percentage_to_color(p):
        return 0, 255 * p, 255 - (255 * p)

    # config
    levels = 10
    indicator_width = 80
    indicator_height = 220
    level_width = indicator_width - 20
    level_height = int((indicator_height - 20) / levels - 5)

    # draw
    img_levels = int(percentage * levels)
    cv2.rectangle(img, (10, img.shape[0] - (indicator_height + 10)), (10 + indicator_width, img.shape[0] - 10), (0, 0, 0), cv2.FILLED)
    for i in range(img_levels):
        level_y_b = int(img.shape[0] - (20 + i * (level_height + 5)))
        cv2.rectangle(img, (20, level_y_b - level_height), (20 + level_width, level_y_b), percentage_to_color(i / levels), cv2.FILLED)

# test code
img = cv2.imread('a.jpg')
draw_indicator(img, 0.7)
cv2.imshow("test", img)
cv2.waitKey(10000)
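To tie this into the emotion loop from the question, one option (assuming prob is the percentage computed for the current frame) is to scale it to 0-1 and call the helper once per frame before the canvas is displayed:

# sketch: draw the meter on the composited canvas, using the model's
# probability (a percentage) scaled to the 0-1 range the helper expects
draw_indicator(canvasImage, prob / 100.0)
cv2.imshow('Demo', canvasImage)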

How To Draw a Triangle-Arrow With The Positions of Detected Objects

I am making an object detection project. I have my code, which I wrote by following a tutorial. In the tutorial, the guy drew a rectangle in OpenCV for every single object that is detected.
But I want to change the rectangle to a triangle or an arrow.
Let me explain with code. In my function, I detect objects, and here I draw a rectangle for each detected object:
cv2.rectangle(img, (x, y), (x+w,y+h), (255, 0 , 255), 2)
But I want to change this rectangle to a triangle, and I want to set the position of the triangle above the object, just like in these images:
This is the object detection with triangle
This is what I want to make instead of the rectangle:
How can I make a triangle/arrow at the positions of my detected objects?
All of my code is here:
from os.path import sep
import cv2 as cv2
import numpy as np
import json

# Camera feed
cap_cam = cv2.VideoCapture(0)
ret, frame_cam = cap_cam.read()
hey = 0
print(cv2.__version__)

whT = 320
confThreshold = 0.5
nmsThreshold = 0.2

classesFile = "coco.names"
classNames = []
with open(classesFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')
print(classNames)

## Model Files
modelConfiguration = "custom-yolov4-tiny-detector.cfg"
modelWeights = "custom-yolov4-tiny-detector_last.weights"
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

def findObjects(outputs, img):
    global hey
    global previousHey
    hT, wT, cT = img.shape
    bbox = []
    classIds = []
    confs = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                w, h = int(det[2]*wT), int(det[3]*hT)
                x, y = int((det[0]*wT)-w/2), int((det[1]*hT)-h/2)
                bbox.append([x, y, w, h])
                classIds.append(classId)
                confs.append(float(confidence))

    global indicates
    indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    hey = 0
    for i in indices:
        i = i[0]
        box = bbox[i]
        x, y, w, h = box[0], box[1], box[2], box[3]
        # print(x,y,w,h)
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 255), 2)
        #cv2.line(img, (350,400), (x, y), (255,0,0), 4)
        #cv2.line(img, (400,400), (x + 50 , y), (255,0,0), 4)
        #cv.putText(img,f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%',
        #           (x, y-10), cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
        print('success')
        hey = 1

video_frame_counter = 0
while cap_cam.isOpened():
    img = cv2.imread('photos' + sep + 'lutfen.jpg')
    # THE ARROW VIDEO WILL BE PLAYED HERE
    # try an "if not detection" check here tomorrow
    blob = cv2.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    layersNames = net.getLayerNames()
    outputNames = [(layersNames[i[0] - 1]) for i in net.getUnconnectedOutLayers()]
    outputs = net.forward(outputNames)
    findObjects(outputs, img)
    cv2.imshow('Image', img)

    # Video feed
    if hey == 1:
        filename = 'photos' + sep + 'Baslksz-3.mp4'
        cap_vid = cv2.VideoCapture(filename)
    if hey == 0:
        filename = 'photos' + sep + 'vid2.mp4'
        cap_vid = cv2.VideoCapture(filename)
    print(hey)
    ret, frame_vid = cap_vid.read()

    #cap_cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    #cap_cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    # Resize the camera frame to the size of the video
    height = int(cap_vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap_vid.get(cv2.CAP_PROP_FRAME_WIDTH))

    # Capture the next frame from camera
    ret, frame_cam = cap_cam.read()
    video_frame_counter += 1
    if video_frame_counter == cap_vid.get(cv2.CAP_PROP_FRAME_COUNT):
        video_frame_counter = 0
        cap_vid.set(cv2.CAP_PROP_POS_FRAMES, 0)

    frame_cam = cv2.resize(frame_cam, (width, height), interpolation=cv2.INTER_AREA)
    #ret = cap_vid.set(cv2.CAP_PROP_POS_MSEC, time_passed)
    ret, frame_vid = cap_vid.read()
    if not ret:
        print('Cannot read from video stream')
        break

    # Blend the two images and show the result
    tr = 0.4  # transparency between 0-1, show camera if 0
    frame = ((1-tr) * frame_cam.astype(np.float) + tr * frame_vid.astype(np.float)).astype(np.uint8)
    cv2.imshow('Transparent result', frame)
    if cv2.waitKey(1) == 27:  # ESC is pressed
        break

cap_cam.release()
cap_vid.release()
cv2.destroyAllWindows()
The easy way
You can use the cv2.arrowedLine() function, which will draw something similar to what you want. For example, to draw a red arrow above your rectangle:
center_x = x + w//2
cv2.arrowedLine(img, (center_x, y-50), (center_x, y-5), (0,0,255), 2, 8, 0, 0.5)
which should give a result similar to the image below. Take a look at the OpenCV documentation for the description of the parameters of the function. You can change its size, thickness, color, etc.
Custom arrow shape
If you want more control over the shape of your arrow, you can define a contour (vertex by vertex) and use cv2.drawContours() to render it. For example:
# define the arrow shape
shape = np.array([[[0, 0], [-25, -25], [-10, -25], [-10, -50],
                   [10, -50], [10, -25], [25, -25]]])
# move it to the desired position
cx = x + w // 2
cy = y - 5
shape[:, :, 0] += cx
shape[:, :, 1] += cy
# draw it
cv2.drawContours(img, shape, -1, (0, 255, 0), -1)
This snippet will give you the image below. You can adjust the shape by altering the vertices in the shape array, or look at the documentation to change the way OpenCV draws it.
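If you prefer a plain filled triangle rather than the arrow, the same drawContours approach works with just three vertices; a small sketch, with the coordinates chosen to sit just above the detected box (x, y, w, h):

# a downward-pointing filled triangle centred above the box
# (int32 points, which is what drawContours expects)
cx = x + w // 2
triangle = np.array([[[cx - 20, y - 45], [cx + 20, y - 45], [cx, y - 10]]], dtype=np.int32)
cv2.drawContours(img, triangle, -1, (0, 255, 0), -1)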
