OpenCV CamShift with threading not working - Python

I'm trying to run the CamShift code provided in opencv/samples/python2 using threads. I've created two objects of the App() class, each of which calls the run method.
from threading import Thread
from multiprocessing import Process
import numpy as np
import cv2
import video

class App(object):
    def __init__(self, video_src):
        self.cam = video.create_capture(video_src)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('camshift')
        cv2.setMouseCallback('camshift', self.onmouse)
        self.selection = None
        self.drag_start = None
        self.tracking_state = 0
        self.show_backproj = False

    def onmouse(self, event, x, y, flags, param):
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
            self.tracking_state = 0
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                h, w = self.frame.shape[:2]
                xo, yo = self.drag_start
                x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
                x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
                self.selection = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.selection = (x0, y0, x1, y1)
            else:
                self.drag_start = None
                if self.selection is not None:
                    self.tracking_state = 1

    def show_hist(self):
        bin_count = self.hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
        for i in xrange(bin_count):
            h = int(self.hist[i])
            cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow('hist', img)

    def run(self):
        while True:
            ret, self.frame = self.cam.read()
            vis = self.frame.copy()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
            if self.selection:
                x0, y0, x1, y1 = self.selection
                self.track_window = (x0, y0, x1-x0, y1-y0)
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                #self.show_hist()
                vis_roi = vis[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0
            if self.tracking_state == 1:
                self.selection = None
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
                track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
                print track_box[0]
                if self.show_backproj:
                    vis[:] = prob[..., np.newaxis]
                try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
                except: print track_box
            cv2.imshow('camshift', vis)
            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()

if __name__ == '__main__':
    import sys
    try: video_src = sys.argv[1]
    except: video_src = 0
    print __doc__
    left = App(1)   # 1 = device id for left camera
    right = App(2)  # 2 = device id for right camera
    threadLeft = Process(target=left.run())
    threadRight = Process(target=right.run())
    threadRight.start()
    threadLeft.start()
On execution, two windows appear, one for the right camera and one for the left. However, onmouse() is not called when I drag the mouse on either window; the windows seem to freeze. Then, when I close one of them, the selection I made earlier on that window to track an object is automatically used in the remaining window.
So is there another way to run this code for my two cameras? Any help is appreciated. Thanks.

To create a Process, pass a callable to target. In the code above, left.run() is executed immediately and its return value is handed to Process, rather than the method itself being passed.
Change the code to this:
threadLeft = Process(target=left.run)
threadRight = Process(target=right.run)
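For completeness, a minimal sketch of the corrected main block (my illustration, reusing the device ids from the question); each Process also needs to be started, and joining keeps the parent alive while the children run:

if __name__ == '__main__':
    left = App(1)   # 1 = device id for left camera
    right = App(2)  # 2 = device id for right camera
    threadLeft = Process(target=left.run)     # pass the bound method itself, do not call it
    threadRight = Process(target=right.run)
    threadLeft.start()
    threadRight.start()
    threadLeft.join()
    threadRight.join()

Note that this only fixes the target= mistake; the App objects still create their HighGUI windows in the parent process, so you may also need to construct each App inside its own child process for the windows and mouse callbacks to behave reliably.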

Related

yolov7 deepSORT object tracking IDs are different in all video frames

I am trying to detect objects in a certain area using YOLOv7 and the DeepSORT algorithm, but in the results I get, the IDs are always changing. I have attached 3 photos to illustrate.
As you can see the IDs are different in all frames.
#class based virtual zone tracking

import random
import torch
import numpy as np
from models.experimental import attempt_load
from utils.torch_utils import TracedModel
from utils.datasets import letterbox
from utils.plots import plot_one_box, plot_one_box_center
from utils.general import check_img_size, non_max_suppression, scale_coords
import cv2
import time
from google.colab.patches import cv2_imshow
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon

#deep sort
import os
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.compat.v1 import ConfigProto
from deep_sort.tracker import Tracker
from deep_sort.detection import Detection
import matplotlib.pyplot as plt
from deep_sort import preprocessing, nn_matching
from tracking_helpers import read_class_names, create_box_encoder
from detection_helpers import *

class YOLOv7:
    def __init__(self, weights: str, image_size: int, device: str):
        self.device = device
        self.weights = weights
        self.model = attempt_load(self.weights, map_location=self.device)  # Model Load FP32
        self.stride = int(self.model.stride.max())
        self.image_size = check_img_size(image_size, self.stride)
        if self.device != 'cpu':
            self.half = True
        else:
            self.half = False
        if self.half:
            self.model.half()  # FP16
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        color_values = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(self.names))]
        self.colors = {i: color_values[i] for i in range(len(self.names))}

    def detect(self, raw_image: np.ndarray, conf_thresh=0.45, iou_thresh=0.45, classes=[0]):  # default class: people
        # Run inference
        if self.device != 'cpu':
            self.model(torch.zeros(1, 3, self.image_size, self.image_size).to(self.device).type_as(next(self.model.parameters())))
        with torch.no_grad():
            image = letterbox(raw_image, self.image_size, stride=self.stride)[0]
            image = image[:, :, ::-1].transpose(2, 0, 1)
            image = np.ascontiguousarray(image)
            image = torch.from_numpy(image).to(self.device)
            image = image.half() if self.half else image.float()
            image /= 255.0
            if image.ndimension() == 3:
                image = image.unsqueeze(0)
            # Inference
            detections = self.model(image, augment=False)[0]
            # Apply NMS
            detections = non_max_suppression(detections, conf_thresh, iou_thresh, classes=classes, agnostic=False)[0]
            # Rescale boxes from img_size to raw image size
            detections[:, :4] = scale_coords(image.shape[2:], detections[:, :4], raw_image.shape).round()
            return detections

    def tracking(self, video_frame, yolo_dets, inside_poly=True, count_objects: bool = False, verbose=False, reID_model_path="./deep_sort/model_weights/mars-small128.pb", nms_max_overlap: float = 1.0, max_cosine_distance: float = 0.4, nn_budget: float = None):
        class_names = read_class_names()
        encoder = create_box_encoder(reID_model_path, batch_size=1)
        nms_max_overlap = nms_max_overlap
        metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
        tracker = Tracker(metric)
        *xyxy, conf, cls = yolo_dets
        frame = cv2.cvtColor(video_frame, cv2.COLOR_BGR2RGB)
        if yolo_dets is None:
            bboxes = []
            scores = []
            classes = []
            num_objects = 0
        else:
            bboxes = yolo_dets[:, :4]
            bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]  # convert from xyxy to xywh
            bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
            scores = yolo_dets[:, 4]
            classes = yolo_dets[:, -1]
            num_objects = bboxes.shape[0]
        #how many objects you track
        names = []
        for i in range(num_objects):  # loop through objects and use class index to get class name
            class_indx = int(classes[i])
            class_name = class_names[class_indx]
            names.append(class_name)
        names = np.array(names)
        count = len(names)
        if count_objects:
            cv2.putText(frame, "both inside and outside the polygon detection: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 2)
        # DeepSORT tracker work starts here
        features = encoder(frame, bboxes)  # encode detections and feed to tracker. [No of BB / detections per frame, embed_size]
        detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(bboxes, scores, names, features)]  # [No of BB per frame] deep_sort.detection.Detection object
        cmap = plt.get_cmap('tab20b')  # initialize color map
        colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
        boxs = np.array([d.tlwh for d in detections])  # run non-maxima suppression below
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.class_name for d in detections])
        indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]
        tracker.predict()  # Call the tracker
        tracker.update(detections)  # update using Kalman gain
        for track in tracker.tracks:  # update new findings AKA tracks
            #if not track.is_confirmed() or track.time_since_update > 1:
            #    continue
            bbox = track.to_tlbr()
            class_name = track.get_class()
            color = colors[int(track.track_id) % len(colors)]  # draw bbox on screen
            color = [i * 255 for i in color]
            #drawing poly
            #pts = np.array([[6,449], [1052, 2], [1914, 6], [1766, 1074], [2, 1076]])
            #frame = cv2.polylines(frame, [pts], True, (0,0,255), 5)
            #creating poly
            #poli = Polygon([(6,449), (1052, 2), (1914, 6), (1766, 1074), (2, 1076)])
            #center = (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2))  # center point ( (x1 + x2) / 2, (y1 + y2) / 2 )
            #point = Point(center)
            if inside_poly:
                #drawing poly
                pts = np.array([[6,449], [1052, 2], [1914, 6], [1766, 1074], [2, 1076]])
                frame = cv2.polylines(frame, [pts], True, (0,0,255), 5)
                #creating poly
                poli = Polygon([(6,449), (1052, 2), (1914, 6), (1766, 1074), (2, 1076)])
                center = (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2))  # center point ( (x1 + x2) / 2, (y1 + y2) / 2 )
                point = Point(center)
                if poli.contains(point):
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
                    cv2.putText(frame, class_name + " : " + str(track.track_id), (int(bbox[0]), int(bbox[1]-11)), 0, 0.6, (255,255,255), 1, lineType=cv2.LINE_AA)
                    cv2.putText(frame, "0", center, 0, 0.6, (255,255,255), 1, lineType=cv2.LINE_AA)
            else:
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
                cv2.putText(frame, class_name + " : " + str(track.track_id), (int(bbox[0]), int(bbox[1]-11)), 0, 0.6, (255,255,255), 1, lineType=cv2.LINE_AA)
            if verbose == 2:
                print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id), class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))
        result = np.asarray(frame)
        result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        return result

if __name__ == '__main__':
    yolov7 = YOLOv7(weights='yolov7x.pt', device='cpu', image_size=800)
    cap = cv2.VideoCapture('street5sn.mp4')
    torch.cuda.empty_cache()
    #writer
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # by default VideoCapture returns float instead of int
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    codec = cv2.VideoWriter_fourcc(*"DIVX")
    out = cv2.VideoWriter("./output/video_out_track5sn-d2.mp4", codec, fps, (width, height))
    while True:
        t1 = time.time()
        ret, frame = cap.read()
        if not ret:
            break
        detections = yolov7.detect(frame)
        vir = yolov7.tracking(frame, detections, count_objects=True, inside_poly=False)
        out.write(vir)
        cv2_imshow(vir)  # colab imshow call
        print("add frame ...")
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    out.release()
    cap.release()
    cv2.destroyAllWindows()
I am using this repo.
I did not make any changes to other files.

NameError: name 'face_frame' is not defined

This code has been taken from GitHub. I have installed all the dependencies.
What could be a possible fix for this issue?
If I try to run this project, I get these errors:
Traceback (most recent call last):
File "c:\Project\Drowsiness-Detection-System-for-Drivers\driver_drowsiness.py", line 102, in <module>
cv2.imshow("Result of detector", face_frame)
NameError: name 'face_frame' is not defined
[ WARN:0#19.631] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (539) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
# Importing OpenCV Library for basic image processing functions
import cv2
# Numpy for array related functions
import numpy as np
# Dlib for deep learning based Modules and face landmark detection
import dlib
# face_utils for basic operations of conversion
from imutils import face_utils

# Initializing the camera and taking the instance
cap = cv2.VideoCapture(0)

# Initializing the face detector and landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# status marking for current state
sleep = 0
drowsy = 0
active = 0
status = ""
color = (0, 0, 0)

def compute(ptA, ptB):
    dist = np.linalg.norm(ptA - ptB)
    return dist

def blinked(a, b, c, d, e, f):
    up = compute(b, d) + compute(c, e)
    down = compute(a, f)
    ratio = up/(2.0*down)
    # Checking if it is blinked
    if(ratio > 0.25):
        return 2
    elif(ratio > 0.21 and ratio <= 0.25):
        return 1
    else:
        return 0

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    # detected face in faces array
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        face_frame = frame.copy()
        cv2.rectangle(face_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        landmarks = predictor(gray, face)
        landmarks = face_utils.shape_to_np(landmarks)
        # The numbers are actually the landmarks which will show eye
        left_blink = blinked(landmarks[36], landmarks[37],
                             landmarks[38], landmarks[41], landmarks[40], landmarks[39])
        right_blink = blinked(landmarks[42], landmarks[43],
                              landmarks[44], landmarks[47], landmarks[46], landmarks[45])
        # Now judge what to do for the eye blinks
        if(left_blink == 0 or right_blink == 0):
            sleep += 1
            drowsy = 0
            active = 0
            if(sleep > 6):
                status = "SLEEPING !!!"
                color = (255, 0, 0)
        elif(left_blink == 1 or right_blink == 1):
            sleep = 0
            active = 0
            drowsy += 1
            if(drowsy > 6):
                status = "Drowsy !"
                color = (0, 0, 255)
        else:
            drowsy = 0
            sleep = 0
            active += 1
            if(active > 6):
                status = "Active :)"
                color = (0, 255, 0)
        cv2.putText(frame, status, (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)
        for n in range(0, 68):
            (x, y) = landmarks[n]
            cv2.circle(face_frame, (x, y), 1, (255, 255, 255), -1)
    cv2.imshow("Frame", frame)
    cv2.imshow("Result of detector", face_frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
There is an issue in the module itself with how the face_frame variable is used: it is only assigned inside the for face in faces: loop, so on any frame where no face is detected, cv2.imshow("Result of detector", face_frame) raises the NameError. This is already reported on GitHub:
reported issue for face_frame
Moving face_frame = frame.copy() out of the loop fixes it. Working code:
# Importing OpenCV Library for basic image processing functions
import cv2
# Numpy for array related functions
import numpy as np
# Dlib for deep learning based Modules and face landmark detection
import dlib
# face_utils for basic operations of conversion
from imutils import face_utils

# Initializing the camera and taking the instance
cap = cv2.VideoCapture(0)

# Initializing the face detector and landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# status marking for current state
sleep = 0
drowsy = 0
active = 0
status = ""
color = (0, 0, 0)

def compute(ptA, ptB):
    dist = np.linalg.norm(ptA - ptB)
    return dist

def blinked(a, b, c, d, e, f):
    up = compute(b, d) + compute(c, e)
    down = compute(a, f)
    ratio = up/(2.0*down)
    # Checking if it is blinked
    if(ratio > 0.25):
        return 2
    elif(ratio > 0.21 and ratio <= 0.25):
        return 1
    else:
        return 0

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    face_frame = frame.copy()
    # detected face in faces array
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        cv2.rectangle(face_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        landmarks = predictor(gray, face)
        landmarks = face_utils.shape_to_np(landmarks)
        # The numbers are actually the landmarks which will show eye
        left_blink = blinked(landmarks[36], landmarks[37],
                             landmarks[38], landmarks[41], landmarks[40], landmarks[39])
        right_blink = blinked(landmarks[42], landmarks[43],
                              landmarks[44], landmarks[47], landmarks[46], landmarks[45])
        # Now judge what to do for the eye blinks
        if(left_blink == 0 or right_blink == 0):
            sleep += 1
            drowsy = 0
            active = 0
            if(sleep > 6):
                status = "SLEEPING !!!"
                color = (255, 0, 0)
        elif(left_blink == 1 or right_blink == 1):
            sleep = 0
            active = 0
            drowsy += 1
            if(drowsy > 6):
                status = "Drowsy !"
                color = (0, 0, 255)
        else:
            drowsy = 0
            sleep = 0
            active += 1
            if(active > 6):
                status = "Active :)"
                color = (0, 255, 0)
        cv2.putText(frame, status, (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)
        for n in range(0, 68):
            (x, y) = landmarks[n]
            cv2.circle(face_frame, (x, y), 1, (255, 255, 255), -1)
    cv2.imshow("Frame", frame)
    cv2.imshow("Result of detector", face_frame)
    key = cv2.waitKey(1)
    if key == 27:
        break

Extract cellular images inside form data

I have a collection of images as below -
Example 1
Example 2
Example 3
These represent dates in DDMMYYYY format. For each of these images, I want to save each digit as a separate image. For Example 1, I wish to save 7, 9, 0, 8, 5, 8, 7, 1 as separate images sliced from the original image. So far, I have tried various methods described in different Stack Overflow answers and blog posts, but none of them seems to work.
Code to extract boxes surrounding dates -
from glob import glob

import cv2 as cv
import numpy as np
from tqdm import tqdm

class ExtractRectangle:
    def __init__(self):
        super().__init__()
        self.minLinLength_h = 70
        self.minLinLength_v = 5
        self.maxLineGap = 20

    def is_horizontal(self, line, thresh=5):
        return abs(line[1] - line[3]) <= thresh

    def is_vertical(self, line, thresh=5):
        return abs(line[0] - line[2]) <= thresh

    def get_lines(self, canny, horizontal=True):
        lines = []
        if horizontal:
            linesP = cv.HoughLinesP(
                canny,
                rho=1,
                theta=np.pi / 180,
                threshold=10,
                lines=None,
                minLineLength=self.minLinLength_h,
                maxLineGap=20,
            )
        else:
            linesP = cv.HoughLinesP(
                canny,
                rho=1,
                theta=np.pi / 180,
                threshold=10,
                lines=None,
                minLineLength=self.minLinLength_v,
                maxLineGap=20,
            )
        if linesP is not None:
            for i in range(0, len(linesP)):
                l = linesP[i][0]
                if self.is_horizontal(l, 3) and horizontal:
                    lines.append(l)
                elif self.is_vertical(l, 3):
                    lines.append(l)
        return lines

    def remove_whitespace(self, img):
        # https://stackoverflow.com/questions/48395434/how-to-crop-or-remove-white-background-from-an-image
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        th, threshed = cv.threshold(gray, 127, 255, cv.THRESH_BINARY_INV)
        kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (11, 11))
        morphed = cv.morphologyEx(threshed, cv.MORPH_CLOSE, kernel)
        cnts = cv.findContours(morphed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2]
        cnt = sorted(cnts, key=cv.contourArea)[-1]
        x, y, w, h = cv.boundingRect(cnt)
        dst = img[y : y + h, x : x + w]
        return dst

    def process_image(self, filename, path):
        errenous = False
        img = cv.imread(cv.samples.findFile(filename))
        img = self.remove_whitespace(img)
        cImage = np.copy(img)
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        canny = cv.Canny(gray, 100, 200)

        horizontal_lines = self.get_lines(canny)
        horizontal_lines = sorted(horizontal_lines, key=lambda a_entry: a_entry[..., 1])
        vertical_lines = self.get_lines(canny, horizontal=False)
        vertical_lines = sorted(vertical_lines, key=lambda a_entry: a_entry[..., 0])

        if len(horizontal_lines) > 0:
            initial_line = horizontal_lines[0]
            final_line = horizontal_lines[-1]
            # LeftTop(x1, y1) -> RightTop(x2, y1) -> RightBottom(x2, y2) -> LeftBottom(x1, y2)
            y1 = initial_line[1]
            y2 = final_line[1]
            bottom = min(y1, y2)
            top = max(y1, y2)
            # post whitespace removal, dates should only be the major component
            if (top - bottom) / img.shape[0] < 0.6:
                errenous = True
        else:
            errenous = True

        if len(vertical_lines) > 0:
            initial_line = vertical_lines[0]
            final_line = vertical_lines[-1]
            x1 = initial_line[0]
            x2 = final_line[0]
            left = min(x1, x2)
            right = max(x1, x2)
            # as dates occupy majority of the horizontal space
            if (right - left) / img.shape[1] < 0.95:
                errenous = True
        else:
            errenous = True

        if not errenous:
            # cImage = cv.rectangle(cImage, (left, bottom), (right, top), (255, 0, 0), 2)
            cImage = cImage[
                bottom : bottom + (top - bottom), left : left + (right - left)
            ]
        cv.imwrite(f"{path}/{filename.split('/')[-1]}", cImage)

if __name__ == "__main__":
    extract = ExtractRectangle()
    test_files = glob("data/raw/test/*.png")
    test_path = "data/processed/test/"
    for path in tqdm(test_files):
        extract.process_image(path, test_path)
    train_files = glob("data/raw/train/*.png")
    train_path = "data/processed/train/"
    for path in tqdm(train_files):
        extract.process_image(path, train_path)
Resultant detection for above images -
Example 1
Example 2
Example 3
Some other samples
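What I would like to end up with is roughly the following (a sketch only, assuming the cropped date box contains eight equally wide digit cells, which may not hold for every form):

import cv2 as cv

# hypothetical path: one of the crops written out by process_image above
box = cv.imread("data/processed/test/example1.png")
n_digits = 8                        # DDMMYYYY -> 8 cells
cell_w = box.shape[1] // n_digits   # assume equally wide cells
for i in range(n_digits):
    digit = box[:, i * cell_w : (i + 1) * cell_w]
    cv.imwrite(f"digit_{i}.png", digit)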

How to use cursor to move video with OpenCV?

I am currently trying to implement a function that will allow me to use the cursor to "grab" a video and move it around. I want to be able to only move the video when the cursor is held down and moved. I have defined the mouse events, captured the coordinates of my cursor but I am not sure how to write the function to crop the video.
x1, x2, y1, y2 = 0, 0, 0, 0
mouse_down = False

def mouse_event_callback(event, x, y, flags, param):
    global x1, x2, y1, y2, mouse_down
    if event == cv2.EVENT_LBUTTONDOWN:
        x1, y1 = x, y
        mouse_down = True
    elif event == cv2.EVENT_MOUSEMOVE:
        if mouse_down:
            x2, y2 = x, y
            mouse_grab_video(x1, x2, y1, y2)
    elif event == cv2.EVENT_LBUTTONUP:
        mouse_down = False

def mouse_grab_video(x1, x2, y1, y2):
    ...
Any help would be much appreciated!
You can crop the image directly based on the coordinates you get from your mouse event.
#!/usr/bin/env python3
import argparse
import cv2
import numpy as np
from PIL import Image

drawing = False  # true if mouse is pressed
ix, iy = -1, -1
refPt = []
img = ""
clone = ""
ROIRegion = []

# mouse callback function
def draw_rectangle(event, x, y, flags, param):
    global ix, iy, drawing, img, clone, refPt, ROIRegion
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
        refPt = [(x, y)]
        ROIRegion.append(refPt)
        #clone = img.copy()
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            img = clone.copy()
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 3)
            a = x
            b = y
            if a != x | b != y:
                cv2.rectangle(img, (ix, iy), (x, y), (0, 0, 0), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        refPt.append((x, y))
        img = clone.copy()
        cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 2)

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
img = cv2.imread(args["image"])
img = np.array(img)
clone = img.copy()
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_rectangle)

while(1):
    cv2.imshow('image', img)
    k = cv2.waitKey(1) & 0xFF
    if k == ord("r"):
        del ROIRegion[-1]
        del refPt[-1]
        img = clone.copy()
    elif k == 27:
        break

#Crop image to ROI
for region in range(len(ROIRegion)):
    cv2.rectangle(img, ROIRegion[region][0], ROIRegion[region][1], (0, 255, 0), 2)
    roi = clone[ROIRegion[region][0][1]:ROIRegion[region][1][1], ROIRegion[region][0][0]:ROIRegion[region][1][0]]
For zoom, you may use EVENT_MOUSEWHEEL to increment a scale factor and resize the image based on that scale, then update imshow with the latest scaled image. You can refer to this link.
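A rough sketch of that idea (my own illustration, not tested against your setup; the image path and zoom limits are placeholders):

import cv2

scale = 1.0                      # current zoom factor
img = cv2.imread("image.png")    # hypothetical input image

def on_mouse(event, x, y, flags, param):
    global scale
    if event == cv2.EVENT_MOUSEWHEEL:
        # the sign of flags indicates the wheel direction: forward -> zoom in
        scale *= 1.1 if flags > 0 else 0.9
        scale = max(0.1, min(scale, 10.0))

cv2.namedWindow('image')
cv2.setMouseCallback('image', on_mouse)
while True:
    view = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    cv2.imshow('image', view)
    if cv2.waitKey(20) & 0xFF == 27:    # ESC exits
        break
cv2.destroyAllWindows()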

Extract optical flow as data (numbers) from live feed (webcam) using Python openCV

First of all, I am new to programming, though I would like to learn, especially Python. My background is in animation and CGI.
I have Python 2.7 and OpenCV x64 installed on Windows. I tested the optical flow example they provide (opt_flow.py, the one with the green arrows) and I like it, but I am trying to understand how I can get the data out as values. I am not interested in seeing the camera output or the green arrows; I just want the data out to use later. Is there a way to do that?
For example: the x and y values and the length of the green arrows.
Thank you all
You can get the optical flow vectors (green arrows) in the draw_flow function of opt_flow.py. Here is how I would do it :
#!/usr/bin/env python
'''
example to show optical flow

USAGE: opt_flow.py [<video_source>]

Keys:
 1 - toggle HSV flow visualization
 2 - toggle glitch

Keys:
    ESC    - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import math
import cv2
import video

def draw_flow(img, flow, step=16):
    global arrows
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        arrows.append([x1, y1, math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))])
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis

def draw_hsv(flow):
    h, w = flow.shape[:2]
    fx, fy = flow[:,:,0], flow[:,:,1]
    ang = np.arctan2(fy, fx) + np.pi
    v = np.sqrt(fx*fx+fy*fy)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[...,0] = ang*(180/np.pi/2)
    hsv[...,1] = 255
    hsv[...,2] = np.minimum(v*4, 255)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return bgr

def warp_flow(img, flow):
    h, w = flow.shape[:2]
    flow = -flow
    flow[:,:,0] += np.arange(w)
    flow[:,:,1] += np.arange(h)[:,np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    return res

if __name__ == '__main__':
    import sys
    print(__doc__)
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0
    arrows = []
    cam = video.create_capture(fn)
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    show_hsv = False
    show_glitch = False
    cur_glitch = prev.copy()
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray
        arrows.clear()
        finalImg = draw_flow(gray, flow)
        print(arrows)
        cv2.imshow('flow', finalImg)
        if show_hsv:
            cv2.imshow('flow HSV', draw_hsv(flow))
        if show_glitch:
            cur_glitch = warp_flow(cur_glitch, flow)
            cv2.imshow('glitch', cur_glitch)
        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('1'):
            show_hsv = not show_hsv
            print('HSV flow visualization is', ['off', 'on'][show_hsv])
        if ch == ord('2'):
            show_glitch = not show_glitch
            if show_glitch:
                cur_glitch = img.copy()
            print('glitch is', ['off', 'on'][show_glitch])
    cv2.destroyAllWindows()
In the code above, I'm saving the optical flow vectors (start point coordinates and vector length) in the global variable arrows like so :
arrows.append([x1,y1, math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))])
with (x1, y1) the arrow's start point and (x2, y2) the arrow's end point.
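If you want to use the values rather than just print them, a small sketch (my addition) of reading them back out of arrows after each call to draw_flow:

if arrows:
    # each entry is [x, y, length]: the grid point coordinates and the flow magnitude there
    lengths = [a[2] for a in arrows]
    print('number of vectors:', len(arrows))
    print('mean flow length:', sum(lengths) / len(lengths))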
Hope it helps.
