I have written some code to lock my computer when I get up and walk away from it.
But with a naive algorithm it locks immediately, because certain movements stop it from detecting my face. I want it to wait 3 seconds after it loses my face, check again, and only lock if my face is still not detected. However, when I use the time.sleep method the webcam video freezes, and the program behaves as if no face exists even when my face is at the camera. What kind of working algorithm do you suggest for this?
from multiprocessing.connection import wait
import cv2
import time
import pyautogui
import ctypes
from math import sin, cos, radians

camera = cv2.VideoCapture(0)
face = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")

settings = {
    'scaleFactor': 1.3,
    'minNeighbors': 3,
    'minSize': (50, 50),
    'flags': cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH
}

def rotate_image(image, angle):
    if angle == 0: return image
    height, width = image.shape[:2]
    rot_mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 0.9)
    result = cv2.warpAffine(image, rot_mat, (width, height), flags=cv2.INTER_LINEAR)
    return result

def rotate_point(pos, img, angle):
    if angle == 0: return pos
    x = pos[0] - img.shape[1]*0.4
    y = pos[1] - img.shape[0]*0.4
    newx = x*cos(radians(angle)) + y*sin(radians(angle)) + img.shape[1]*0.4
    newy = -x*sin(radians(angle)) + y*cos(radians(angle)) + img.shape[0]*0.4
    return int(newx), int(newy), pos[2], pos[3]

while True:
    ret, img = camera.read()
    for angle in [0, -25, 25]:
        rimg = rotate_image(img, angle)
        detected = face.detectMultiScale(rimg, **settings)
        if len(detected):
            detected = [rotate_point(detected[-1], img, -angle)]
            break
    for x, y, w, h in detected[-1:]:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow('facedetect', img)
    if cv2.waitKey(5) != -1:
        break
    if len(detected) == 0:
        time.sleep(3)
        if len(detected) == 1:
            pass
        else:
            ctypes.windll.user32.LockWorkStation()

cv2.destroyWindow("facedetect")
Set a variable to the timestamp of the moment you last stopped detecting a face. On every loop, if you detect your face again, reset this variable to None; if the variable is not None and variable + 3 seconds <= current timestamp, lock your station.
import time

unseen_from = None
while True:
    # etc etc
    detected = bool(detected)  # empty list == False, True otherwise
    if unseen_from is None:
        unseen_from = None if detected else time.time()
    elif detected:
        unseen_from = None
    elif unseen_from + 3 < time.time():
        ctypes.windll.user32.LockWorkStation()
This was live coding and I don't have a Windows machine to test it on, but the idea is there.
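To make that concrete, here is a minimal sketch of how the timer could slot into the question's webcam loop without any sleep (untested; it assumes the camera, face and settings objects from the question's code, and skips the rotation sweep for brevity):

import time
import ctypes
import cv2

unseen_from = None  # when the face was last lost; None while a face is visible

while True:
    ret, img = camera.read()
    detected = face.detectMultiScale(img, **settings)

    if len(detected):
        unseen_from = None                      # face is back: reset the timer
    elif unseen_from is None:
        unseen_from = time.time()               # face just disappeared: start the timer
    elif time.time() - unseen_from > 3:
        ctypes.windll.user32.LockWorkStation()  # gone for over 3 s: lock
        unseen_from = None

    cv2.imshow('facedetect', img)
    if cv2.waitKey(5) != -1:                    # the loop keeps running, so video never freezes
        break

The key point is that the 3-second wait is tracked with a timestamp comparison each frame instead of blocking the loop, so the camera keeps reading.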
Here's a picture of a maze solver. When the BFS function runs over the image, it goes around the outside of the maze instead of through it.
I know it's possible to just crop the image manually, but I want a function where the computer automatically detects the non-black edges and removes them. How would I do this?
# Program that will read an image using OpenCV, select starting and ending points,
# and solve the maze using a BFS function.
# Will be imported into a Flask backend.
'''
Requirements: pip3 install opencv-python (or the contrib version, depending on console)
'''
# libraries
import imghdr
import cv2
import numpy as np
import threading
import colorsys

# First we read the image and threshold it to a binary image so reading it is easier.
# set threshold value
img = cv2.imread("Mazes/maze3.jpg", cv2.IMREAD_GRAYSCALE)
_, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
h, w = img.shape[:2]

# separate function to display the image
def solution():
    global img
    # show image
    cv2.imshow("Maze Solver", img)
    # run mouse pointer function for the user to click points
    cv2.setMouseCallback('Maze Solver', maze_points)
    while True:
        # after the thread is run, display the image
        cv2.imshow("Maze Solver", img)
        cv2.waitKey(1)

# Point class so the user can click twice to determine the start and end points
class Point(object):
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

# number of times the mouse has been clicked
num_of_clicks = 0
# how big the pointer will appear on screen
mouse_click_size = 2
# initialize start and end points for the user's clicks
start_point = Point()
end_point = Point()
# neighbour offsets: the four directions BFS can move in
subtree = [Point(0, -1), Point(0, 1), Point(1, 0), Point(-1, 0)]

# mouse callback to click two points on the image (OpenCV only; could also input through console)
def maze_points(running, pX, pY, flags, param):
    # globals
    global img, start_point, end_point, num_of_clicks
    if running == cv2.EVENT_LBUTTONUP:
        # start point click
        if num_of_clicks == 0:
            cv2.rectangle(img, (pX - mouse_click_size, pY - mouse_click_size),
                          (pX + mouse_click_size, pY + mouse_click_size), (0, 0, 255), -1)
            start_point = Point(pX, pY)
            print("start = ", start_point.x, start_point.y)
            num_of_clicks += 1
        # end point click
        elif num_of_clicks == 1:
            cv2.rectangle(img, (pX - mouse_click_size, pY - mouse_click_size),
                          (pX + mouse_click_size, pY + mouse_click_size), (0, 200, 50), -1)
            end_point = Point(pX, pY)
            print("end = ", end_point.x, end_point.y)
            num_of_clicks += 1

# BFS function that searches through all nodes and cells
def solve_maze(start, end):
    # globals
    global img, h, w
    const = 10000
    # set once a path is found (debugging)
    true_path = False
    # set queue
    queue = []
    # whether a cell has been checked for a valid path
    cell_checked = [[0 for j in range(w)] for i in range(h)]
    # parent of each visited cell, for path reconstruction
    tree_begin = [[Point() for j in range(w)] for i in range(h)]
    # store the starting point of the maze
    queue.append(start)
    cell_checked[start.y][start.x] = 1
    # search through nodes until the queue is empty
    while len(queue) > 0:
        valid_paths = queue.pop(0)
        # visit the surrounding cells
        for nodes in subtree:
            cell = valid_paths + nodes
            point_x = cell.x
            point_y = cell.y
            solution_max = 0
            # Borders are black, so a neighbouring pixel is valid if it is inside the image,
            # not yet visited, and not black, i.e. !(B==0 && G==0 && R==0).
            if (point_x >= 0 and point_x < w and point_y >= 0 and point_y < h and
                    cell_checked[point_y][point_x] == solution_max and
                    (img[point_y][point_x][0] != 0 or img[point_y][point_x][1] != 0 or img[point_y][point_x][2] != 0)):
                queue.append(cell)
                cell_checked[cell.y][cell.x] = cell_checked[valid_paths.y][valid_paths.x] + 1
                # colour visited cells by BFS distance (blue)
                img[cell.y][cell.x] = list(reversed(
                    [i * 255 for i in colorsys.rgb_to_hsv(cell_checked[cell.y][cell.x] / const, 1, 1)]))
                # record the parent cell
                tree_begin[cell.y][cell.x] = valid_paths
                # end the search once the end pixel is found
                if cell == end:
                    true_path = True
                    del queue[:]
                    break
    # list of path cells for tracing
    path_nodes = []
    # display the found path
    if true_path:
        valid_paths = end
        # walk back from end to start, then reverse so the path runs start to end
        while valid_paths != start:
            path_nodes.append(valid_paths)
            valid_paths = tree_begin[valid_paths.y][valid_paths.x]
        path_nodes.append(valid_paths)
        path_nodes.reverse()
        # draw the path
        for valid_paths in path_nodes:
            img[valid_paths.y][valid_paths.x] = [0, 0, 255]
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        # p = cv2.dilate(img[p.y], kernel, iterations=2)
        # console display
        print("Path Found")
    else:
        print("Path Not Found")

# console prompt for points
print("Select start and end points : ")
# use a thread to simultaneously run the display function, the mouse callback, and BFS
mazeSolver = threading.Thread(target=solution, args=())
# daemon thread exits together with the main thread
mazeSolver.daemon = True
mazeSolver.start()
# don't run anything until both points have been clicked
while num_of_clicks < 2:
    pass
# solve maze
solve_maze(start_point, end_point)
cv2.waitKey(0)
'''
50 images tested with 93% accuracy rate
'''
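This isn't from the original post, but one way to remove the non-black margin automatically, assuming the maze walls are the only pure-black pixels after thresholding, is to crop the image to the bounding box of its black pixels. A minimal sketch:

import cv2
import numpy as np

def autocrop_to_walls(image):
    # assumes walls are pure black (0, 0, 0) after thresholding
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ys, xs = np.where(gray == 0)
    if len(xs) == 0:
        return image  # no black pixels at all: nothing to crop
    return image[ys.min():ys.max() + 1, xs.min():xs.max() + 1]

Calling img = autocrop_to_walls(img) right after the threshold step (before h, w = img.shape[:2]) would strip the white margin, so the maze's outer wall rather than empty space sits at the image border; whether BFS then stays inside still depends on that outer wall being closed.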
I'm using Windows 10 and the latest version of OpenCV.
I am working on a project where I need to detect a face in an image and store it to a directory for further use.
I am using OpenCV (Haar cascade classifier).
It only detects an upright head. For detecting a tilted head I tried a tuned version of the Haar cascade, but the problem is that it only detects the tilted face in live video. If I run the web camera and detect using the haarcascade_frontalface_alt2.xml file, it detects the tilted head; but if I loop over my image directory and pass the images in, it does not detect a face at the same angle. That is, it detects in video but not from an image (same angle).
Here is the code:
import cv2
from math import sin, cos, radians
import time
import fnmatch
import os

total_number_of_frame = 0
framedirectory = "D:/finalpaper/extractedframes"
number_of_extracted_frames = len(fnmatch.filter(os.listdir('D:/finalpaper/extractedframes'), '*.png'))
print(number_of_extracted_frames)

face = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_alt2.xml")
settings = {
    'scaleFactor': 1.3,
    'minNeighbors': 3,
    'minSize': (50, 50)
}

def rotate_image(image, angle):
    if angle == 0: return image
    height, width = image.shape[:2]
    rot_mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 0.9)
    result = cv2.warpAffine(image, rot_mat, (width, height), flags=cv2.INTER_LINEAR)
    return result

def rotate_point(pos, img, angle):
    if angle == 0: return pos
    x = pos[0] - img.shape[1]*0.4
    y = pos[1] - img.shape[0]*0.4
    newx = x*cos(radians(angle)) + y*sin(radians(angle)) + img.shape[1]*0.4
    newy = -x*sin(radians(angle)) + y*cos(radians(angle)) + img.shape[0]*0.4
    return int(newx), int(newy), pos[2], pos[3]

for i in range(number_of_extracted_frames):
    framenumber = str(i) + ".png"
    # joining path to directory
    filepath = os.path.join(framedirectory, framenumber)
    frame = cv2.imread(filepath)
    for angle in [0, -25, 25]:
        rimg = rotate_image(frame, angle)
        detected = face.detectMultiScale(rimg, **settings)
        if len(detected):
            detected = [rotate_point(detected[-1], frame, -angle)]
            break
    # Make a copy as we don't want to draw on the original image:
    for x, y, w, h in detected[-1:]:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow("frame", frame)
    cv2.waitKey(700)
If I run the above code with the web camera it detects the face, but when I use my loop over the images it does not detect the tilted head.
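I can't reproduce the setup, so this is a generic thing to check rather than a confirmed fix: Haar cascades are usually run on a grayscale (often histogram-equalized) image, while detectMultiScale here receives the BGR frame directly. Normalizing the saved frames may make the image path behave like the webcam path, e.g. inside the angle loop:

    gray = cv2.cvtColor(rimg, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)  # even out lighting differences between saved frames
    detected = face.detectMultiScale(gray, **settings)

You could also widen the angle sweep (e.g. [0, -15, 15, -30, 30]) in case the tilt in the saved frames falls between the tested angles.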
I have written code to simulate a game using face detection. I find the centre of the face and, based on its movement, press keys with the pynput library. The code works fine except for one small issue: whenever it detects a movement of the point, it presses the keyboard key more than once. I want to limit it to a single key press.
'''
import cv2
import numpy as np
from pynput.keyboard import Key, Controller
import time

keyboard = Controller()
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
wc = cv2.VideoCapture(0)
time.sleep(2)

# calibration: take the reference face centre from the first frames
for i in range(40):
    ret, img = wc.read()
    img = cv2.flip(img, 1)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        centre = [int((x + w + x) / 2), int((y + h + y) / 2)]

# Read until video is completed
while wc.isOpened():
    # Capture frame-by-frame
    ret, img = wc.read()
    img = cv2.flip(img, 1)
    if ret == True:
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            centre_new = [int((x + w + x) / 2), int((y + h + y) / 2)]
            cv2.circle(img, (centre_new[0], centre_new[1]), 0, (0, 0, 255), 5)
            if centre_new[0] - centre[0] > 100:
                keyboard.press(Key.right)
                keyboard.release(Key.right)
                print('right')
            if centre_new[0] - centre[0] < -100:
                keyboard.press(Key.left)
                keyboard.release(Key.left)
                print('left')
            if centre_new[1] - centre[1] < -100:
                keyboard.press(Key.up)
                keyboard.release(Key.up)
                print('up')
            if centre_new[1] - centre[1] > 100:
                keyboard.press(Key.down)
                keyboard.release(Key.down)
                print('down')
        # Display the resulting frame
        cv2.imshow('Face', img)
        # Press Q on keyboard to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break

# When everything done, release the video capture object
wc.release()
# Closes all the frames
cv2.destroyAllWindows()
'''
I get this kind of output:
'''
up
up
up
up
up
up
up
right
right
right
right
right
right
right
right
right
up
up
up
up
up
'''
Define a home zone that the face must return to before another keystroke can fire, and use a flag to watch it. Is this what you're looking for?
keystroke_zone = 100
home_zone = keystroke_zone - 10  # or anything smaller than keystroke_zone
is_home = True

while wc.isOpened():
    ...
    if is_home:
        if centre_new[0] - centre[0] > keystroke_zone:
            keyboard.press(Key.right)
            keyboard.release(Key.right)
            print('right')
            is_home = False
        if centre_new[0] - centre[0] < -keystroke_zone:
            keyboard.press(Key.left)
            keyboard.release(Key.left)
            print('left')
            is_home = False
        if centre_new[1] - centre[1] < -keystroke_zone:
            keyboard.press(Key.up)
            keyboard.release(Key.up)
            print('up')
            is_home = False
        if centre_new[1] - centre[1] > keystroke_zone:
            keyboard.press(Key.down)
            keyboard.release(Key.down)
            print('down')
            is_home = False
    else:
        if abs(centre_new[0] - centre[0]) < home_zone or \
           abs(centre_new[1] - centre[1]) < home_zone:
            is_home = True
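An alternative I sometimes use instead of a zone flag (my own suggestion, not part of the answer above) is a simple time-based debounce, so each accepted press is followed by a cooldown:

import time

cooldown = 1.0   # seconds between accepted presses; tune to taste
last_press = 0.0

def tap(key):
    # press a key at most once per cooldown window
    global last_press
    now = time.time()
    if now - last_press >= cooldown:
        keyboard.press(key)
        keyboard.release(key)
        last_press = now

Each branch in the loop then becomes e.g. tap(Key.right) instead of the paired press/release calls.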
I have followed this tutorial for face tracking using servo motors:
website: https://embeditelectronics.com/blog/project/face-tracker/
github: https://github.com/embeditelectronics/Face-Tracker/blob/master/python-face-tracker/face.py
But the hardware used in the tutorial is different from the hardware I have. Right now I'm using an Adafruit PCA9685 to connect my servos to my Raspberry Pi, and I have tried changing the code for my Adafruit board using the example provided on GitHub:
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# from pisoc import *
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
position = 90

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def Track(pan, tilt, center, target=Point(160, 120), threshold=Point(16, 24), delta=Point(4, 3)):
    global position
    position = 90
    if center.x > target.x + threshold.x:
        position = position - delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() - delta.x)
    elif center.x < target.x - threshold.x:
        position = position + delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() + delta.x)
    if center.y > target.y + threshold.y:
        position = position + delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() + delta.y)
    elif center.y < target.y - threshold.y:
        position = position - delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() - delta.y)

if __name__ == "__main__":
    # PiSoC(log_level = 'debug')
    pan = pwm.set_pwm(0, 0, position)
    tilt = pwm.set_pwm(1, 0, position)
    # pan = Servo(0, max_angle = 320)
    # tilt = Servo(1, max_angle = 240)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=camera.resolution)
    face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/lbpcascade_frontalface.xml')
    scale = (camera.resolution[0]/320.0, camera.resolution[1]/240.0)
    time.sleep(0.1)
    # pan.Start()
    # tilt.Start()
    for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
        image = frame.array
        resized = cv2.resize(image, (320, 240))
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        if len(faces) > 0:
            for (x, y, w, h) in faces:
                Track(pan, tilt, Point(x + w/2.0, y + h/2.0))
                break
        faces_resized = [(int(scale[0]*x), int(scale[1]*y), int(scale[0]*w), int(scale[1]*h)) for (x, y, w, h) in faces]
        for (x, y, w, h) in faces_resized:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
        cv2.imshow("Result", image)
        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)
        if key == ord('q') or key == 27:
            break
    # pan.Stop()
    # tilt.Stop()
That is the complete code. The thing I'm stuck on is that the Pi camera can detect my face, but the servo motors do not behave as expected. I also don't understand the connection between the servo motors and the part of the code that detects my face; I know there is a missing link somewhere, but I'm not sure where exactly. I'm not even sure this is the best way to do face tracking; I have tried a lot of other ways and ended up with many blunders. If you have a better version of this code or any tutorial, please do suggest it.
*** updated ***
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# from pisoc import *
import Adafruit_PCA9685

pwm = Adafruit_PCA9685.PCA9685()
position = 90

FRAME_W = 180
FRAME_H = 100
cam_pan = 90
cam_tilt = 60

pwm.set_pwm_freq(50)
pwm.set_pwm(0, 0, 120)
pwm.set_pwm(1, 0, 120)

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def Track(pan, tilt, center, target=Point(160, 120), threshold=Point(16, 24), delta=Point(4, 3)):
    global position
    position = 90
    if center.x > target.x + threshold.x:
        position = position - delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() - delta.x)
    elif center.x < target.x - threshold.x:
        position = position + delta.x
        pwm.set_pwm(0, 0, position)
        # pan.SetAngle(pan.ReadAngle() + delta.x)
    if center.y > target.y + threshold.y:
        position = position + delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() + delta.y)
    elif center.y < target.y - threshold.y:
        position = position - delta.x
        pwm.set_pwm(1, 0, position)
        # tilt.SetAngle(tilt.ReadAngle() - delta.y)

if __name__ == "__main__":
    # PiSoC(log_level = 'debug')
    pan = pwm.set_pwm(0, 0, position)
    tilt = pwm.set_pwm(1, 0, position)
    # pan = Servo(0, max_angle = 320)
    # tilt = Servo(1, max_angle = 240)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=camera.resolution)
    face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/lbpcascade_frontalface.xml')
    scale = (camera.resolution[0]/320.0, camera.resolution[1]/240.0)
    time.sleep(0.1)
    # pan.Start()
    # tilt.Start()
    for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
        image = frame.array
        resized = cv2.resize(image, (320, 240))
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        if len(faces) > 0:
            for (x, y, w, h) in faces:
                Track(pan, tilt, Point(x + w/2.0, y + h/2.0))
                break
        faces_resized = [(int(scale[0]*x), int(scale[1]*y), int(scale[0]*w), int(scale[1]*h)) for (x, y, w, h) in faces]
        for (x, y, w, h) in faces_resized:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
        cv2.imshow("Result", image)
        key = cv2.waitKey(1) & 0xFF
        rawCapture.truncate(0)
        if key == ord('q') or key == 27:
            break
    # pan.Stop()
    # tilt.Stop()
Now the servo motors are moving, but only by one small step, about 0.5 right / 0.5 left, based on the face direction.
Not sure if you spotted it yet, but you are setting the position to 90 every time the function is run, so it is never going to get past one step: it is always reset to 90 when called again.
def Track(pan, tilt, center, target=Point(160, 120), threshold=Point(16, 24), delta=Point(4, 3)):
    global position
    position = 90
    if center.x > target.x + threshold.x:
        position = position - delta.x
You should move the initialization of position outside the function, as sketched below.
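A minimal sketch of that change (untested, since I don't have the hardware; it keeps your pwm.set_pwm calls and signature, splits pan and tilt into separate variables, and uses delta.y for the tilt axis, which I assume is what was intended):

pan_position = 90   # initialized once, at module level, so they persist across calls
tilt_position = 90

def Track(pan, tilt, center, target=Point(160, 120), threshold=Point(16, 24), delta=Point(4, 3)):
    global pan_position, tilt_position
    if center.x > target.x + threshold.x:
        pan_position -= delta.x
    elif center.x < target.x - threshold.x:
        pan_position += delta.x
    pwm.set_pwm(0, 0, pan_position)
    if center.y > target.y + threshold.y:
        tilt_position += delta.y
    elif center.y < target.y - threshold.y:
        tilt_position -= delta.y
    pwm.set_pwm(1, 0, tilt_position)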
Hope it helps.
TIP: if you fail to get many/any responses when you post issues, it's usually because the answer is staring at you and you need to either research it or check your code again.
First of all, I am new to programming, though I would like to learn, especially Python; my background is in animation and CGI.
I have Python 2.7 and OpenCV x64 installed on Windows. I tested the optical flow example they ship (opt_flow.py, the one with the green arrows). I like it, but I am trying to understand how I can get the data out as values. I am not interested in seeing the camera output or the green arrows; I just want the data out to use later. Is there a way to do that?
For example: the x and y values and the length of the green arrows.
Thank you all
You can get the optical flow vectors (the green arrows) in the draw_flow function of opt_flow.py. Here is how I would do it:
#!/usr/bin/env python
'''
example to show optical flow

USAGE: opt_flow.py [<video_source>]

Keys:
 1   - toggle HSV flow visualization
 2   - toggle glitch
 ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import math
import cv2
import video

def draw_flow(img, flow, step=16):
    global arrows
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        arrows.append([x1, y1, math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))])
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis

def draw_hsv(flow):
    h, w = flow.shape[:2]
    fx, fy = flow[:, :, 0], flow[:, :, 1]
    ang = np.arctan2(fy, fx) + np.pi
    v = np.sqrt(fx*fx + fy*fy)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[..., 0] = ang*(180/np.pi/2)
    hsv[..., 1] = 255
    hsv[..., 2] = np.minimum(v*4, 255)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return bgr

def warp_flow(img, flow):
    h, w = flow.shape[:2]
    flow = -flow
    flow[:, :, 0] += np.arange(w)
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    return res

if __name__ == '__main__':
    import sys
    print(__doc__)
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0

    arrows = []
    cam = video.create_capture(fn)
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    show_hsv = False
    show_glitch = False
    cur_glitch = prev.copy()

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray
        del arrows[:]  # list.clear() is Python 3 only; this also works on 2.7
        finalImg = draw_flow(gray, flow)
        print(arrows)
        cv2.imshow('flow', finalImg)
        if show_hsv:
            cv2.imshow('flow HSV', draw_hsv(flow))
        if show_glitch:
            cur_glitch = warp_flow(cur_glitch, flow)
            cv2.imshow('glitch', cur_glitch)
        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('1'):
            show_hsv = not show_hsv
            print('HSV flow visualization is', ['off', 'on'][show_hsv])
        if ch == ord('2'):
            show_glitch = not show_glitch
            if show_glitch:
                cur_glitch = img.copy()
            print('glitch is', ['off', 'on'][show_glitch])

    cv2.destroyAllWindows()
In the code above, I'm saving the optical flow vectors (start-point coordinates and vector length) in the global variable arrows, like so:
arrows.append([x1, y1, math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))])
with (x1, y1) the arrow's start point and (x2, y2) the arrow's end point.
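If you don't want the window or the arrows at all, you could also skip draw_flow and sample the flow field directly; a small sketch of that idea, using the same step grid as draw_flow:

import math
import numpy as np

def flow_vectors(flow, step=16):
    # returns [x, y, length] for a grid of flow samples, with no drawing at all
    h, w = flow.shape[:2]
    y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    return [[int(px), int(py), math.sqrt(vx*vx + vy*vy)]
            for px, py, vx, vy in zip(x, y, fx, fy)]

You would call vectors = flow_vectors(flow) right after calcOpticalFlowFarneback and drop the imshow calls.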
Hope it helps.