AttributeError: 'cv2.VideoCapture' object has no attribute 'get_frame' - python

Traceback (most recent call last):
  File "c:\Users\user\Desktop\face_recognition\face_recog.py", line 103, in <module>
    frame = face_recog.get_frame()
  File "c:\Users\user\Desktop\face_recognition\face_recog.py", line 41, in get_frame
    frame = self.capture.get_frame(self)
AttributeError: 'cv2.VideoCapture' object has no attribute 'get_frame'

def get_frame(self):
    # Grab a single frame of video
    frame = self.capture.get_frame()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if self.process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        self.face_locations = face_recognition.face_locations(rgb_small_frame)
        self.face_encodings = face_recognition.face_encodings(rgb_small_frame, self.face_locations)
        self.face_names = []
        for face_encoding in self.face_encodings:
            # See if the face is a match for the known face(s)
            distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
            min_value = min(distances)
            # tolerance: How much distance between faces to consider it a match. Lower is more strict.
            # 0.6 is typical best performance.
            name = "Unknown"
            if min_value < 0.6:
                index = np.argmin(distances)
                name = self.known_face_names[index]
            self.face_names.append(name)
    self.process_this_frame = not self.process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(self.face_locations, self.face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    return frame

def get_jpg_bytes(self):
    frame = self.get_frame()
    # We are using Motion JPEG, but OpenCV defaults to capture raw images,
    # so we must encode it into JPEG in order to correctly display the
    # video stream.
    ret, jpg = cv2.imencode('.jpg', frame)
    return jpg.tobytes()

# show the frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF

# if the `q` key was pressed, break from the loop
if key == ord("q"):
    break

# do a bit of cleanup
cv2.destroyAllWindows()
print('finish')
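cv2.VideoCapture has no get_frame() method; the standard way to grab a frame is VideoCapture.read(), which returns a success flag and the frame. A minimal sketch of the likely intent (an assumption, not the poster's code):

import cv2

# Sketch (assumed fix): read() is the documented VideoCapture API
capture = cv2.VideoCapture(0)
ret, frame = capture.read()   # ret is False if no frame could be grabbed
if ret:
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
capture.release()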

Related

how to videostream on anvil server

@anvil.server.callable
def track_criminal(track_face_encoding):
    face_locations = []
    face_encodings = []
    face_names = []
    video_capture = cv2.VideoCapture(0)
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces([track_face_encoding], face_encoding)
            name = "Unknown"
            face_distances = face_recognition.face_distance(track_face_encoding, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = "Detected"
            face_names.append(name)
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (255, 153, 153), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (255, 153, 153), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (53, 255, 153), 1)
        cv2.imwrite("image.jpeg", frame)
        media_objects = anvil.media.from_file("image.jpeg", 'image/jpeg')
        yield (media_objects)
# above code is on the jupyter notebook

def track_button_click(self, **event_args):
    track_image = anvil.server.call("track_encoding", self.file_loader_1.file)
    self.image_2.source = anvil.server.call("track_criminal", track_image)

# above code is on anvil
I am trying to build a face detection app on Anvil, but I am not able to stream video from my webcam. The error I am receiving is:
anvil.server.SerializationError: Cannot serialize return value from function. Cannot serialize <class 'generator'> object at msg['response']
at track, line 22
Can anyone tell me a way to stream video on Anvil? I tried using return instead, with a while loop on the Anvil side that repeatedly calls self.image_2.source = anvil.server.call("track_criminal", track_image) to load images until some condition is met, but I ran into two problems. First, the images were not returned continuously; the stream lagged and took a long time to switch from one image to the next. Second, after loading two or three images I got "AxisError: axis 1 is out of bounds for array of dimension 1".
Can anyone help me do face recognition via live streaming on Anvil?
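One possible workaround (not from this thread; the function and component names below are hypothetical): an Anvil server function cannot return a generator, but it can return one Media object per call, so the form can poll with a Timer component and replace the image on every tick. A sketch using anvil.BlobMedia, assuming the camera code runs on an Uplink/Jupyter process:

import anvil.server
import cv2

@anvil.server.callable
def grab_frame():
    # Hypothetical helper: capture one frame, JPEG-encode it, return it as Media.
    # (Re-opening the camera on every call is slow; a long-lived VideoCapture
    # object would be better, but this keeps the sketch self-contained.)
    capture = cv2.VideoCapture(0)
    ret, frame = capture.read()
    capture.release()
    if not ret:
        return None
    ok, jpg = cv2.imencode('.jpg', frame)
    return anvil.BlobMedia('image/jpeg', jpg.tobytes(), name='frame.jpg')

# Client-side form code (assumed): poll the server from a Timer component's tick event
def timer_1_tick(self, **event_args):
    self.image_2.source = anvil.server.call('grab_frame')

This trades smoothness for simplicity: each anvil.server.call is a round trip, so the effective frame rate is limited by the Timer interval and network latency.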

OpenCV video writing decreases FPS drastically. How to optimize performance?

I'm working on a project that involves object detection plus SORT tracking.
I have scripts that work with videos and a camera using OpenCV on the Coral Dev Board.
The main issue appears when I use VideoWriter to save the output of the detections.
For the camera script it drops the frame rate from 11 to 2.3 FPS, and for the video script from 6-7 to 2 FPS.
Is there a way to solve or optimize this issue?
Here is the portion of my code that grabs frames, detects and tracks, and then writes.
# Read frames
while(video.isOpened()):
    # Acquire frame and resize to expected shape [1xHxWx3]
    ret, frame = video.read()
    if not ret:
        break

    # Debug info
    frame_count += 1
    print("[INFO] Processing frame: {}".format(frame_count))

    if FLIP:
        frame = cv2.flip(frame, 1)
    if ROTATE != 0:
        frame = cv2.rotate(frame, ROTATE)  # Rotate image on given angle

    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Convert to RGB
    frame = cv2.resize(frame, (VIDEO_WIDTH, VIDEO_HEIGHT))  # resize frame to output dims
    frame_resized = cv2.resize(frame_rgb, (width, height))  # resize to fit tf model dims
    input_data = np.expand_dims(frame_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std

    # Initialize writer
    if (writer is None) and (SAVE_VIDEO):
        writer = cv2.VideoWriter(VIDEO_OUTPUT, cv2.VideoWriter_fourcc(*'XVID'), args.fps, (VIDEO_WIDTH, VIDEO_HEIGHT))

    # Perform the actual detection by running the model with the image as input
    #s_detection_time = time.time()
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    #e_detection_time = time.time()
    #print("[INFO] Detection time took: {} seconds".format(e_detection_time-s_detection_time))

    # Retrieve detection results
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]    # Bounding box coordinates of detected objects
    classes = interpreter.get_tensor(output_details[1]['index'])[0]  # Class index of detected objects
    scores = interpreter.get_tensor(output_details[2]['index'])[0]   # Confidence of detected objects
    #num = interpreter.get_tensor(output_details[3]['index'])[0]     # Total number of detected objects (inaccurate and not needed)
    #print("[INFO] Boxes: {}".format(boxes))

    detections = np.array([[]])

    #s_detections_loop = time.time()
    # Loop over all detections and draw detection box if confidence is above minimum threshold
    for i in range(len(scores)):
        if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
            #print("[INFO] Box ", i, ": ", boxes[i])
            # Get bounding box coordinates and draw box
            # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
            ymin = int(max(1, (boxes[i][0] * VIDEO_HEIGHT)))
            xmin = int(max(1, (boxes[i][1] * VIDEO_WIDTH)))
            ymax = int(min(VIDEO_HEIGHT, (boxes[i][2] * VIDEO_HEIGHT)))
            xmax = int(min(VIDEO_WIDTH, (boxes[i][3] * VIDEO_WIDTH)))

            # Calculate centroid of bounding box
            #centroid_x = int((xmin + xmax) / 2)
            #centroid_y = int((ymin + ymax) / 2)

            # Format detection for sort and append to current detections
            detection = np.array([[xmin, ymin, xmax, ymax]])
            #f.write("Box {}: {}\n".format(i, detection[:4]))
            #print("[INFO] Size of detections: ", detections.size)
            if detections.size == 0:
                detections = detection
            else:
                detections = np.append(detections, detection, axis=0)

            # Draw a circle indicating centroid
            #print("[INFO] Centroid of box ", i, ": ", (centroid_x, centroid_y))
            #cv2.circle(frame, (centroid_x, centroid_y), 6, (0, 0, 204), -1)

            # Calculate area of rectangle
            #obj_height = (ymin + ymax)
            #print("[INFO] Object height: ", obj_height)

            # Check if centroid passes ROI
            # Draw the bounding box
            #cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (0, 0, 255), 4)
            #print("[INFO] Object passing ROI")
            #print("[INFO] Object height: ", obj_height)
            #counter += 1
            #print("[INFO] Object out of ROI")
            # Draw the bounding box
            #cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 4)
            #print("[INFO] Total objects counted: ", counter)

            # Draw label
            """object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
            label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
            label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
            """
            #f.write("\n")

    #e_detection_loop = time.time()
    #print("[INFO] Detection loop time took {} seconds".format(e_detection_loop-s_detections_loop))

    #s_tracker_update = time.time()
    # Update sort tracker
    print("[INFO] Current Detections: ", detections.astype(int))
    objects_tracked = tracker.update(detections.astype(int))
    #e_tracker_update = time.time()
    #print("[INFO] Updating trackers state took {} seconds".format(e_tracker_update-s_tracker_update))

    #s_draw_tracked = time.time()
    # Process every tracked object
    for object_tracked in objects_tracked:
        if object_tracked.active:
            bbox_color = (0, 128, 255)
        else:
            bbox_color = (10, 255, 0)
        bbox = object_tracked.get_state().astype(int)
        # Draw the bbox rectangle
        cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), bbox_color, 4)
        # Calculate centroid of bounding box
        centroid = (object_tracked.last_centroid[0], object_tracked.last_centroid[1])
        # Draw the centroid
        cv2.circle(frame, centroid, 6, (0, 0, 204), -1)
        label = '{} [{}]'.format(OBJECT_NAME, object_tracked.id)  # Example: 'object [1]'
        labelSize, baseLine = cv2.getTextSize(label, FONT, 0.7, 2)  # Get font size
        label_ymin = max(bbox[1], labelSize[1] + 10)  # Make sure not to draw label too close to top of window
        cv2.rectangle(frame, (bbox[0], label_ymin-labelSize[1]-10), (bbox[0]+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)  # Draw white box to put label text in
        cv2.putText(frame, label, (bbox[0], label_ymin-7), FONT, 0.7, (0, 0, 0), 2)  # Draw label text
    #e_draw_tracked = time.time()
    #print("[INFO] Drawing tracked objects took {} seconds".format(e_draw_tracked-s_draw_tracked))

    # Update fps count
    fps.update()
    fps.stop()

    # Prepare fps display
    fps_label = "FPS: {0:.2f}".format(fps.fps())
    cv2.rectangle(frame, (0, 0), (int(VIDEO_WIDTH*0.6), int(VIDEO_HEIGHT*0.07)), (255, 255, 255), cv2.FILLED)
    cv2.putText(frame, fps_label, (int(VIDEO_WIDTH*0.01), int(VIDEO_HEIGHT*0.05)), FONT, 1.5, (10, 255, 0), 3)

    # Prepare total and active objects count display
    total_objects_text = "TOTAL {}S: {}".format(OBJECT_NAME, tracker.total_trackers)
    active_objects_text = "ACTIVE {}S: {}".format(OBJECT_NAME, tracker.active_trackers)
    cv2.putText(frame, total_objects_text, (int(VIDEO_WIDTH*0.1+VIDEO_WIDTH*0.06), int(VIDEO_HEIGHT*0.05)), FONT, 1.5, (0, 0, 255), 3)    # Draw label text
    cv2.putText(frame, active_objects_text, (int(VIDEO_WIDTH*0.1+VIDEO_WIDTH*0.27), int(VIDEO_HEIGHT*0.05)), FONT, 1.5, (0, 128, 255), 3)  # Draw label text

    # Draw horizontal boundaries
    cv2.line(frame, (LEFT_BOUNDARY, int(VIDEO_HEIGHT*0.07)), (LEFT_BOUNDARY, VIDEO_HEIGHT), (0, 255, 255), 4)
    #cv2.line(frame, (RIGHT_BOUNDARY, 0), (RIGHT_BOUNDARY, VIDEO_HEIGHT), (0, 255, 255), 4)

    #s_trackers_state = time.time()
    tracker.update_trackers_state()
    #e_trackers_state = time.time()
    #print("[INFO] Updating trackers state took {} seconds".format(e_trackers_state-s_trackers_state))

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)

    # Center window
    if not IS_CENTERED:
        cv2.moveWindow('Object detector', 0, 0)
        IS_CENTERED = True

    if SAVE_VIDEO:
        writer.write(frame)

    print("\n\n")

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break
Thanks in advance for any help!
An important thing when trying to optimize or improve code performance is to profile and measure your code execution. Only after identifying what is actually causing the bottleneck or performance decrease can you work on improving those sections of code. For this approach I'm assuming that you're both reading and saving frames in the same thread. If you're facing a performance reduction due to I/O latency, this method can help; if instead your problem comes from CPU processing limitations, this method will not give you a performance boost.
That being said, the approach is to use threading. The idea is to create a separate thread for obtaining frames, since cv2.VideoCapture.read() is blocking. This can be expensive and cause latency, as the main thread has to wait until it has obtained a frame. By putting this operation into a dedicated thread that just grabs frames, while processing/saving the frames in the main thread, performance improves dramatically because the I/O latency is hidden. Here's a simple example of how to use threading to read frames in one thread and show/save frames in the main thread. Be sure to change capture_src to your stream.
Code
from threading import Thread
import cv2

class VideoWritingThreading(object):
    def __init__(self, src=0):
        # Create a VideoCapture object
        self.capture = cv2.VideoCapture(src)

        # Default resolutions of the frame are obtained (system dependent)
        self.frame_width = int(self.capture.get(3))
        self.frame_height = int(self.capture.get(4))

        # Set up codec and output video settings
        self.codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        self.output_video = cv2.VideoWriter('output.avi', self.codec, 30, (self.frame_width, self.frame_height))

        # Start the thread to read frames from the video stream
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # Read the next frame from the stream in a different thread
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()

    def show_frame(self):
        # Display frames in main program
        if self.status:
            cv2.imshow('frame', self.frame)

        # Press Q on keyboard to stop recording
        key = cv2.waitKey(1)
        if key == ord('q'):
            self.capture.release()
            self.output_video.release()
            cv2.destroyAllWindows()
            exit(1)

    def save_frame(self):
        # Save obtained frame into video output file
        self.output_video.write(self.frame)

if __name__ == '__main__':
    capture_src = 'your stream link!'
    video_writing = VideoWritingThreading(capture_src)
    while True:
        try:
            video_writing.show_frame()
            video_writing.save_frame()
        except AttributeError:
            pass

OpenCV waitKey in python doesn't work on mac

I'm writing face recognition code in Python and I'm using OpenCV on a Mac (PyCharm).
I don't understand why:

if cv2.waitKey(1) & 0xFF == ord('q'):
    break

doesn't work on the Mac, while the same code works on Windows. In particular, pressing q doesn't trigger the if.
I tried changing it to:

k = cv2.waitKey(0)
if k == 27:
    break

My code is below:
def run(self):
    video_capture = cv2.VideoCapture(0)
    try:
        while self.active:
            # Grab a single frame of video
            ret, frame = video_capture.read()
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_frame = frame[:, :, ::-1]
            # Find all the faces and face encodings in the frame of video
            face_locations = face_recognition.face_locations(rgb_frame)
            face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
            # Loop through each face in this frame of video
            for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
                if (name != "Unknown"):
                    self.reco.emit(name)
                    self.deactivate()
                    video_capture.release()
                    cv2.destroyAllWindows()
                else:
                    self.unreco.emit()
                    self.deactivate()
                    video_capture.release()
                    cv2.destroyAllWindows()
            # Display the resulting image
            # cv2.imshow('Video', frame)
            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    except:
        print("error while recognize")

OpenCV 3 Python 3 calibrate camera for radial distortion gives error

I have a problem running a function in Python 3 with OpenCV 3. I want to undistort a video using calibration data from a .npz file, and I get an error message on the remapping line.
The undistorted video output is great, but it comes with an error message that prevents any further execution. How could I make the script ignore the error and continue?
Any answer will be greatly appreciated.
This is the code:
import cv2, time, sys
import numpy as np
import matplotlib.pyplot as plt

filename = 'vtest3.mp4'
filename_2 = 'vtest3_undistort.mp4'
crop = 0.5

# FIRST FUNCTION (UNDISTORTION)
def undistortion(filename):
    print('Loading data files')

    npz_calib_file = np.load('calibration_data.npz')
    distCoeff = npz_calib_file['distCoeff']
    intrinsic_matrix = npz_calib_file['intrinsic_matrix']
    npz_calib_file.close()

    print('Finished loading files')
    print(' ')
    print('Starting to undistort the video....')

    # Opens the video import and sets parameters
    video = cv2.VideoCapture(filename)

    # Checks to see if the video was properly imported
    status = video.isOpened()

    if status == True:
        FPS = video.get(cv2.CAP_PROP_FPS)
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        size = (int(width), int(height))
        total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
        frame_lapse = (1 / FPS) * 1000

        # Initializes the export video file
        codec = cv2.VideoWriter_fourcc(*'DIVX')
        video_out = cv2.VideoWriter(str(filename[:-4]) + '_undistort.mp4', codec, FPS, size, 1)

        # Initializes the frame counter
        current_frame = 0

        newMat, ROI = cv2.getOptimalNewCameraMatrix(intrinsic_matrix, distCoeff, size, alpha=crop,
                                                    centerPrincipalPoint=1)
        mapx, mapy = cv2.initUndistortRectifyMap(intrinsic_matrix, distCoeff, None, newMat, size, m1type=cv2.CV_32FC1)

        while current_frame < total_frames:
            success, image = video.read()
            current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
            dst = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)
            # dst = cv2.undistort(image, intrinsic_matrix, distCoeff, None)
            video_out.write(dst)

        video.release()
        video_out.release()
    else:
        print('Error: Could not load video')
        sys.exit()

# SECOND FUNCTION
def hauptprogramm(filename_2):
    # Create empty lists for x- and y-positions that will be filled in the for-loop
    # with the .append function
    xposition = []  # Empty list for x-positions
    yposition = []  # Empty list for y-positions

    # Open the video with the VideoCapture function
    # Name the video that is opened cap
    cap = cv2.VideoCapture(filename_2)

    # set the frame number to the last frame just to take a picture
    cap.set(1, cap.get(cv2.CAP_PROP_FRAME_COUNT))  # Where frame_no is the frame you want
    ret, frame = cap.read()  # Read the frame
    cv2.imwrite('kurve.jpg', frame)

    # set the frame number to 1, which is the first frame of the video
    # from here we begin to analyse the video
    cap.set(1, 1)

    # cv2.imshow('window_name', frame)  # show frame on window
    # pos_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)

    # start an infinite while loop by setting while True
    # the loop will go on until we break it
    while True:
        # read a frame of the video and name it frame0
        # read the next frame of the video and name it frame1
        # unpack both return values by writing ret, frame
        ret, frame0 = cap.read()
        ret, frame1 = cap.read()
        ret, frame2 = cap.read()

        # the program will later on compare every pixel from frame0 to frame1 and
        # locate the areas where many pixels have changed
        # we are only interested in the area where the missile is flying
        # the moving of the catapult itself for example would cause trouble
        # so we paint lines over the areas we are not interested in; these lines
        # will be painted every loop over frame1
        cv2.line(frame1, (0, 0), (0, 511), (255, 0, 0), 200)
        cv2.line(frame1, (0, 500), (1000, 500), (255, 0, 0), 350)
        cv2.line(frame1, (0, 350), (150, 350), (255, 0, 0), 400)

        # the same lines will be painted over frame2
        cv2.line(frame2, (0, 0), (0, 511), (255, 0, 0), 200)
        cv2.line(frame2, (0, 500), (1000, 500), (255, 0, 0), 350)
        cv2.line(frame2, (0, 350), (150, 350), (255, 0, 0), 400)

        # for the function that will compare frame1 and frame2, both frames have
        # to be in black and white
        # the next function will be used to convert frame1 and frame2 to black and
        # white and will name them gray1 and gray2
        gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

        # the absdiff function will compare the gray1 and gray2 images and returns
        # an image named diffimage
        diffimage = cv2.absdiff(gray1, gray2)

        # the result of that function returns a black and white image
        # to get clear results the image will be converted to a binary image
        # the new image called diffimage only has pixels that are white or black
        ret, threshold = cv2.threshold(diffimage, 30, 255, cv2.THRESH_BINARY)

        # to reduce the image noise we will make the threshold image blurry by using
        # the blur function
        threshold = cv2.blur(threshold, (25, 25))

        # when converting the blurry image into a threshold image for a second
        # time the small noisy pixels of the image will be gone and the areas where
        # a lot of pixels have changed will be clearer and bigger
        ret, threshold = cv2.threshold(threshold, 30, 255, cv2.THRESH_BINARY)

        # ======================== find the white areas ==================
        # now we want to find the white areas in the picture that show
        # our flying missile
        # !exact function not yet understood
        image, contours, hierarchy = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # !exact function not yet understood
        for cnt in contours:
            area = cv2.contourArea(cnt)

            # to reduce the chance of tracking background noise that might be left
            # even after reducing it with the blur and threshold functions
            # we only track the position if the white area is bigger than x=10
            if area > 10:
                # we are looking for the bounding rectangle that is around our area
                # x and y equal the pixel position of the top right point of the rectangle
                # w and h equal the width and height of the rectangle
                x, y, w, h = cv2.boundingRect(cnt)

                # we are interested in the center of the rectangle so we add w/2 to x
                # and h/2 to y
                # because the x- and y-positions have to be integers we round the values
                # and declare them as such
                xpos = int(round(x + w / 2))
                ypos = int(round(y + h / 2))

                # to see if everything works fine while running the program the positions
                # can be printed in the console by removing the hashtag
                # print("x= ", xpos)
                # print("y= ", ypos)

                # now the lists of x- and y-positions will be extended every loop
                # with these lists of positions we can work later on for the
                # post-processing of the curve the object is flying
                xposition.append(xpos)
                yposition.append(ypos)

                # not necessary to post-process but a nice visualisation and check
                # that everything works as it is supposed to
                # a small circle will be painted on the x- and y-position
                # therefore we take the "empty" frame0 that does not have the colored
                # lines painted into it
                cv2.circle(frame0, (xpos, ypos), 5, (0, 255, 0), -1)

                # a rectangle will surround the center just to keep track of the
                # moving object
                cv2.rectangle(frame0, (x, y), (x + w, y + h), (255, 0, 0), 3)

                # write the x- and y-position in each frame
                cv2.putText(frame0, 'x = ' + str(xpos), (xpos - 20, ypos - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 0, 255), 2, cv2.LINE_AA)
                cv2.putText(frame0, 'y = ' + str(ypos), (xpos - 20, ypos + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 0, 255), 2, cv2.LINE_AA)

        # log the x-positions in the console
        # print(xposition)

        # ========================= Open windows ====================
        # the imshow functions open a window and show the selected frame each
        # while loop (play the video)
        # by removing the hashtag the selected video will open when running the
        # program
        # 1. Show the original frames and paint the previous visualisation
        cv2.imshow('Orginal mit Verfolgung', frame0)
        # 2. Show the threshold frames
        cv2.imshow('threshold frames', threshold)
        # 3. Show the painted lines in frame1 and frame2 (same parameters)
        cv2.imshow('mit Balken', frame1)

        # if we press the ESC button the loop will break
        if cv2.waitKey(10) == 27:  # exit with ESC
            break

        # if we reach the total number of frames the loop will break
        if cap.get(cv2.CAP_PROP_POS_FRAMES) == cap.get(cv2.CAP_PROP_FRAME_COUNT):
            # If the number of captured frames is equal to the total number of frames,
            # we stop
            break

    # after we break the loop we release the cap and force all windows to close
    cap.release()
    cv2.destroyAllWindows()

    # does not make sense, just a way to fix a bug in opencv
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(1)
    cv2.waitKey(100)

    # =================== Playground ====================
    # cap2 = cv2.VideoCapture("vtest3.m4v")
    # while True:
    #     cap2.set(1, cv2.CAP_PROP_FRAME_COUNT)
    #     ret, frame = cap.read()
    #     cv2.imshow('window_name', frame)
    #
    #     if cv2.waitKey(10) == 27:  # exit with ESC
    #         break
    #
    # cap.release()
    # cv2.destroyAllWindows()
    # cv2.waitKey(1)
    # cv2.waitKey(1)
    # cv2.waitKey(1)
    # cv2.waitKey(1)
    # cv2.waitKey(1)
    # cv2.waitKey(1)
    # cv2.waitKey(1)

    # ========== Open and save the plot ===================
    plt.plot(np.asarray(xposition), np.asarray(yposition), 'g^')
    plt.axis('equal')
    plt.savefig('positionen.png')

    # ============== Read and evaluate the image ===============
    image = cv2.imread('kurve.jpg')
    for i in range(0, len(xposition)):
        cv2.circle(image, (xposition[i], yposition[i]), 5, (0, 255, 0), -1)
    cv2.imwrite('positionen2.png', image)

undistortion(filename)
hauptprogramm(filename_2)
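A hedged guess at the cause (the traceback is not shown in the question): on the last iterations video.read() can return success == False with image == None, and passing None to cv2.remap raises an error. Checking the success flag before remapping, as in the sketch below (which reuses video, mapx, mapy and video_out from the undistortion() loop above), would let the loop stop cleanly instead of failing:

while current_frame < total_frames:
    success, image = video.read()
    current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
    if not success or image is None:
        break  # stop instead of remapping an empty frame
    dst = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)
    video_out.write(dst)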

How to obtain time markers for video splitting using python/OpenCV

I'm working on my MSc project, which is researching automated deletion of low-value content in digital file stores. I'm specifically looking at the sort of long shots that often occur in natural history filming, whereby a static camera is left rolling in order to capture the rare snow leopard or whatever. These shots may only have some 60s of useful content, with perhaps several hours of worthless content either side.
As a first step I have a simple motion detection program from Adrian Rosebrock's tutorial [http://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/#comment-393376]. Next I intend to use FFMPEG to split the video.
What I would like help with is how to get in and out points based on the first and last points at which motion is detected in the video.
Here is the code should you wish to see it...
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)
# otherwise, we are reading from a video file
else:
    camera = cv2.VideoCapture(args["video"])

# initialize the first frame in the video stream
firstFrame = None

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    (grabbed, frame) = camera.read()
    text = "Unoccupied"

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and
    # first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
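One way to obtain in and out points (a sketch, not from the original post; the helper name and the min_area default are assumptions) is to record the capture's millisecond position, cv2.CAP_PROP_POS_MSEC, the first and last time a large-enough contour is found:

import cv2

def motion_in_out_points(path, min_area=500):
    # Hypothetical helper: scan a video file and return (in, out) times in seconds
    # based on the first and last frames that contain motion, using the same
    # frame-differencing idea as the code above (resizing omitted for brevity).
    camera = cv2.VideoCapture(path)
    firstFrame = None
    first_ms, last_ms = None, None
    while True:
        grabbed, frame = camera.read()
        if not grabbed:
            break
        gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        if firstFrame is None:
            firstFrame = gray
            continue
        thresh = cv2.threshold(cv2.absdiff(firstFrame, gray), 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # [-2] picks the contour list in both OpenCV 3 and OpenCV 4 return formats
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if any(cv2.contourArea(c) >= min_area for c in cnts):
            t = camera.get(cv2.CAP_PROP_POS_MSEC)  # current position in the file, in ms
            if first_ms is None:
                first_ms = t
            last_ms = t
    camera.release()
    return None if first_ms is None else (first_ms / 1000.0, last_ms / 1000.0)

The returned pair of timestamps (in seconds) can then be passed to an ffmpeg -ss/-to command to split out just the useful segment.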
