failing to save video after applying saliency - python

I am attempting to save the output of the code proposed here: https://www.programmersought.com/article/34317272452/
The output video file is created, but when I try to play it back I get the message 'an error occurred, could not demultiplex stream'. I have also tried several other ways to save the video; for example, I used the shape of the frames for the writer, but that was not recognised either. Could anyone help me figure out what I am doing wrong?
def read_video(video):
    # Saliency detection algorithm
    saliency_algorithm = "BinWangApr2014"
    start_frame = 0
    if saliency_algorithm is None or video is None:
        print('Please set saliency_algorithm and video')
        return
    cap = cv2.VideoCapture(video)
    video = cv2.VideoWriter('out_j.avi', cv2.VideoWriter_fourcc(*'XVID'), 5, (360, 360))
    # Set the video start frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    _, frame = cap.read()
    if frame is None:
        print('Please set saliency_algorithm and video')
        return
    image = frame.copy()
    if saliency_algorithm.find("BinWangApr2014") == 0:
        saliency_algorithm = cv2.saliency.MotionSaliencyBinWangApr2014_create()
        # set the size of the data structure
        saliency_algorithm.setImagesize(image.shape[1], image.shape[0])
        # Initialization
        saliency_algorithm.init()
        paused = False
        while True:
            if not paused:
                _, frame = cap.read()
                if frame is None:
                    break
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                start = cv2.getTickCount()
                success, saliencyMap = saliency_algorithm.computeSaliency(frame)
                duration = (cv2.getCPUTickCount() - start) / \
                           cv2.getTickFrequency()
                # print("computeBinaryMap cost time is: {} ms".format(duration * 1000))
                video.write(frame)
                # cv2.imshow('image', frame)
                cv2.imshow('saliencyMap', saliencyMap * 255)
            c = cv2.waitKey(2)
            c = chr(c) if c != -1 else 0
            if c == 'q':
                break
            if c == 'p':
                paused = not paused
            # video.write(frame)
        cv2.destroyAllWindows()
        video.release()
        # camera.stop()
    return
Thanks in advance
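One thing worth knowing about cv2.VideoWriter: it silently drops any frame whose size or channel count does not match the parameters the writer was opened with, and the resulting file is typically unplayable ('could not demultiplex stream'). Below is a minimal sketch of a writer whose parameters match the grayscale frames being written; the input path and the resize to 360x360 are assumptions, while the filename, codec and fps are taken from the code above:
import cv2

cap = cv2.VideoCapture("input.avi")                       # hypothetical input path
writer = cv2.VideoWriter('out_j.avi',
                         cv2.VideoWriter_fourcc(*'XVID'),
                         5,
                         (360, 360),                      # (width, height) of every written frame
                         isColor=False)                   # required for single-channel frames

while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (360, 360))                   # size must match the writer exactly
    writer.write(gray)

cap.release()
writer.release()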

Related

I want to print the rgb values of frames of a video in python

I want to print the RGB values of the frames of a video in Python. I tried writing the code below, but the for loop where newframe is printed appears to go into an infinite loop. How can I overcome this? The output should be the RGB value of each pixel. The code is attached below.
import numpy
import numpy as np
import cv2 as cv

count = 0

def messageToBinary(message):
    if type(message) == str:
        return ''.join([format(ord(i), "08b") for i in message])
    elif type(message) == bytes or type(message) == np.ndarray:
        return [format(i, "08b") for i in message]
    elif type(message) == int or type(message) == np.uint8:
        return format(message, "08b")
    else:
        raise TypeError("Input type not supported")

vidcap = cv.VideoCapture("video.mp4")
if not vidcap.isOpened():
    print("Cannot open")
    exit()
while True:
    # Capture frame-by-frame
    ret, frame = vidcap.read()
    # -------------------------------------------------------------> step 2 - split
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # Our operations on the frame come here
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv.imshow('frame', gray)
    width, height, d = frame.shape
    print("reshaped...")
    row = int(width * height)
    newframe = frame.reshape(row, 3)
    # print(newframe)
    # print(newframe[i][j])
    # the loop below is going into infinite looping
    for i in range(0, row):
        for j in range(0, 3):
            print(newframe[i][j])
    if cv.waitKey(1) == ord('q'):
        break
# When everything is done, release the capture
vidcap.release()
cv.destroyAllWindows()
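For scale: a 640x480 frame has 307,200 pixels, so the nested loop above issues over 900,000 print calls per frame; it is not actually infinite, just extremely slow. A small sketch of dumping the per-pixel values in a single call instead (the values.txt filename is an assumption, and note that OpenCV stores channels in B, G, R order):
import numpy as np
import cv2 as cv

cap = cv.VideoCapture("video.mp4")
ret, frame = cap.read()
if ret:
    h, w, _ = frame.shape
    pixels = frame.reshape(h * w, 3)             # one row per pixel, columns are B, G, R
    print("pixels in this frame:", h * w)        # shows why a per-pixel print loop is so slow
    np.savetxt("values.txt", pixels, fmt="%d")   # write all values at once
cap.release()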

YOLOv5: how to reduce the frame rate while using the camera as a source?

I am working on a project to predict pedestrian paths using their history, so I am following this paper and trying to implement the missing annotation step from
https://github.com/JunweiLiang/Multiverse/blob/master/SimAug
I used YOLOv5 for this, but the camera runs at 30 fps and it takes a long time to process all of those frames, so I want to decrease the effective frame rate so that, for example, only 2 frames per second go through the pipeline
(detection --> tracking) --> segmentation --> prediction, and so on.
So basically, here is what I did:
import numpy as np
import cv2
import datetime
import queue
from threading import Thread

# global variables
stop_thread = False  # controls thread execution

def start_capture_thread(cap, queue):
    # global stop_thread
    i = 0
    # continuously read frames from the camera
    while True:
        _, img = cap.read()
        queue.put(img)
        # cv2.imwrite('Images/frame{:d}.jpg'.format(i), img)
        i = i + 1
        # if (stop_thread):
        #     break

def main():
    global stop_thread
    # create display window
    cv2.namedWindow("webcam", cv2.WINDOW_NORMAL)
    # initialize webcam capture object
    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
    # retrieve properties of the capture object
    cap_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    cap_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    cap_fps = cap.get(cv2.CAP_PROP_FPS)
    print('* Capture width:', cap_width)
    print('* Capture height:', cap_height)
    print('* Capture FPS:', cap_fps)
    # create a queue
    frames_queue = queue.Queue(maxsize=0)
    # start the capture thread: reads frames from the camera (non-stop) and stores the result in img
    t = Thread(target=start_capture_thread, args=(cap, frames_queue,), daemon=True)  # a daemon thread is killed when the application exits
    t.start()
    # initialize time and frame count variables
    last_time = datetime.datetime.now()
    frames = 0
    cur_fps = 0
    i = 0
    while (True):
        if (frames_queue.empty()):
            continue
        if i % 5 != 0:
            _ = frames_queue.get()
            i += 1
            continue
        # blocks until the entire frame is read
        frames += 1
        # # measure runtime: current_time - last_time
        # delta_time = datetime.datetime.now() - last_time
        # elapsed_time = delta_time.total_seconds()
        # # compute fps but avoid division by zero
        # if (elapsed_time != 0):
        #     cur_fps = np.around(frames / elapsed_time, 1)
        # retrieve an image from the queue
        img = frames_queue.get()
        cv2.imwrite('Images/frame{:d}.jpg'.format(i), img)
        i += 1
        # for i in range(10):
        #     _ = frames_queue.get()
        #     i += 1
        # TODO: process the image here if needed
        # draw FPS text and display image
        # if (img is not None):
        #     cv2.putText(img, 'FPS: ' + str(cur_fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
        #     cv2.imshow("webcam", img)
        # wait 1ms for ESC to be pressed
        key = cv2.waitKey(1)
        if (key == 27):
            stop_thread = True
            break
    # release resources
    cv2.destroyAllWindows()
    cap.release()

if __name__ == "__main__":
    main()
This works, so I want to use it in track.py in YOLOv5 instead of
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
but I can't modify it, because it is used in the code like this:
for frame_idx, (path, img, im0s, vid_cap, s) in enumerate(dataset):
and I couldn't fully understand this line. So I wanted to modify the LoadStreams function in https://github.com/ultralytics/yolov5/blob/master/utils/datasets.py, but I am kind of stuck here.
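The frame-skipping idea can be prototyped outside YOLOv5 as a plain generator first. This is only a minimal sketch, not the actual LoadStreams class (which yields the (path, img, im0s, vid_cap, s) tuple shown above), and the names below are placeholders:
import cv2

def skipping_stream(source=0, keep_every=15):
    # yields roughly 1 out of every `keep_every` frames from the capture source,
    # e.g. a 30 fps camera with keep_every=15 gives about 2 frames per second
    cap = cv2.VideoCapture(source)
    i = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if i % keep_every == 0:
            yield i, frame
        i += 1
    cap.release()

# usage sketch: the same loop shape as the dataset iteration, but only over kept frames
for frame_idx, img in skipping_stream(0, keep_every=15):
    pass  # run detection --> tracking --> segmentation --> prediction on img here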

OpenCV fails to save video when cropped using NumPy slicing

I'm using OpenCV (3.4.6) to capture video from a Raspberry Pi HQ camera, attached to a Raspberry Pi 4 running Raspbian 10. Bounding boxes suggested by an object detection algorithm are then drawn onto the captured frames, and the output saved to a video file.
Everything works great, except when I try to crop the video to my region of interest.
Using the following line successfully crops the image (as confirmed by cv2.imshow):
frame = frame[0:720, 280:1000]
But, it also stops the output being saved by out.write - I just get an empty file, and being new to both Python and OpenCV, I'm struggling to work out why.
I've trimmed down the code to the relevant part below (apologies - it's still a bit long - I'm not entirely sure what might be important):
# begin video capture
try:
    vid = cv2.VideoCapture(int(video_path), cv2.CAP_V4L2)
except:
    vid = cv2.VideoCapture(video_path)

out = None

# set capture resolution
inputcodec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')  # forces MJPG codec
vid.set(6, inputcodec)  # sets inputcodec for video capture
vid.set(3, 1280)        # sets video capture width
vid.set(4, 720)         # sets video capture height

if FLAGS.output:
    # by default VideoCapture returns float instead of int
    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(vid.get(cv2.CAP_PROP_FPS))
    codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
    out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

while True:
    return_value, frame = vid.read()
    frame = frame[0:720, 280:1000]  # crop video to square region of interest
    if return_value:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(frame)
    else:
        print('Video has ended or failed, try a different video format!')
        break

    frame_size = frame.shape[:2]
    image_data = cv2.resize(frame, (input_size, input_size))
    image_data = image_data / 255.
    image_data = image_data[np.newaxis, ...].astype(np.float32)
    start_time = time.time()

    if FLAGS.framework == 'tflite':
        interpreter.set_tensor(input_details[0]['index'], image_data)
        interpreter.invoke()
        pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
        if FLAGS.model == 'yolov3' and FLAGS.tiny == True:
            boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
        else:
            boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
    else:
        batch_data = tf.constant(image_data)
        pred_bbox = infer(batch_data)
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]

    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
        scores=tf.reshape(
            pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
        max_output_size_per_class=50,
        max_total_size=50,
        iou_threshold=FLAGS.iou,
        score_threshold=FLAGS.score
    )
    pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
    image = utils.draw_bbox(frame, pred_bbox)
    fps = 1.0 / (time.time() - start_time)
    print("FPS: %.2f" % fps)
    result = np.asarray(image)
    cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
    result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if not FLAGS.dont_show:
        cv2.imshow("result", result)
    if FLAGS.output:
        out.write(result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
Any suggestions for how to resolve this would be greatly appreciated.
You created the VideoWriter and set its width and height to 1280 by 720. Later you told it to save 720 by 720 crops. That is the problem. frame = frame[0:720, 280:1000] is a 720 by 720 array.
Decide what dimensions your video should have, and use them consistently. A video stream can't change resolution.
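As an illustration of that answer (a sketch only; the codec, fps and output path are assumptions, while the crop bounds are the ones from the question), the writer can simply be opened with the cropped dimensions:
import cv2

# crop from the question: rows 0-720, columns 280-1000 -> a 720x720 region
y0, y1, x0, x1 = 0, 720, 280, 1000
crop_size = (x1 - x0, y1 - y0)               # (width, height) = (720, 720)

vid = cv2.VideoCapture(0)
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('cropped.avi', codec, 25, crop_size)  # writer size matches the crop

while True:
    ok, frame = vid.read()
    if not ok:
        break
    crop = frame[y0:y1, x0:x1]               # 720x720, exactly what the writer expects
    out.write(crop)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

vid.release()
out.release()
cv2.destroyAllWindows()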

GluonCV object detector - inference on batches of images

I'm running object detection on a webcam stream or on video files, using Python on Ubuntu 18.04. So far, I've been running inference frame by frame, using this code:
def main():
    ctx = mx.gpu(0)
    # Load pretrained model
    net = gcv.model_zoo.get_model('ssd_512_mobilenet1.0_coco', pretrained=True)
    net.hybridize()
    # Load the webcam handler
    cap = cv2.VideoCapture(0)
    count_frame = 0
    while(True):
        print(f"Frame: {count_frame}")
        # Load frame from the camera
        ret, frame = cap.read()
        # print(type(frame))
        if (cv2.waitKey(25) & 0xFF == ord('q')) or (ret == False):
            cv2.destroyAllWindows()
            cap.release()
            print("Done!!!")
            break
        # Image pre-processing
        frame = mx.nd.array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).astype('uint8')
        frame_nd, frame_np = gcv.data.transforms.presets.ssd.transform_test(frame, short=512, max_size=700)
        # Run frame through network
        frame_nd = frame_nd.as_in_context(ctx)
        class_IDs, scores, bounding_boxes = net(frame_nd)
        # Display result with cv
        img = gcv.utils.viz.cv_plot_bbox(frame_np, bounding_boxes[0], scores[0], class_IDs[0], thresh=0.3, class_names=net.classes)
        gcv.utils.viz.cv_plot_image(img)
        count_frame += 1
    cv2.destroyAllWindows()
    cap.release()

if __name__ == "__main__":
    main()
I wanted to try an alternative version, where I don't perform the detection frame-by-frame, but on batches of frames.
I tried it this way:
- create an empty list at the beginning;
- append every frame (after image pre-processing) to the list;
- after N frames (say, N=50), convert the list to an mx.nd.array;
- feed said array to the model.
So, with this code:
def main():
    ctx = mx.gpu(0)
    # Load a pretrained model
    net = gcv.model_zoo.get_model('ssd_512_mobilenet1.0_coco', pretrained=True)
    net.hybridize()
    # Load the webcam handler
    cap = cv2.VideoCapture(0)
    count_frame = 0
    batch = []
    while(True):
        print(f"Frame: {count_frame}")
        # Load frame from the camera
        ret, frame = cap.read()
        if (cv2.waitKey(25) & 0xFF == ord('q')) or (ret == False):
            cv2.destroyAllWindows()
            cap.release()
            print("Done!!!")
            break
        # Image pre-processing
        frame = mx.nd.array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).astype('uint8')
        frame_nd, frame_np = gcv.data.transforms.presets.ssd.transform_test(frame, short=512, max_size=700)
        batch.append(frame_nd)
        if (count_frame > 0) and (count_frame % 50 == 0):
            print("\tStarting detection.")
            batch_nd = mx.nd.array(batch)
            batch_nd = batch_nd.as_in_context(ctx)
            class_IDs, scores, bounding_boxes = net(batch)
            print("\tDetection performed.")
        count_frame += 1
    cv2.destroyAllWindows()
    cap.release()

if __name__ == "__main__":
    main()
The problem is that, when I run it, the execution gets completely stuck when reaching the line:
batch_nd = mx.nd.array(batch)
For reference, this is the output:
Frame: 0
Frame: 1
Frame: 2
Frame: 3
Frame: 4
Frame: 5
Frame: 6
Frame: 7
Frame: 8
Frame: 9
Frame: 10
Frame: 11
Frame: 12
Frame: 13
Frame: 14
Frame: 15
Frame: 16
Frame: 17
Frame: 18
Frame: 19
Frame: 20
Starting detection.
Any clue what I'm doing wrong? Is there a better way to send batches of frames to the model?
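One way to avoid converting the Python list with mx.nd.array() is to concatenate the NDArrays directly. A sketch, assuming every frame_nd returned by transform_test has the same (1, 3, H, W) shape, which holds for a fixed-resolution webcam:
import mxnet as mx

# `batch` is the list of frame_nd tensors collected above, each shaped (1, 3, H, W)
batch_nd = mx.nd.concat(*batch, dim=0)               # -> shape (N, 3, H, W)
batch_nd = batch_nd.as_in_context(ctx)
class_IDs, scores, bounding_boxes = net(batch_nd)    # pass the NDArray, not the Python list
batch = []                                           # start collecting the next batch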

Speed up the creation of a circular ROI on a 4K 25 fps video in python opencv

I am working with a 2-hour 4K MP4 video shot at 25 fps, using OpenCV in Python 3. From the video, I need to extract 3 circular ROIs.
Because of the large number of frames (212831), processing the whole video frame by frame with the code below takes over 24h on a 64 GB RAM workstation. Is there a way to speed up the processing or a workaround that does something slightly different to a similar effect?
Here is the code.
import numpy as np
import cv2
import sys
import time

# name of source video and paths
video = 'C0023_2hanalysis'
input_vidpath = 'path_to_video/' + video + '.MP4'
output_vidpath = 'path_to_video/' + video + '-withROI.MP4'
codec = 'avc1'

# set ROI coordinates extrapolated from last video frame as well as fixed parameters for analysis (radius)
x = 1188  # in pixels
y = 1204  # in pixels
radius = 75

# set parameters for output video as identical to input
fps = 25.0
scaling = 1.0  # output vs input video speed (?)

## import video
cap = cv2.VideoCapture(input_vidpath)
if cap.isOpened() == False:
    sys.exit('Video file cannot be read! Please check input_vidpath to ensure it is correctly pointing to the video file')

## Video writer class to output video
fourcc = cv2.VideoWriter_fourcc(*codec)  # concatenate the 4 chars to a fourcc code, i.e. the 4-char name of the codec used to compress the frames
# adjust output frame size to scaling if any is applied
# (frame shape is given as height,width, so the output needs to be re-ordered to match VideoWriter arguments)
o_height = cap.read()[1].shape[0]
o_width = cap.read()[1].shape[1]
output_framesize = (int(o_width*scaling), int(o_height*scaling))
out = cv2.VideoWriter(filename=output_vidpath, fourcc=0x7634706d, fps=fps, frameSize=output_framesize, isColor=True)

## apply ROI frame by frame and thread them back into output video
start = time.time()
f = -1
last = 0
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()  # 'return' value (T/F) and frame
    this = cap.get(1)  # get 'CV_CAP_PROP_POS_FRAMES'
    if ret == True:
        #frame = cv2.resize(frame, None, fx=scaling, fy=scaling, interpolation=cv2.INTER_LINEAR)  # no need to resize in this case
        # Apply mask to area of interest
        mask = np.zeros((o_height, o_width), np.uint8)
        mask = cv2.circle(mask, (x, y), radius, 255, thickness=-1)  # image, row and column coord of centre of circle, radius, color (black), thickness
        frame[mask == 0] = 0
        out.write(frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        f += 1
        if f % 1000 == 0:
            print(f)
    if last == this:
        break
    last = this

## When everything done, release the capture
cap.release()
out.release()
cv2.destroyAllWindows()
cv2.waitKey(1)

## End time and duration
end = time.time()
duration = end - start
print("--- %s seconds ---" % duration)
This is a common mistake. You shouldn't call waitKey(1) when you want to process the frames as fast as possible. That function basically adds a short sleep after processing each frame, and that sleep time is much longer than the processing time.
You just need to remove it; you can still kill the process if you want to stop in the middle.
import numpy as np
import cv2
import sys
import time

# name of source video and paths
video = 'C0023_2hanalysis'
input_vidpath = 'path_to_video/' + video + '.MP4'
output_vidpath = 'path_to_video/' + video + '-withROI.MP4'
codec = 'avc1'

# set ROI coordinates extrapolated from last video frame as well as fixed parameters for analysis (radius)
x = 1188  # in pixels
y = 1204  # in pixels
radius = 75

# set parameters for output video as identical to input
fps = 25.0
scaling = 1.0  # output vs input video speed (?)

## import video
cap = cv2.VideoCapture(input_vidpath)
if cap.isOpened() == False:
    sys.exit('Video file cannot be read! Please check input_vidpath to ensure it is correctly pointing to the video file')

## Video writer class to output video
fourcc = cv2.VideoWriter_fourcc(*codec)  # concatenate the 4 chars to a fourcc code, i.e. the 4-char name of the codec used to compress the frames
# adjust output frame size to scaling if any is applied
# (frame shape is given as height,width, so the output needs to be re-ordered to match VideoWriter arguments)
o_height = cap.read()[1].shape[0]
o_width = cap.read()[1].shape[1]
output_framesize = (int(o_width*scaling), int(o_height*scaling))
out = cv2.VideoWriter(filename=output_vidpath, fourcc=0x7634706d, fps=fps, frameSize=output_framesize, isColor=True)

## apply ROI frame by frame and thread them back into output video
start = time.time()
f = -1
last = 0
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()  # 'return' value (T/F) and frame
    this = cap.get(1)  # get 'CV_CAP_PROP_POS_FRAMES'
    if ret == True:
        #frame = cv2.resize(frame, None, fx=scaling, fy=scaling, interpolation=cv2.INTER_LINEAR)  # no need to resize in this case
        # Apply mask to area of interest
        mask = np.zeros((o_height, o_width), np.uint8)
        mask = cv2.circle(mask, (x, y), radius, 255, thickness=-1)  # image, row and column coord of centre of circle, radius, color (black), thickness
        frame[mask == 0] = 0
        out.write(frame)
        f += 1
        if f % 1000 == 0:
            print(f)
    if last == this:
        break
    last = this

## When everything done, release the capture
cap.release()
out.release()
cv2.destroyAllWindows()
cv2.waitKey(1)

## End time and duration
end = time.time()
duration = end - start
print("--- %s seconds ---" % duration)
