I'm using OpenCV to continuously record video from an RTSP source in one-minute chunks: after each minute the current file is closed and a new one is started.
Recording works fine for a while, but then the program only creates files that are 6 kB in size and not playable.
import numpy as np
import cv2
import time

def get_output(out=None):
    # Release the previous writer (if any) and open a new video file.
    # The filename encodes the current date/time; MJPG encoding, 24 fps, 320x240.
    if out:
        out.release()
    return cv2.VideoWriter(time.strftime('%d %m %Y - %H %M %S') + '.avi',
                           cv2.VideoWriter_fourcc(*'MJPG'), 24, (320, 240))

cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4")
next_time = time.time() + 60
out = get_output()

while True:
    if time.time() > next_time:
        next_time += 60
        out = get_output(out)
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret:
        out.write(frame)
        cv2.imshow('Frame', frame)
        cv2.waitKey(1)  # needed for imshow to actually render
    else:
        # Break the loop on a failed read
        break

cap.release()
out.release()
cv2.destroyAllWindows()
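Two things worth checking (assumptions on my part, not confirmed from the code alone): cv2.VideoWriter silently discards frames whose dimensions don't match the size passed to the constructor (here (320, 240)), and if the RTSP stream stalls, cap.read() starts failing so a freshly rotated file never receives frames, leaving only the container header, which is about the size of those 6 kB files. A minimal reconnect sketch, with the URL as a placeholder:

import time
import cv2

RTSP_URL = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4"

def open_capture(url, retries=5, delay=2.0):
    # Try to (re)open the RTSP source, waiting between attempts.
    for _ in range(retries):
        cap = cv2.VideoCapture(url)
        if cap.isOpened():
            return cap
        cap.release()
        time.sleep(delay)
    return None

cap = open_capture(RTSP_URL)
while cap is not None:
    ret, frame = cap.read()
    if not ret:
        # Reconnect instead of breaking out of the loop.
        cap.release()
        cap = open_capture(RTSP_URL)
        continue
    # ... resize to the writer's size if needed, then write/rotate as above ...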
I am reading an RTSP stream (local RTSP link) from my CCTV camera connected over LAN.
My main goal: I want to perform some processing on the frames and display them via m3u8 in real time (or nearly real time) so that I can display the stream in the frontend using hls.js.
Currently I am trying to create video files in real time so that ffmpeg can then create the m3u8.
Sharing my code below.
import cv2
from moviepy.editor import ImageClip, concatenate_videoclips
import numpy as np
import time

url = "rtsp://username:password@192.168.1.100:10554/Streaming/channels/401"
cap = cv2.VideoCapture(url)

def make_video_file(clips):
    try:
        print(f"clips = {clips}")
        video_clip = concatenate_videoclips(clips, method='compose')
        video_clip.write_videofile("video-output.mp4", fps=30)
    except Exception as e:
        print(e)

FRAME_COUNTER = 0
NUMBER_OF_FRAMES = 30
CLIPS = []

while True:
    ret, frame = cap.read()
    if not ret:
        continue
    FRAME_COUNTER += 1
    # Keep a rolling buffer of the last NUMBER_OF_FRAMES frames.
    # Note: OpenCV returns BGR frames; moviepy expects RGB.
    CLIPS.append(ImageClip(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).set_duration(1))
    if len(CLIPS) > NUMBER_OF_FRAMES:
        CLIPS.pop(0)
    if FRAME_COUNTER == NUMBER_OF_FRAMES:
        FRAME_COUNTER = 0
        make_video_file(CLIPS)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
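For the m3u8 goal it may be simpler to skip moviepy entirely and pipe the processed frames straight into an ffmpeg sub-process that writes an HLS playlist. A minimal sketch, assuming ffmpeg is on the PATH; the output name stream.m3u8 and the segment settings are placeholders, and hls.js can then load the playlist from wherever these files are served:

import subprocess as sp
import cv2

url = "rtsp://username:password@192.168.1.100:10554/Streaming/channels/401"
cap = cv2.VideoCapture(url)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) or 25  # fall back to 25 if the stream doesn't report a rate

command = ["ffmpeg", "-y",
           "-f", "rawvideo", "-pix_fmt", "bgr24",          # raw BGR frames on stdin
           "-s", f"{width}x{height}", "-r", str(fps),
           "-i", "-",
           "-c:v", "libx264", "-preset", "veryfast", "-tune", "zerolatency",
           "-pix_fmt", "yuv420p",
           "-f", "hls", "-hls_time", "2", "-hls_list_size", "5",
           "-hls_flags", "delete_segments",                # keep a rolling live window
           "stream.m3u8"]
proc = sp.Popen(command, stdin=sp.PIPE)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # ... per-frame processing goes here ...
    proc.stdin.write(frame.tobytes())

proc.stdin.close()
proc.wait()
cap.release()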
I want to break my video into frames.
I am using the following code:
import cv2
import numpy as np
import os

# Playing video from file:
cap = cv2.VideoCapture('myvideo.mp4')
cap.set(cv2.CAP_PROP_FPS, 5)

try:
    if not os.path.exists('data'):
        os.makedirs('data')
except OSError:
    print('Error: Creating directory of data')

currentFrame = 0
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break
    # Save the current frame as a jpg file
    name = './data/frame' + str(currentFrame) + '.jpg'
    print('Creating...' + name)
    cv2.imwrite(name, frame)
    currentFrame += 1

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
I have set the FPS = 5 and 'myvideo.mp4' is 0:55 sec long. So, I'd expect to have 55*5 = 275 frames, but the code above gives me a lot more frames and it doesn't stop generating frames. Is something wrong in the code?
If you want the proper frame rate, query it with
    framerate = cap.get(cv2.CAP_PROP_FPS)
instead of calling
    cap.set(cv2.CAP_PROP_FPS, 5)
cap.set() does not resample a video file, so reading continues at the native rate; cap.get(cv2.CAP_PROP_FPS) (property index 5) gives you the exact frame rate, which you can use to decide which frames to keep.
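For example, to end up with roughly 5 saved frames per second, a minimal sketch (reusing 'myvideo.mp4' and the data folder from the question) reads the native rate and keeps every Nth frame:

import cv2

cap = cv2.VideoCapture('myvideo.mp4')
native_fps = cap.get(cv2.CAP_PROP_FPS)          # e.g. 30.0 for a 30 fps file
target_fps = 5
step = max(1, round(native_fps / target_fps))   # keep every Nth frame

saved = 0
index = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if index % step == 0:
        cv2.imwrite('./data/frame' + str(saved) + '.jpg', frame)
        saved += 1
    index += 1
cap.release()

For the 55-second clip above this yields roughly the expected 55*5 = 275 images.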
So I want to write code in Python that extracts photos as frames from an RTSP camera (live stream).
I would like these photos to be stored with a timestamp and date, which I think I have done. My only challenge is that I want the photos to be saved to my local computer automatically every minute, with the capture ending after 24 hours.
How do I go about this?
This is my current code:
import cv2
import math
from datetime import datetime

imagesFolder = "C:/Users/<user>/documents"
cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")
frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

while cap.isOpened():
    frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    ret, frame = cap.read()
    if not ret:
        break
    if frameId % math.floor(frameRate) == 0:
        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)

cap.release()
print("Done!")
cv2.destroyAllWindows()
You may simply wait 60 seconds between frame captures, and break the loop after 24*60 cycles.
I tried testing my code using a public RTSP stream, but I am getting black frames, so I can't test it.
Here is the code:
import cv2
import time
from datetime import datetime
import getpass

#imagesFolder = "C:/Users/<user>/documents"
# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

#cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")
# Use public RTSP streaming for testing, but I am getting black frames!
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")

frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate
count = 0

while cap.isOpened():
    start_time = time.time()
    frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    ret, frame = cap.read()
    if not ret:
        break
    filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
    cv2.imwrite(filename, frame)
    # Show frame for testing
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    count += 1
    # Break loop after 24*60 minutes
    if count > 24*60:
        break
    elapsed_time = time.time() - start_time
    # Wait for 60 seconds (subtract elapsed_time in order to be accurate).
    time.sleep(60 - elapsed_time)

cap.release()
print("Done!")
cv2.destroyAllWindows()
Update:
The code sample above is not working - the first frame is repeated every minute.
Suggested solution:
Grab all the video frames, and save one frame every minute.
The one-minute time delta will be accurate to within 0.2 seconds for 5 Hz video, because a new frame arrives every 1/5 = 0.2 s, so the save happens at the first frame after the 60-second mark.
Use a separate timer for measuring 24 hours.
Here is the updated code (reading from the public RTSP stream):
import cv2
import time
from datetime import datetime
import getpass

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

#cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")
# Use public RTSP streaming for testing:
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
#cap = cv2.VideoCapture("test2.mp4")

frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 59  # Subtract 59 seconds so the first frame is grabbed after one second (instead of waiting a minute).

while cap.isOpened():
    frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    ret, frame = cap.read()
    if not ret:
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed since the previous image was saved.

    # If 60 seconds have passed, reset the timer and store the image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        #filename = "image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    # Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time)  # Sleeping is a bad idea - we need to grab all the frames.

cap.release()
print("Done!")
cv2.destroyAllWindows()
Now the images from the public RTSP stream look OK.
Update:
You may try capturing the video stream using FFmpeg (instead of OpenCV).
Read the following blog: Read and Write Video Frames in Python Using FFMPEG
In case you are using Windows OS, download the latest stable 64-bit static version from here (currently 4.2.2).
Extract the zip file, and place ffmpeg.exe in the same folder as your Python script.
Here is the code (capturing with FFmpeg as a sub-process, using stdout as a PIPE):
import cv2
import time
from datetime import datetime
import getpass
import numpy as np
import subprocess as sp

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

#cap = cv2.VideoCapture("rtsp://username:password@cameraIP/axis-media/media.amp")
# Use public RTSP streaming for testing:
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
cap = cv2.VideoCapture(in_stream)
#cap = cv2.VideoCapture("test2.mp4")

frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting the video resolution
cap.release()

#in_stream = "rtsp://xxx.xxx.xxx.xxx:xxx/Streaming/Channels/101?transportmode=multicast"
# Use public RTSP streaming for testing
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"

# http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/
FFMPEG_BIN = "ffmpeg"  # on Linux and Mac OS (also works on Windows when ffmpeg.exe is in the path)
#FFMPEG_BIN = "ffmpeg.exe"  # on Windows

command = [FFMPEG_BIN,
           '-i', in_stream,
           '-f', 'image2pipe',
           '-pix_fmt', 'bgr24',
           '-vcodec', 'rawvideo', '-an', '-']

# Open a sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds so the first frame is grabbed after 30 seconds (instead of waiting a minute).

while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)
    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed since the previous image was saved.

    # If 60 seconds have passed, reset the timer and store the image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the bytes read into a numpy array, and reshape it to video frame dimensions
        frame = np.frombuffer(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        #filename = "image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    # Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

print("Done!")
pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
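As a side note, the initial VideoCapture above exists only to query the resolution. A possible alternative (a sketch, assuming ffprobe ships alongside ffmpeg) is to ask ffprobe instead:

import json
import subprocess as sp

def probe_resolution(url):
    # Ask ffprobe for the width/height of the first video stream.
    out = sp.check_output([
        "ffprobe", "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=width,height",
        "-of", "json", url])
    info = json.loads(out)["streams"][0]
    return info["width"], info["height"]

width, height = probe_resolution("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")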
I tried using the apscheduler API but to no avail. Maybe someone can look at this differently and make it work.
import cv2
import math
import getpass
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password@CameraIP/axis-media/media.amp")
frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

def some_job():
    # Note: this inner loop never returns, so after the first tick the scheduler
    # cannot start new runs of the job.
    while cap.isOpened():
        frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)
        ret, frame = cap.read()
        if not ret:
            break
        if frameId % math.floor(frameRate) == 0:
            filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
            cv2.imwrite(filename, frame)

scheduler = BlockingScheduler()
scheduler.add_job(some_job, 'interval', seconds=60, start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
scheduler.start()

cap.release()
print("Done!")

# Closes all the frames
cv2.destroyAllWindows()
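One possible repair (an assumption on my part, not tested against the camera): make the job grab and save a single frame, so each scheduler tick does one unit of work instead of entering an endless loop. Be aware that with RTSP a capture that sits idle between ticks may return stale buffered frames, which is the same "first frame repeated" symptom described above. The sketch reuses cap, imagesFolder, and datetime from the code above:

def save_one_frame():
    # Grab one frame per scheduler tick and save it with a timestamped name.
    ret, frame = cap.read()
    if ret:
        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)

scheduler.add_job(save_one_frame, 'interval', seconds=60)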
This works somewhat better than the other solutions. The only challenge is that the photos stop saving after the first 3 minutes (the first 3 photos are captured within seconds but saved one per minute). The solution still needs to save a photo every minute for a full 24 hours before stopping.
import cv2
import time
import getpass
import numpy as np
import subprocess as sp
from datetime import datetime

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

# RTSP streaming:
in_stream = "rtsp://username:password@cameraIP/axis-media/media.amp"
cap = cv2.VideoCapture(in_stream)

frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate

# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting the video resolution
cap.release()

in_stream = "rtsp://username:password@cameraIP/axis-media/media.amp"

FFMPEG_BIN = "ffmpeg.exe"  # on Windows

# Suspecting the camera supports TCP transport, hence added: '-rtsp_transport', 'tcp'
command = [FFMPEG_BIN,
           '-rtsp_transport', 'tcp',
           '-i', in_stream,
           '-f', 'image2pipe',
           '-pix_fmt', 'bgr24',
           '-vcodec', 'rawvideo', '-an', '-']

# Open a sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds so the first frame is grabbed after 30 seconds (instead of waiting a minute).

while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)
    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed since the previous image was saved.

    # If 60 seconds have passed, reset the timer and store the image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the bytes read into a numpy array, and reshape it to video frame dimensions
        frame = np.frombuffer(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    # Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

print("Done!")
pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
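If the saving still stalls, one pattern worth trying (a sketch under the assumption that pipe, width, height, and imagesFolder are set up as above) is to decouple reading from saving: a daemon thread drains the pipe continuously so no backlog builds up, while the main thread saves the most recent frame once a minute:

import threading

latest = {'frame': None}

def reader():
    # Continuously read frames so the ffmpeg pipe never fills up.
    while True:
        raw = pipe.stdout.read(width*height*3)
        if len(raw) != width*height*3:
            break
        latest['frame'] = np.frombuffer(raw, np.uint8).reshape((height, width, 3))

threading.Thread(target=reader, daemon=True).start()

start_24h = time.time()
while time.time() - start_24h < 24*60*60:
    time.sleep(60)
    frame = latest['frame']
    if frame is not None:
        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)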
I am able to extract the frames of a certain test.mp4 file using the following code:
import cv2

def get_frames():
    cap = cv2.VideoCapture('test.mp4')
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite('test_' + str(i) + '.jpg', frame)
        i += 1
    cap.release()
    cv2.destroyAllWindows()
A lot of the frames that are extracted are useless (they're nearly identical). I need to be able to set a certain rate at which the frame extraction can be done.
I think you need to just skip frames based on a fixed cycle.
import cv2

def get_frames():
    cap = cv2.VideoCapture('test.mp4')
    i = 0
    # a variable to set how many frames you want to skip
    frame_skip = 10
    # a variable to keep track of the frame to be saved
    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if i > frame_skip - 1:
            frame_count += 1
            cv2.imwrite('test_' + str(frame_count*frame_skip) + '.jpg', frame)
            i = 0
            continue
        i += 1
    cap.release()
    cv2.destroyAllWindows()
Try the logic below. Here, we wait for a period of time (based on the desired frame rate) and then save the most recent frame.
import cv2
import time

def get_frames():
    cap = cv2.VideoCapture('test.mp4')
    frame_rate = 10
    prev = 0
    i = 0
    while cap.isOpened():
        time_elapsed = time.time() - prev
        ret, frame = cap.read()
        if not ret:
            break
        if time_elapsed > 1./frame_rate:
            # print(time_elapsed)
            prev = time.time()
            cv2.imwrite('./data/sample1/test_' + str(i) + '.jpg', frame)
            i += 1
    cap.release()
    cv2.destroyAllWindows()
As an alternative to writing your own code for this, have you considered using FFmpeg? FFmpeg can extract all frames from a video and save them as images, and it can also extract frames at a lower frame rate than the source video.
See here for a demonstration of what I think you're trying to do, and the arguments to give ffmpeg to do so.
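For reference, a minimal sketch of invoking ffmpeg from Python to extract frames at 5 fps (it assumes ffmpeg is on the PATH and the data folder exists; the file names are placeholders):

import subprocess

# Decode test.mp4 and write 5 frames per second into numbered jpg files.
subprocess.run([
    "ffmpeg", "-i", "test.mp4",
    "-vf", "fps=5",
    "data/frame%04d.jpg"
], check=True)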