I want to capture an image and store it on my local system every n seconds, while the video stream and detection keep running continuously (so I cannot simply grab every 5th frame). Currently my code captures an image whenever a condition fails.
def create_alert(self):
    """Watch the default camera, run PPE detection on every frame, and
    save a screenshot whenever a "lineman_fail" detection appears.

    Runs until the capture closes or the user presses 'q'.
    """
    capture = cv2.VideoCapture(0)
    shots_taken = 0
    while capture.isOpened():
        ok, frame = capture.read()
        try:
            # Use only the first detection: bounding box, label, confidence.
            x, y, w, h, label, conf = ppe.detection(frame)[0]
            if label == "lineman_fail":
                # engine.say("Warning")
                # engine.runAndWait()
                ppe.take_screenshot(frame, shots_taken)
                shots_taken += 1
                print(shots_taken)
                print("Something wrong")
        except Exception as err:
            # Best-effort: detection failures must not stop the video loop.
            print("_______-", err)
        cv2.imshow("image", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    capture.release()
    cv2.destroyAllWindows()
def take_screenshot(self, frame, count):
    """Write *frame* to screen_shot/img_<count>.jpg.

    Note: the directory "screen_shot" must already exist; cv2.imwrite
    fails silently otherwise.
    """
    # Removed dead local `prev = time.time()` - it was assigned and never read.
    cv2.imwrite("screen_shot/img_" + str(count) + ".jpg", frame)
In order to capture an image every n seconds, use a timer: compute the difference between the current time and the time of the last capture, and when that elapsed time exceeds n seconds, save the frame with cv2's imwrite.
while True:
    current = time()
    # Accumulate elapsed seconds since the last saved image.
    yolo_v4.delta += current - previous
    previous = current
    frame = camera.get_frame()
    if yolo_v4.delta > 10:
        # Reset the accumulator - without this, every frame after the first
        # 10 seconds would be saved, not one frame per 10-second interval.
        yolo_v4.delta = 0
        ct = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        cv2.imwrite("screen_shot/img_" + str(ct) + ".jpg", frame)
I'm trying to read video into a buffer using threads in OpenCV, and I get "Assertion fctx->async_lock failed at libavcodec/pthread_frame.c:167". I want to use threads so that I can read many frames into a list at a time, for speed. I need all the frames and cannot skip any, so I thought multi-threading would be the way to go. My code works single-threaded, i.e. with "buffer_refill = 1".
import threading
import cv2
cap = cv2.VideoCapture('data/temp/testing/test.mp4')
frames = []  # shared frame buffer: filled by buffer(), drained by the display loop
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # float per OpenCV API
frames_left = total_frames  # counts down as frames are scheduled for reading
threads = []  # worker threads created by get_buffer()
print(total_frames)
min_buffer = 120  # refill whenever the buffer drops below this many frames
buffer_refill = 60  # frames fetched per refill
def buffer():
    """Read one frame from the shared capture and append it to `frames`.

    grab() only returns a success bool; retrieve() decodes the grabbed
    frame.  The original code assigned grab()'s bool to `frame` (then
    overwrote it) and appended unconditionally, which could push None
    into the buffer and crash imshow later.
    """
    print('buffer')
    grabbed = cap.grab()
    ret, frame = cap.retrieve()
    if grabbed and ret:
        frames.append(frame)
def get_buffer(num):
    """Refill the frame buffer with up to *num* frames, then block until done.

    cv2.VideoCapture is NOT thread-safe: many threads calling grab()/
    retrieve() concurrently is what triggers the libavcodec
    "Assertion fctx->async_lock failed" crash.  Decoding cannot be
    parallelised this way anyway, so all reads are serialized on a single
    worker thread; the interface (blocking call that fetches *num* frames)
    is unchanged.
    """
    global frames_left

    def worker(n):
        # Sole reader of `cap` - one thread touching the capture at a time.
        global frames_left
        for _ in range(n):
            if frames_left <= 0:
                break
            frames_left -= 1
            buffer()

    t = threading.Thread(target=worker, args=(num,))
    t.start()
    threads.append(t)
    for thread in threads:
        thread.join()
    print('block')
# Main loop: top up the buffer while frames remain, otherwise display.
while cap.isOpened():
    if frames_left > 0 and len(frames) < min_buffer:
        get_buffer(buffer_refill)
    elif frames:
        cv2.imshow('Frame', frames.pop(0))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # Buffer empty and nothing left to read - the original indexed
        # frames[0] here and raised IndexError at end of video.
        break
cap.release()
cv2.destroyAllWindows()
I'm using OpenCV with an RTSP source to continuously record video from the stream for 1 minute at a time.
Then the process starts again and creates a new file.
Recording works fine for a while, and then the program only creates files that are 6 kB in size and not playable.
import numpy as np
import cv2
import time
def get_output(out=None, fps=24, frame_size=(320, 240)):
    """Release the previous writer (if any) and return a fresh, timestamped
    MJPG VideoWriter.

    Args:
        out: the previous cv2.VideoWriter to release, or None on first call.
        fps: output frame rate (default 24, as before).
        frame_size: (width, height) the writer expects.  IMPORTANT: if frames
            passed to write() do not match this size, VideoWriter silently
            writes nothing and you get tiny (~6 kB) unplayable files - match
            it to the capture's actual resolution.

    Returns:
        A new cv2.VideoWriter named after the current local time.
    """
    if out:
        out.release()
    name = str(time.strftime('%d %m %Y - %H %M %S')) + '.avi'
    return cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'MJPG'), fps, frame_size)
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4")
next_time = time.time() + 60
out = get_output()
while True:
    # Rotate to a new output file every 60 seconds.
    if time.time() > next_time:
        next_time += 60
        out = get_output(out)
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break  # stream ended or the read failed
    out.write(frame)
    cv2.imshow('Frame', frame)
    # Without waitKey the HighGUI window never refreshes; 'q' also quits.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
out.release()  # was missing - the last file was never finalised
cap.release()
cv2.destroyAllWindows()
I want to write code in Python that extracts photos as frames from an RTSP camera (live stream).
I want these photos to be stored with a timestamp and date as well, which I think I have done. My only challenge is that I want the photos to be saved automatically to my local computer every minute, stopping after 24 hours.
How do I go about this?
This is my current code
# Original (question) code: saves roughly one frame per SECOND, not per minute.
imagesFolder = "C:/Users/<user>/documents"
cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
frameRate = cap.get(5) #frame rate (property 5 == CAP_PROP_FPS)
count = 0
while cap.isOpened():
    frameId = cap.get(1) # current frame number (property 1 == CAP_PROP_POS_FRAMES)
    ret, frame = cap.read()
    if (ret != True):
        break
    # NOTE(review): this condition is true about once per second (every
    # frameRate-th frame), which is why far too many images are written.
    if (frameId % math.floor(frameRate) == 0):
        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)
cap.release()
print ("Done!")
cv2.destroyAllWindows()
You may simply wait 60 seconds between frame capturing, and break the loop after 24*60 cycles.
I tried testing my code using public RTSP stream, but I am getting black frames, so I can't test my code.
Here is the code:
import cv2
import time
from datetime import datetime
import getpass
#imagesFolder = "C:/Users/<user>/documents"
# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing, but I am getting black frames!
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
frameRate = cap.get(5) #frame rate
count = 0
while cap.isOpened():
    start_time = time.time()
    frameId = cap.get(1) # current frame number
    ret, frame = cap.read()
    if (ret != True):
        break
    filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
    cv2.imwrite(filename, frame)
    # Show frame for testing
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    count += 1
    #Break loop after 24*60 minutes (one saved frame per minute)
    if count > 24*60:
        break
    elapsed_time = time.time() - start_time
    # Wait for 60 seconds (subtract elapsed_time in order to be accurate).
    # NOTE(review): sleeping between reads lets the RTSP buffer back up, so
    # read() keeps returning the oldest buffered frame (the "first frame
    # repeated" symptom).  Also, if elapsed_time ever exceeds 60 s this
    # time.sleep() call raises ValueError on the negative argument.
    time.sleep(60 - elapsed_time)
cap.release()
print ("Done!")
cv2.destroyAllWindows()
Update:
The code sample above is not working - the first frame is repeated every minute.
Suggested solution:
Grab all the video frame, and save a frame every minute.
The one minute time delta is going to be accurate up to 0.2 seconds in case of 5Hz video.
Use separate timer for measuring 24 hours.
Here is the updated code (reading from public RTSP):
import cv2
import time
from datetime import datetime
import getpass
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing:
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
#cap = cv2.VideoCapture("test2.mp4")
frameRate = cap.get(5) #frame rate (property 5 == CAP_PROP_FPS)
cur_time = time.time() # Get current time
# start_time_24h measures 24 hours
start_time_24h = cur_time
# start_time_1min measures 1 minute
start_time_1min = cur_time - 59 # Subtract 59 seconds for start grabbing first frame after one second (instead of waiting a minute for the first frame).
while cap.isOpened():
    frameId = cap.get(1) # current frame number
    ret, frame = cap.read()
    if (ret != True):
        break
    cur_time = time.time() # Get current time
    elapsed_time_1min = cur_time - start_time_1min # Time elapsed from previous image saving.
    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time
        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)
        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)
    elapsed_time_24h = time.time() - start_time_24h
    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break
    # Keep reading every frame (no sleeping) so the stream buffer never
    # backs up; only one frame per minute is actually written to disk.
    #time.sleep(60 - elapsed_time) # Sleeping is a bad idea - we need to grab all the frames.
cap.release()
print ("Done!")
cv2.destroyAllWindows()
Now the images from the public RTSP look OK:
Update:
You may try capturing the video stream using FFmpeg (instead of OpenCV).
Read the following blog: Read and Write Video Frames in Python Using FFMPEG
In case you are using Windows OS, download the latest stable 64-bit static version from here (currently 4.2.2).
Extract the zip file, and place ffmpeg.exe in the same folder as your Python script.
Here is the code (capturing using FFmpeg as sub-process and stdout as a PIPE):
import cv2
import time
from datetime import datetime
import getpass
import numpy as np
import subprocess as sp
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing:
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
cap = cv2.VideoCapture(in_stream)
#cap = cv2.VideoCapture("test2.mp4")
frameRate = cap.get(5)  # frame rate
# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Release VideoCapture - it was used just for getting video resolution
cap.release()
#in_stream = "rtsp://xxx.xxx.xxx.xxx:xxx/Streaming/Channels/101?transportmode=multicast",
#Use public RTSP Streaming for testing
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
# http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/
FFMPEG_BIN = "ffmpeg"  # on Linux and Mac OS (also works on Windows when ffmpeg.exe is in the path)
#FFMPEG_BIN = "ffmpeg.exe"  # on Windows
command = [FFMPEG_BIN,
           '-i', in_stream,
           '-f', 'image2pipe',
           '-pix_fmt', 'bgr24',
           '-vcodec', 'rawvideo', '-an', '-']
# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)
cur_time = time.time()  # Get current time
# start_time_24h measures 24 hours
start_time_24h = cur_time
# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Start grabbing the first frame after 30 seconds (instead of waiting a full minute).
while True:
    # Read width*height*3 bytes from stdout (= 1 raw BGR frame).
    raw_frame = pipe.stdout.read(width*height*3)
    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break
    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.
    # If 60 seconds have passed, reset the timer and store an image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time
        # np.frombuffer replaces the deprecated np.fromstring, which is
        # removed for binary input in current NumPy releases.
        frame = np.frombuffer(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))
        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)
        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)
    elapsed_time_24h = time.time() - start_time_24h
    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break
print("Done!")
pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
I tried using the apscheduler API but to no avail. Maybe someone can look at this differently and make it work.
import cv2
import math
import datetime
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
import getpass  # was missing from this snippet's imports - getpass.getuser() is used below

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password#CameraIP/axis-media/media.amp")
frameRate = cap.get(5)  # frame rate (property 5 == CAP_PROP_FPS)
count = 0
def some_job():
    """Scheduler job: read the next frame and save one timestamped image.

    The original body looped `while cap.isOpened()`, so the job never
    returned: the BlockingScheduler started it once and no further
    per-minute saves ever happened.  Each invocation now stores exactly
    one frame.
    """
    ret, frame = cap.read()
    if ret:
        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)
scheduler = BlockingScheduler()
# Fire some_job every 60 seconds between start_date and end_date (24 hours).
scheduler.add_job(some_job, 'interval', seconds=60, start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
scheduler.start()
# NOTE(review): BlockingScheduler.start() blocks until the scheduler shuts
# down, so the cleanup below only runs after end_date is reached.
cap.release()
print ("Done!")
# Closes all the frames
cv2.destroyAllWindows()
This works somewhat better than the other solutions. The only challenge is that the photos stop saving after the first 3 minutes (the first 3 photos are captured within seconds but written out one per minute). The goal now is to ensure it keeps saving every minute for up to 24 hours before stopping.
import cv2
import time
import getpass
import numpy as np
import subprocess as sp
from datetime import datetime
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
# RTSP Streaming:
in_stream = "rtsp://username:password#cameraIP/axis-media/media.amp"
cap = cv2.VideoCapture(in_stream)
frameRate = cap.get(5)  # frame rate
# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Release VideoCapture - it was used just for getting video resolution
cap.release()
in_stream = "rtsp://username:password#cameraIP/axis-media/media.amp"
FFMPEG_BIN = "ffmpeg.exe"  # on Windows
# Suspecting camera supports TCP protocol hence added: '-rtsp_transport', 'tcp'
command = [FFMPEG_BIN,
           '-rtsp_transport', 'tcp',
           '-i', in_stream,
           '-f', 'image2pipe',
           '-pix_fmt', 'bgr24',
           '-vcodec', 'rawvideo', '-an', '-']
# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)
cur_time = time.time()  # Get current time
# start_time_24h measures 24 hours
start_time_24h = cur_time
# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Start grabbing the first frame after 30 seconds (instead of waiting a full minute).
while True:
    # Read width*height*3 bytes from stdout (= 1 raw BGR frame).
    raw_frame = pipe.stdout.read(width*height*3)
    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break
    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.
    # If 60 seconds have passed, reset the timer and store an image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time
        # np.frombuffer replaces the deprecated np.fromstring, which is
        # removed for binary input in current NumPy releases.
        frame = np.frombuffer(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))
        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)
        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)
    elapsed_time_24h = time.time() - start_time_24h
    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break
print("Done!")
pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
I have a Raspberry Pi and a few IP cameras, and I would like to get a fresh image from each of these cameras every 5 minutes. I have the following script, which opens the RTSP feed and grabs images ALL THE TIME, taking 10-25 images every second while it runs.
Is there a way to open the video feed and take only one image?
import cv2
import time
cap = cv2.VideoCapture('rtsp://192.168.86.81:554/11') # it can be rtsp or http $
ret, frame = cap.read()
# NOTE(review): this loop saves a frame as fast as read() delivers them
# (10-25 per second) - there is no timing control or delay anywhere here.
while ret:
    cv2.imwrite('images/{}.jpg'.format(time.time()), frame)
    ret, frame = cap.read()
This solved my problem. I removed the timestamp as I do not need it. I will leave the above code in place in case anybody wants to play around with it.
import cv2
# Open the stream, grab exactly one frame, release the camera, save the image.
cap = cv2.VideoCapture('rtsp://192.168.86.81:554/11') # it can be rtsp or http stream
if cap.isOpened():
    ret, frame = cap.read()
    cap.release() #releasing camera immediately after capturing picture
    # Removed the dead unconditional cap.read() that preceded the isOpened()
    # check - its result was discarded and immediately re-read.
    if ret and frame is not None:
        cv2.imwrite('images/latest.jpg', frame)
import cv2
import time
from datetime import datetime
import getpass
#imagesFolder = "C:/Users/<user>/documents"
# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://192.168.86.81:554/11")
cap = cv2.VideoCapture("rtsp://192.168.86.81:554/11")
frameRate = cap.get(5)  # frame rate (property 5 == CAP_PROP_FPS)
count = 0
while cap.isOpened():
    start_time = time.time()
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    if not ret:
        break
    filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
    cv2.imwrite(filename, frame)
    # Show frame for testing
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    count += 1
    # Break loop after 5*60 minutes (one saved frame per minute).
    if count > 5*60:
        break
    elapsed_time = time.time() - start_time
    # Wait for 60 seconds, subtracting elapsed_time for accuracy.  Clamped to
    # zero: if a read ever takes longer than a minute, the original
    # time.sleep(60 - elapsed_time) raised ValueError on the negative value.
    time.sleep(max(0.0, 60 - elapsed_time))
cap.release()
print("Done!")
cv2.destroyAllWindows()