Camera won't turn off with cv2.VideoCapture(0).release() - python

I made this script:
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
import keyboard


class VideoRecorder():
    # Video class based on openCV
    def __init__(self):
        self.open = True
        self.fps = 6                 # fps should be the minimum constant rate at which the camera can
        self.fourcc = "MJPG"         # capture images (with no decrease in speed over time; testing is required)
        self.frameSize = (640, 480)  # video formats and sizes also depend and vary according to the camera used
        self.video_filename = "temp_video.avi"
        self.video_cap = cv2.VideoCapture(0)
        self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
        self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
        self.frame_counts = 1
        self.start_time = time.time()

    # Video starts being recorded
    def record(self):
        timer_start = time.time()
        timer_current = 0
        while self.open == True:
            ret, video_frame = self.video_cap.read()
            if self.frame_counts > 10:
                break
            if ret == True:
                self.video_out.write(video_frame)
                self.frame_counts += 1
                print self.frame_counts
                time.sleep(0.16)
            else:
                # threading.Thread(target=self.stop).start()
                break
                # 0.16 delay -> 6 fps
        time.sleep(1)
        self.video_out.release()
        cv2.VideoCapture(0).release()
        cv2.destroyAllWindows()
        dwhuiadhuiahdwia = raw_input("Testtidhwuia?")

    # Finishes the video recording therefore the thread too
    def stop(self):
        print "You made it"
        if self.open == True:
            self.open = False
            self.video_out.release()
            self.video_cap.release()
            cv2.destroyAllWindows()
            hduwahduiwahdiu = raw_input("Press enter to continue...")
        else:
            pass

    # Launches the video recording function using a thread
    def start(self):
        video_thread = threading.Thread(target=self.record)
        video_thread.start()


def start_video_recording():
    global video_thread
    video_thread = VideoRecorder()
    video_thread.start()


def stop_AVrecording():
    frame_counts = video_thread.frame_counts
    elapsed_time = time.time() - video_thread.start_time
    recorded_fps = frame_counts / elapsed_time
    print "total frames " + str(frame_counts)
    print "elapsed time " + str(elapsed_time)
    print "recorded fps " + str(recorded_fps)
    video_thread.stop()
    # Makes sure the threads have finished
    time.sleep(1)
    video_thread.stop()
    print ".."


start_video_recording()
duiwhaiudhwauidhwa = raw_input("hello")
It should record a video with the camera for about 10 seconds, then save it, turn off the camera, and close the script when the user presses enter.
But it doesn't really work.
It will record a video, and it does save the video, but the camera only turns off when I close the script (the camera stays on, but isn't recording).
I know this because the LED next to my camera doesn't turn off when I'm prompted to press enter to continue, but it does when I press enter and the script closes.
I found this, and I haven't tested it yet. But if I were to use that solution and it worked, I'd have to do it on every computer I run the script on manually.
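For reference, cv2.VideoCapture(0).release() inside record() creates a brand-new capture object and releases that one; the capture the class actually holds is self.video_cap, which stays open, and that may be why the LED stays on. Below is a minimal, untested sketch of what releasing the class's own capture at the end of record() could look like; it only uses names already defined in the question's code:

def record(self):
    while self.open and self.frame_counts <= 10:
        ret, video_frame = self.video_cap.read()
        if not ret:
            break
        self.video_out.write(video_frame)
        self.frame_counts += 1
        time.sleep(0.16)             # ~6 fps
    self.video_out.release()
    self.video_cap.release()         # release the capture this instance opened
    cv2.destroyAllWindows()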

Related

OpenCV module not repeating loop on Raspberry Pi 4, working fine on Windows

Let me start by saying that I am very new to Python and Raspberry Pi.
I've "made"(more like copied from a lot of diff. sources and compiled) a module on windows to capture images from a web cam on key press and save it in a folder(code attached). It is working fine on windows and repeats the loop but throws an error on Raspberry Pi after the first loop.
Code for windows:-
# Import Modules #######################################################################################################
from datetime import datetime
import cv2
import time
import queue
import threading

# Module Level Variables ###############################################################################################
inpath = "D:\\Python Projects\\OCR Trial2\\Input\\Training Data\\"
outpath = "D:\\Python Projects\\OCR Trial2\\Output\\"
intpath = "D:\\Python Projects\\OCR Trial2\\Intermediate\\"
file_Prefix = 'IMG100'
file_Extension = '.png'

# Class Definitions ####################################################################################################
class VideoCapture:
    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()

    # Background reader that keeps only the most recent frame in the queue
    def _reader(self):
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()
                except queue.Empty:
                    pass
            self.q.put(frame)

    def read(self):
        return self.q.get()

# Functions ############################################################################################################
def main():
    while True:
        try:
            windowName = "Live Video Feed"
            cv2.namedWindow(windowName)
            if cv2.waitKey(1) == ord("c"):
                time.sleep(1)
                now = datetime.now()
                formatted_time = now.strftime('%Y-%m-%d %H-%M-%S.%f')[:-3]
                cam = VideoCapture(0 + cv2.CAP_DSHOW)
                frame1 = cam.read()
                cv2.imshow(windowName, frame1)
                cv2.imwrite(intpath + file_Prefix + formatted_time + file_Extension, frame1)
                print(formatted_time)
            else:
                continue
        except:
            pass

# Execute Code #########################################################################################################
if __name__ == "__main__":
    main()
Output for Windows:-
2021-01-06 17-20-05.255
2021-01-06 17-20-07.404
2021-01-06 17-20-08.601
2021-01-06 17-20-10.766
2021-01-06 17-20-12.408
Process finished with exit code -1
Code for Raspberry Pi:-
# Import Modules #######################################################################################################
from datetime import datetime
import cv2
import time
import queue
import threading

# Module Level Variables ###############################################################################################
intpath = "/home/pi/Python Images/"
file_Prefix = 'IMG100'
file_Extension = '.png'

# Class Definitions ####################################################################################################
class VideoCapture:
    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()

    def _reader(self):
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()
                except queue.Empty:
                    pass
            self.q.put(frame)

    def read(self):
        return self.q.get()

# Functions ############################################################################################################
def main():
    while True:
        try:
            windowName = "Live Video Feed"
            cv2.namedWindow(windowName)
            if cv2.waitKey(1) == ord("c"):
                time.sleep(1)
                now = datetime.now()
                formatted_time = now.strftime('%Y-%m-%d %H-%M-%S.%f')[:-3]
                cam = VideoCapture(0)
                frame1 = cam.read()
                cv2.imshow(windowName, frame1)
                cv2.imwrite(intpath + file_Prefix + formatted_time + file_Extension, frame1)
                print(formatted_time)
            else:
                continue
        except:
            pass

# Execute Code #########################################################################################################
if __name__ == "__main__":
    main()
Output for Raspberry Pi :-
2021-01-06 17-07-59.501
[ WARN:4] global /tmp/pip-wheel-qd18ncao/opencv-python/opencv/modules/videoio/src/cap_v4l.cpp (893) open VIDEOIO(V4L2:/dev/video0): can't open camera by index
Process finished with exit code 137 (interrupted by signal 9: SIGKILL)
OpenCV on the Raspberry Pi was installed with pip and not compiled manually. General OpenCV functions like VideoCapture and imshow work fine on the Raspberry Pi, and the script captures the first photo successfully, but it cannot capture the second one.
Please suggest what the problem could be and what I can try next.
Edit 1 - Added the whole error after printing the exception:
/home/pi/PycharmProjects/pythonProject/venv/bin/python "/home/pi/PycharmProjects/pythonProject/Image Capture.py"
2021-01-07 15-07-36.555
[ WARN:4] global /tmp/pip-wheel-qd18ncao/opencv-python/opencv/modules/videoio/src/cap_v4l.cpp (893) open VIDEOIO(V4L2:/dev/video0): can't open camera by index
Traceback (most recent call last):
  File "/home/pi/PycharmProjects/pythonProject/Image Capture.py", line 72, in <module>
    main()
  File "/home/pi/PycharmProjects/pythonProject/Image Capture.py", line 59, in main
    frame1 = cam.read()
  File "/home/pi/PycharmProjects/pythonProject/Image Capture.py", line 42, in read
    return self.q.get()
  File "/usr/lib/python3.7/queue.py", line 170, in get
    self.not_empty.wait()
  File "/usr/lib/python3.7/threading.py", line 296, in wait
    waiter.acquire()
KeyboardInterrupt
Process finished with exit code 1
Your mistake is probably cam = VideoCapture(0) inside the loop.
You should create it only once, before the loop.
If you try to open it a second time (for example inside the loop) while the previous cam = VideoCapture(0) still holds the device, the system can't access the camera.
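As an illustration only, a sketch of that restructuring applied to the question's main(); it assumes the VideoCapture wrapper class and the path variables from the question are already defined:

cam = VideoCapture(0)              # or VideoCapture(0 + cv2.CAP_DSHOW) on Windows - created once, before the loop

def main():
    windowName = "Live Video Feed"
    cv2.namedWindow(windowName)
    while True:
        if cv2.waitKey(1) == ord("c"):
            formatted_time = datetime.now().strftime('%Y-%m-%d %H-%M-%S.%f')[:-3]
            frame1 = cam.read()    # reuse the same capture instead of opening a new one
            cv2.imshow(windowName, frame1)
            cv2.imwrite(intpath + file_Prefix + formatted_time + file_Extension, frame1)
            print(formatted_time)

if __name__ == "__main__":
    main()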

stop a thread from within another thread in python

I have a function that replays some steps from a .json file and another function that records those steps. I need the recordScreen function to stop as soon as the playActions function finishes (by using the flag) and to create a video for each iteration, but it only creates the video for the last file (iteration).
I have tried with a flag that changes to True when the playActions function finishes. I have also tried with a queue, following this example link, and with a thread-safe threading.Event(), following this example link. But as I am a beginner I have not been able to implement any of them correctly within my code, which is as follows (a sketch of the Event approach is shown after the code):
files= ["actions_test_10-07-2020_15-56-43.json", "actions_test_10-08-2020_14-59-00.json"]
date = datetime.today().strftime("%m-%d-%Y_%H-%M-%S")
Stop_recording = False
def main():
initializePyAutoGUI()
countdownTimer()
for i in range(len(files)):
global Stop_recording
Stop_recording = False
t1 = threading.Thread(target=playActions, args=[files[i]])
t2 = threading.Thread(target=recordScreen)
t1.start()
t2.start()
t1.join()
t2.join()
print("Done")
def recordScreen():
output = '{}.avi'.format(date)
img = pyautogui.screenshot()
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# get info from img
height, width, channels = img.shape
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output, fourcc, 20.0, (width, height))
while not Stop_recording:
img = pyautogui.screenshot()
image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
out.write(image)
StopIteration(0.5)
out.release()
cv2.destroyAllWindows()
def playActions(filename):
# Read the file
script_dir = os.path.dirname(__file__)
filepath = os.path.join(script_dir, 'recordings', filename)
with open(filepath, 'r') as jsonfile:
# parse the json
data = json.load(jsonfile)
# loop over each action
# Because we are not waiting any time before executing the first action, any delay before the initial
# action is recorded will not be reflected in the playback.
for index, action in enumerate(data):
action_start_time = time()
# look for escape input to exit
if action['button'] == 'Key.esc':
break
# perform the action
if action['type'] == 'keyDown':
key = convertKey(action['button'])
pyautogui.keyDown(key)
print("keyDown on {}".format(key))
elif action['type'] == 'keyUp':
key = convertKey(action['button'])
pyautogui.keyUp(key)
print("keyUp on {}".format(key))
elif action['type'] == 'click' and action['button'] == "Button.right":
pyautogui.rightClick(action['pos'][0], action['pos'][1], duration=0.25)
print("right click on {}".format(action['pos']))
elif action['type'] == 'click' and action['button'] == "Button.left":
# Check if the period between clicks is short and perform a double click then, otherwise
# it performs a single click
if index > 0:
if (data[index]['time']) - (data[index - 1]['time']) < 0.5:
pyautogui.doubleClick(action['pos'][0], action['pos'][1])
print("Double click on {}".format(action['pos']))
pyautogui.leftClick(action['pos'][0], action['pos'][1], duration=0.25)
print("left click on {}".format(action['pos']))
# then sleep until next action should occur
try:
next_action = data[index + 1]
except IndexError:
# this was the last action in the list
break
elapsed_time = next_action['time'] - action['time']
# if elapsed_time is negative, that means our actions are not ordered correctly. throw an error
if elapsed_time < 0:
raise Exception('Unexpected action ordering.')
# adjust elapsed_time to account for our code taking time to run
elapsed_time -= (time() - action_start_time)
if elapsed_time < 0:
elapsed_time = 0
print('sleeping for {}'.format(elapsed_time))
sleep(elapsed_time)
global Stop_recording
Stop_recording = True
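Since the question mentions threading.Event, here is a minimal sketch of how the global flag could be swapped for an Event, and how giving each iteration its own output file name (here recording_{i}.avi, an illustrative choice) avoids overwriting. It is not the asker's code and assumes the files list, pyautogui, cv2 and numpy imports from above:

stop_recording = threading.Event()

def recordScreen(output_name):
    # Capture one frame first to get the screen size for the writer.
    first = cv2.cvtColor(np.array(pyautogui.screenshot()), cv2.COLOR_RGB2BGR)
    height, width, _ = first.shape
    out = cv2.VideoWriter(output_name, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (width, height))
    out.write(first)
    while not stop_recording.is_set():
        frame = cv2.cvtColor(np.array(pyautogui.screenshot()), cv2.COLOR_RGB2BGR)
        out.write(frame)
    out.release()

def playActions(filename):
    ...                              # replay the recorded steps as in the question
    stop_recording.set()             # signal the recorder that this iteration is done

for i, f in enumerate(files):
    stop_recording.clear()           # reset the event before each iteration
    t1 = threading.Thread(target=playActions, args=[f])
    t2 = threading.Thread(target=recordScreen, args=['recording_{}.avi'.format(i)])
    t1.start()
    t2.start()
    t1.join()
    t2.join()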

Is it possible to save a stream in a variable?

I am trying to save 10 seconds of buffered video using Python, in particular in the '.h264' format.
In order to do so, I have been using a PiCamera connected to a Raspberry Pi and the script shown below. The main roadblock I am facing right now is that instead of saving the file directly to a location [stream.copy_to(str(time)+'.h264')], I would like to save it to a variable in order to perform certain operations (e.g. change the video resolution) before finally saving it. Any idea how this can be achieved?
Thanks in advance!
import time
import io
import os
import picamera
import datetime as dt
from PIL import Image
import cv2

#obtain current time
def return_currentTime():
    return dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

#trigger event declaration
def motion_detected():
    while True:
        print("Trigger event(y)?")
        trigger = input()
        if trigger == "y":
            time = return_currentTime()
            print("Buffering...")
            stream.copy_to(str(time) + '.h264')
        else:
            camera.stop_recording()
            break

#countdown timer
def countdown(t):
    while t:
        mins, secs = divmod(t, 60)
        timer = '{:02d}:{:02d}'.format(mins, secs)
        print(timer, end="\r")
        time.sleep(1)
        t -= 1
    print('Buffer available!')

camera = picamera.PiCamera()
camera.resolution = (640, 480)
stream = picamera.PiCameraCircularIO(camera, seconds=5)

#code will work using h264 as format
camera.start_recording(stream, format='h264')
countdown(5)
motion_detected()
I don't have a Raspberry Pi, but I have an idea of how you can do it.
I used VideoStream, which also supports PiCamera, so you can use the code below.
stream = VideoStream(usePiCamera=False,
                     resolution=(640, 480),
                     framerate=32).start()
# Wait for two seconds to warm up the webcam.
time.sleep(2.0)
Start getting frames:
while True:
    frame = stream.read()
    countdown(5)
    motion_detected()
Modify motion_detected() so it receives the frame to save:
while True:
    frame = stream.read()
    countdown(5)
    motion_detected(frame)
Now we need to store the frames, either in a list or in a dictionary.
A dictionary can be faster than a list for this (source).
We need to initialize a global dictionary at the top of the project file.
import time
import datetime as dt
from imutils.video import VideoStream
dictionary = {}
count = 0
We need to modify the motion_detected method. We start by adding the input parameter:
# trigger event declaration
def motion_detected(input_frame):
Second, we define the global variables inside motion_detected:
# trigger event declaration
def motion_detected(input_frame):
    global dictionary
    global count
Unfortunately, the VideoStream object has no copy_to attribute, so I assign the frame to the dictionary directly:
def motion_detected(input_frame):
    global dictionary
    global count
    while True:
        print("Trigger event(y)?")
        trigger = input()
        if trigger == "y":
            current_time = return_current_time()
            print("Buffering...")
            # stream.copy_to(str(current_time) + '.h264')
            dictionary[count] = input_frame
            count += 1
            if count == 10:
                print("\n10 frames are stored\n")
        else:
            stream.stop()
            break
Now we can perform certain operations like detecting edges.
while True:
    frame = stream.read()
    countdown(5)
    motion_detected(frame)
    for stored_frame in dictionary.values():
        result = cv2.Canny(image=stored_frame,
                           threshold1=50,
                           threshold2=100)
Output: (example edge-detection image omitted)
Saving the frames
To save the frames, you need to enumerate over the stored frames.
for count, stored_frame in enumerate(dictionary.values()):
Then, apply your operation:
for count, stored_frame in enumerate(dictionary.values()):
    result = cv2.Canny(image=stored_frame,
                       threshold1=50,
                       threshold2=100)
Save it to a folder:
for count, stored_frame in enumerate(dictionary.values()):
    result = cv2.Canny(image=stored_frame,
                       threshold1=50,
                       threshold2=100)
    cv2.imwrite("output/frame_{}.png".format(count), result)
If you want to loop through multiple times, the above code won't work, because count restarts from 0 on each pass and the saved files get overwritten. In this case, you need to initialize a counter above the while loop.
counter = 0

while True:
    frame = stream.read()
    countdown(5)
    motion_detected(frame)
    for stored_frame in dictionary.values():
        result = cv2.Canny(image=stored_frame,
                           threshold1=50,
                           threshold2=100)
        cv2.imwrite("output/frame_{}.png".format(counter), result)
        counter += 1
Code:
import cv2
import time
import datetime as dt
from imutils.video import VideoStream

dictionary = {}
count = 0

# obtain current time
def return_current_time():
    return dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

# trigger event declaration
def motion_detected(input_frame):
    global dictionary
    global count
    while True:
        print("Trigger event(y)?")
        trigger = input()
        if trigger == "y":
            current_time = return_current_time()
            print("Buffering...")
            # stream.copy_to(str(current_time) + '.h264')
            dictionary[count] = input_frame
            count += 1
            if count == 10:
                print("\n10 frames are stored\n")
        else:
            stream.stop()
            break

# countdown timer
def countdown(t):
    while t:
        mins, secs = divmod(t, 60)
        timer = '{:02d}:{:02d}'.format(mins, secs)
        print(timer, end="\r")
        time.sleep(1)
        t -= 1
    print('Buffer available!')

stream = VideoStream(usePiCamera=False,
                     resolution=(640, 480),
                     framerate=32).start()
time.sleep(2.0)

counter = 0

while True:
    frame = stream.read()
    countdown(5)
    motion_detected(frame)
    for stored_frame in dictionary.values():
        result = cv2.Canny(image=stored_frame,
                           threshold1=50,
                           threshold2=100)
        cv2.imwrite("output/frame_{}.png".format(counter), result)
        counter += 1
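Coming back to the original picamera question: if you stay with PiCameraCircularIO, one way to get the buffered video into a variable is to copy it into an in-memory file object. This is a sketch based on the picamera documentation (copy_to accepts a file-like object as well as a filename); it is untested here since, as noted above, no Raspberry Pi was available:

import io
import picamera

camera = picamera.PiCamera(resolution=(640, 480))
stream = picamera.PiCameraCircularIO(camera, seconds=5)
camera.start_recording(stream, format='h264')
try:
    camera.wait_recording(5)        # keep the circular buffer filled
    buffer = io.BytesIO()
    stream.copy_to(buffer)          # copy the buffered H.264 data into memory
    h264_bytes = buffer.getvalue()  # a plain bytes object you can post-process before saving
finally:
    camera.stop_recording()
    camera.close()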

PYTHON: how to automatically capture an image from RTSP every minute, for up to 24 hours

So I want to write code in Python that extracts photos as frames from an RTSP camera (live streaming).
I also want these photos to be stored with a timestamp and date, which I think I have done. My only challenge is that I want these photos to be saved to my local computer automatically every minute, and for this to stop after 24 hours.
How do I go about this?
This is my current code
imagesFolder = "C:/Users/<user>/documents"
cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
frameRate = cap.get(5) #frame rate
count = 0
while cap.isOpened():
frameId = cap.get(1) # current frame number
ret, frame = cap.read()
if (ret != True):
break
if (frameId % math.floor(frameRate) == 0):
filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
cv2.imwrite(filename, frame)
cap.release()
print ("Done!")
cv2.destroyAllWindows()
You may simply wait 60 seconds between frame captures, and break the loop after 24*60 cycles.
I tried testing my code using a public RTSP stream, but I am getting black frames, so I can't fully test it.
Here is the code:
import cv2
import time
from datetime import datetime
import getpass

#imagesFolder = "C:/Users/<user>/documents"
# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing, but I am getting black frames!
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")

frameRate = cap.get(5)  #frame rate
count = 0

while cap.isOpened():
    start_time = time.time()
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    if ret != True:
        break

    filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
    cv2.imwrite(filename, frame)

    # Show frame for testing
    cv2.imshow('frame', frame)
    cv2.waitKey(1)

    count += 1

    # Break loop after 24*60 minutes
    if count > 24*60:
        break

    elapsed_time = time.time() - start_time

    # Wait for 60 seconds (subtract elapsed_time in order to be accurate).
    time.sleep(60 - elapsed_time)

cap.release()
print("Done!")
cv2.destroyAllWindows()
Update:
The code sample above is not working - the first frame is repeated every minute.
Suggested solution:
Grab all of the video frames, and save a frame once every minute.
The one-minute time delta is going to be accurate to within about 0.2 seconds in the case of 5 Hz video.
Use a separate timer for measuring 24 hours.
Here is the updated code (reading from public RTSP):
import cv2
import time
from datetime import datetime
import getpass

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing:
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
#cap = cv2.VideoCapture("test2.mp4")

frameRate = cap.get(5)  #frame rate

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 59  # Subtract 59 seconds for start grabbing first frame after one second (instead of waiting a minute for the first frame).

while cap.isOpened():
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    if ret != True:
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time)  # Sleeping is a bad idea - we need to grab all the frames.

cap.release()
print("Done!")
cv2.destroyAllWindows()
Now the images from the public RTSP stream look OK (sample image omitted).
Update:
You may try capturing the video stream using FFmpeg (instead of OpenCV).
Read the following blog: Read and Write Video Frames in Python Using FFMPEG
In case you are using Windows OS, download the latest stable 64-bit static version from here (currently 4.2.2).
Extract the zip file, and place ffmpeg.exe in the same folder as your Python script.
Here is the code (capturing using FFmpeg as sub-process and stdout as a PIPE):
import cv2
import time
from datetime import datetime
import getpass
import numpy as np
import subprocess as sp

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing:
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"

cap = cv2.VideoCapture(in_stream)
#cap = cv2.VideoCapture("test2.mp4")

frameRate = cap.get(5)  #frame rate

# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting video resolution
cap.release()

#in_stream = "rtsp://xxx.xxx.xxx.xxx:xxx/Streaming/Channels/101?transportmode=multicast",
#Use public RTSP Streaming for testing
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"

# http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/
FFMPEG_BIN = "ffmpeg"  # on Linux and Mac OS (also works on Windows when ffmpeg.exe is in the path)
#FFMPEG_BIN = "ffmpeg.exe"  # on Windows

command = [ FFMPEG_BIN,
            '-i', in_stream,
            '-f', 'image2pipe',
            '-pix_fmt', 'bgr24',
            '-vcodec', 'rawvideo', '-an', '-']

# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds for start grabbing first frame after 30 seconds (instead of waiting a minute for the first frame).

while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)

    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the byte read into a numpy array, and reshape it to video frame dimensions
        frame = np.fromstring(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time)  # Sleeping is a bad idea - we need to grab all the frames.

print("Done!")

pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
I tried using the apscheduler API but to no avail. Maybe someone can look at this differently and make it work.
import cv2
import math
import getpass
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password#CameraIP/axis-media/media.amp")
frameRate = cap.get(5)  #frame rate
count = 0

def some_job():
    while cap.isOpened():
        frameId = cap.get(1)
        ret, frame = cap.read()
        if ret != True:
            break
        if frameId % math.floor(frameRate) == 0:
            filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
            cv2.imwrite(filename, frame)

scheduler = BlockingScheduler()
scheduler.add_job(some_job, 'interval', seconds=60, start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
scheduler.start()

cap.release()
print("Done!")

# Closes all the frames
cv2.destroyAllWindows()
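One possible reason the scheduler version stalls is that some_job never returns (its while cap.isOpened() loop keeps reading until the stream ends), so the interval trigger has nothing left to re-run and the code after scheduler.start() is never reached. Below is a sketch of the same idea where each scheduled run grabs exactly one frame. It is untested, the RTSP URL is the question's placeholder, and note that on some cameras reading only once a minute can return a stale buffered frame:

import cv2
import getpass
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password#CameraIP/axis-media/media.amp")

def grab_one_frame():
    # Each scheduled run reads a single frame and writes it to disk.
    ret, frame = cap.read()
    if not ret:
        return
    filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
    cv2.imwrite(filename, frame)

scheduler = BlockingScheduler()
scheduler.add_job(grab_one_frame, 'interval', minutes=1,
                  start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
try:
    scheduler.start()   # blocks; the job fires once a minute between start_date and end_date
except (KeyboardInterrupt, SystemExit):
    pass
cap.release()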
The following code (using FFmpeg with the TCP transport option) works somewhat better than the other solutions. The only challenge here is that the photos stop saving after the first 3 minutes (the first 3 photos are taken within seconds but saved later, one during each minute). The goal now is to ensure that it keeps saving every minute, for up to 24 hours, before it stops.
import cv2
import time
import getpass
import numpy as np
import subprocess as sp
from datetime import datetime

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

# RTSP Streaming:
in_stream = "rtsp://username:password#cameraIP/axis-media/media.amp"

cap = cv2.VideoCapture(in_stream)

frameRate = cap.get(5)  #frame rate

# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting video resolution
cap.release()

in_stream = "rtsp://username:password#cameraIP/axis-media/media.amp"

FFMPEG_BIN = "ffmpeg.exe"  # on Windows

# Suspecting camera supports TCP protocol hence added: '-rtsp_transport', 'tcp'
command = [ FFMPEG_BIN,
            '-rtsp_transport', 'tcp',
            '-i', in_stream,
            '-f', 'image2pipe',
            '-pix_fmt', 'bgr24',
            '-vcodec', 'rawvideo', '-an', '-']

# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds for start grabbing first frame after 30 seconds (instead of waiting a minute for the first frame).

while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)

    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the byte read into a numpy array, and reshape it to video frame dimensions
        frame = np.fromstring(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time)  # Sleeping is a bad idea - we need to grab all the frames.

print("Done!")

pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()

Global variable in Python, initializing and terminating

I'm making a program that lets me record video using the Raspberry Pi camera library for Python. There's one small issue: when you run camera = picamera.PiCamera(), the camera is enabled and stays in use until the end of the program. What I would like to do is enable it only when recording and stop it when recording is done, while still keeping my program active.
What I need:
How do I create a global variable for the picamera, and how do I terminate it?
Part of my code that's relevant:
camera = picamera.PiCamera()
camera.resolution = (1920, 1080)
filename = ""

#Start recording video into raw file
def start_record():
    print("Starting recording")
    reset_tmp()
    global filename
    filename = "vid/" + str(int(time.time()))
    camera.start_recording(filename+".h264")

#Stop recording and convert h264 raw file to mp4 and remove raw file
def stop_record():
    print("Stopping recording")
    reset_tmp()
    global filename
    camera.stop_recording()
    os.system("MP4Box -fps 30 -add "+filename+".h264"+" "+filename+".mp4")
    os.system("rm "+filename+".h264")
Updated version of the code, functional
For those looking for the solution from the title: you can use the del keyword to get rid of variables, but the picamera library has a .close() method to terminate the camera object. Here is my fixed code:
camera = None
filename = ""

#Start recording video into raw file
def start_record():
    print("Starting recording")
    reset_tmp()
    global filename
    filename = "vid/" + str(int(time.time()))
    global camera
    camera = picamera.PiCamera()
    camera.resolution = (1920, 1080)
    camera.start_recording(filename+".h264")

#Stop recording and convert h264 raw file to mp4 and remove raw file
def stop_record():
    print("Stopping recording")
    reset_tmp()
    global filename
    global camera
    camera.stop_recording()
    camera.close()
    os.system("MP4Box -fps 30 -add "+filename+".h264"+" "+filename+".mp4")
    os.system("rm "+filename+".h264")
No need for globals here. Just use return values:
import os
import subprocess
import time

import picamera

def start_record(resolution=(1920, 1080)):
    """Start recording video into raw file"""
    print("Starting recording")
    camera = picamera.PiCamera()
    camera.resolution = resolution
    reset_tmp()
    filename = os.path.join('vid', '{}.h264'.format(int(time.time())))
    camera.start_recording(filename)
    return camera, filename

def stop_record(camera, filename):
    """Stop recording and convert h264 raw file to mp4 and remove raw file"""
    print("Stopping recording")
    reset_tmp()
    camera.stop_recording()
    mp4_fn = os.path.splitext(filename)[0] + '.mp4'
    subprocess.call(['MP4Box', '-fps', '30', '-add', filename, mp4_fn])
    os.remove(filename)
Now call the start function:
camera, filename = start_record()
and later the stop function:
stop_record(camera, filename)
How about:
camera = None
filename = ""

#Start recording video into raw file
def start_record():
    print("Starting recording")
    reset_tmp()
    global filename
    filename = "vid/" + str(int(time.time()))
    global camera
    camera = picamera.PiCamera()
    camera.resolution = (1920, 1080)
    camera.start_recording(filename+".h264")
The basic idea would be to move the code that starts the camera into a function, where global can be used to modify the global variable.
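For symmetry, a matching stop function under the same global-variable idea might look like the sketch below; it mirrors what the question's updated code already does, using picamera's close() to free the device:

def stop_record():
    global camera
    if camera is not None:
        camera.stop_recording()
        camera.close()   # releases the PiCamera device
        camera = None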
