As I saw in the Python documentation,
https://docs.python.org/3/library/mmap.html,
Python on Linux fully supports memory-mapped files. However, when I try to apply this idea to my application, I cannot get the sample to work.
My application sends frames from one Python file (the client) to another Python file (the server).
Client Code
import mmap
import time
import os
import cv2 as cv
print("Opening camera...")
cap = cv.VideoCapture('/home/hunglv/Downloads/IMG_8442.MOV')
mm = None
try:
    while True:
        ret, img = cap.read()
        if not ret:
            break
        if mm is None:
            mm = mmap.mmap(-1, img.size, mmap.MAP_SHARED, mmap.PROT_WRITE)
        # write image
        start = time.time()
        buf = img.tobytes()
        mm.seek(0)
        mm.write(buf)
        mm.flush()
        stop = time.time()
        print("Writing Duration:", (stop - start) * 1000, "ms")
except KeyboardInterrupt:
    pass
print("Closing resources")
cap.release()
mm.close()
Server Code
import mmap
import time
import os
import cv2 as cv
import numpy as np
shape = (1080, 1920, 3)
n = np.prod(shape)
mm = mmap.mmap(-1, n)
while True:
    # read image
    print(mm)
    start = time.perf_counter()
    mm.seek(0)
    buf = mm.read(12)
    img = np.frombuffer(buf, dtype=np.uint8).reshape(shape)
    stop = time.perf_counter()
    print("Reading Duration:", (stop - start) * 1000, "ms")
    cv.imshow("img", img)
    key = cv.waitKey(1) & 0xFF
    key = chr(key)
    if key.lower() == "q":
        break
cv.destroyAllWindows()
mm.close()
On the server side, I set the memory index to 0 and try to read the bytes from memory. However, it seems that the server cannot read the data from the client correctly.
[Updated]
I've tried to read out the first 12 bytes on the server side. The value is constant and never changes:
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
For comparison, the first 12 bytes of a random frame are:
b'\xf5\xff\xff\xf0\xfa\xfe\xdf\xe9\xed\xd2\xdc\xe0'
First I found an example which probably works, but it uses a tagName (the same for client and server), which means it is Windows-only:
python-mmap-ipc
Next I found code which works on Linux:
Sharing Python data between processes using mmap.
It creates a real file on disk, resizes it to the size of the image, and then uses its fd in mmap().
I use a web camera for the test.
Server
import mmap
import time
import os
import cv2
print("Opening camera...")
cap = cv2.VideoCapture(0)
#print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) # 640
#print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) # 480
shape = (480, 640, 3)
n = (480*640*3)
fd = os.open('/tmp/mmaptest', os.O_CREAT | os.O_TRUNC | os.O_RDWR)
#os.write(fd, b'\x00' * n) # resize file
os.truncate(fd, n) # resize file
mm = None
try:
    while True:
        ret, img = cap.read()
        if not ret:
            break
        if mm is None:
            mm = mmap.mmap(fd, n, mmap.MAP_SHARED, mmap.PROT_WRITE)  # it has to be only for writing
        # write image
        start = time.perf_counter()
        buf = img.tobytes()
        mm.seek(0)
        mm.write(buf)
        mm.flush()
        stop = time.perf_counter()
        print("Writing Duration:", (stop - start) * 1000, "ms")
except KeyboardInterrupt:
    pass
print("Closing resources")
cap.release()
mm.close()
Client
import mmap
import time
import os
import cv2
import numpy as np
shape = (480, 640, 3)
n = (480*640*3)
fd = os.open('/tmp/mmaptest', os.O_RDONLY)
mm = mmap.mmap(fd, n, mmap.MAP_SHARED, mmap.PROT_READ) # it has to be only for reading
while True:
    # read image
    start = time.perf_counter()
    mm.seek(0)
    buf = mm.read(n)
    img = np.frombuffer(buf, dtype=np.uint8).reshape(shape)
    stop = time.perf_counter()
    print("Reading Duration:", (stop - start) * 1000, "ms")
    cv2.imshow("img", img)
    key = cv2.waitKey(1) & 0xFF
    key = chr(key)
    if key.lower() == "q":
        break
cv2.destroyAllWindows()
mm.close()
BTW: mmap() with -1 (without creating a file on disk) could probably work with threads (or forks), because they share the same memory.
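For illustration, here is a minimal sketch (my own addition, assuming a Unix fork() model, not something from the original code) showing that an anonymous mapping created before os.fork() is visible in both the parent and the child:

import mmap
import os

n = 16
mm = mmap.mmap(-1, n)      # anonymous mapping, shared with children created by fork()

pid = os.fork()
if pid == 0:
    # child process: write into the shared mapping
    mm.seek(0)
    mm.write(b"hello from child")   # exactly 16 bytes
    os._exit(0)
else:
    os.waitpid(pid, 0)              # wait until the child has written
    mm.seek(0)
    print(mm.read(n))               # b'hello from child'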
Related
I am getting the following warning:
[ WARN:0#1.140] global /private/var/folders/sy/f16zz6x50xz3113nwtb9bvq00000gp/T/abs_506zufg7xt/croots/recipe/opencv-suite_1664548331847/work/modules/videoio/src/cap_gstreamer.cpp (862) isPipelinePlaying OpenCV | GStreamer warning: GStreamer: pipeline have not been created
I am attempting to run the following code on my MacBook Pro with macOS 10.15.7 to stream video from a PiCamera on my Raspberry Pi to my MacBook, in order to use it as a virtual camera:
import cv2
import numpy as np
import io
import socket
import struct
import time
from Quartz import CoreVideo
import CoreMedia

IMG_W = 1280
IMG_H = 720

server_socket = socket.socket()
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(0)
connection = server_socket.accept()[0].makefile('rb')

cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, IMG_W)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, IMG_H)

try:
    display_link = CoreVideo.CVDisplayLinkCreateWithActiveCGDisplays(None)
    while True:
        ret, frame = cam.read()
        flipped = cv2.flip(frame, 1)
        if frame is not None:
            print("frame is not None, proceed with flip")
            flipped = cv2.flip(frame, 1)
            frame[0 : IMG_H, IMG_W//2 : IMG_W] = flipped[0 : IMG_H, IMG_W//2 : IMG_W]
        else:
            print("frame is None, skipping flip")

        # Read the length of the image as a 32-bit unsigned int.
        image_len_packed = connection.read(struct.calcsize('<L'))
        if not image_len_packed:
            break
        image_len = struct.unpack('<L', image_len_packed)[0]

        # Read the image data.
        image_stream = io.BytesIO()
        remaining = image_len
        while remaining > 0:
            chunk = connection.read(min(remaining, 4096))
            if not chunk:
                break
            image_stream.write(chunk)
            remaining -= len(chunk)

        # Rewind the stream
        image_stream.seek(0)
        image = np.array(bytearray(image_stream.read()), dtype=np.uint8)
        frame = cv2.imdecode(image, cv2.IMREAD_COLOR)

        # Resize the frame to the desired size for the virtual camera
        if frame is not None and frame.shape == (IMG_H, IMG_W, 3):
            print("frame is not None and has correct shape")
            frame = cv2.resize(frame, (IMG_W, IMG_H))
        else:
            print("frame is None or has incorrect shape, skipping resize")

        # Convert the frame to a format suitable for OS X's AVFoundation framework
        if frame is not None and frame.shape[0] > 0 and frame.shape[1] > 0:
            print("frame is not None and has dimensions")
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        else:
            print("frame is None or has no dimensions, skipping cvtColor")

        # Create a `CVPixelBuffer` from the image data
        if frame is not None:
            print("frame and pixel_buffer are not none")
            pixel_buffer = CoreVideo.CVPixelBufferCreateWithBytes(
                IMG_W, IMG_H, CoreVideo.kCVPixelFormatType_32BGRA,
                frame.tobytes(), frame.strides[0],
                None, None, None, None, None,
            )

            # Create a `CMSampleBuffer` from the pixel buffer
            sample_buffer = CoreMedia.CMSampleBufferCreateForImageBuffer(
                None, pixel_buffer, True, None, None,
                CoreMedia.kCMSampleAttachmentKey_DisplayImmediately,
            )

            # Send the sample buffer to the virtual camera
            CoreVideo.CVDisplayLinkStart(display_link)
            CoreVideo.CVPixelBufferLockBaseAddress(pixel_buffer, 0)
            CoreVideo.CVDisplayLinkRender(display_link, sample_buffer)
            CoreVideo.CVPixelBufferUnlockBaseAddress(pixel_buffer, 0)
            CoreVideo.CVDisplayLinkStop(display_link)
        else:
            print("frame is None, skipping")
except Exception as e:
    print(e)
finally:
    cam.release()
    CoreVideo.CVDisplayLinkStop(display_link)
    CoreVideo.CVDisplayLinkRelease(display_link)
And here is my corresponding code that I am running on my RaspberryPi to transmit the stream:
import io
import socket
import struct
import time
import picamera

# Create a socket connection between the Raspberry Pi and the computer
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('10.37.68.171', 8000))
connection = client_socket.makefile('wb')

try:
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        # Start a stream to the socket
        camera.start_recording(connection, format='h264')
        while True:
            camera.wait_recording(1)
finally:
    camera.stop_recording()
    connection.close()
    client_socket.close()
First: How can I fix the warning? Or is it even necessary to fix it?
Second: Is there anything in general that I should be doing differently with my code in order to be able to use the stream as a virtual camera on my MacBook?
Below is my code. I am trying to scan barcodes and display them using OpenCV. The program works well, but there is a huge lag in fps when grabbing frames from the drone camera as an RTMP stream. Because of this, I am trying to use multiprocessing.
import pandas as pd
import cv2
import numpy as np
from pyzbar.pyzbar import decode
from pyzbar.pyzbar import ZBarSymbol
import time
import multiprocessing

global frame

def barcode(frame):
    for barcode in decode(frame, symbols=[ZBarSymbol.CODE128]):
        myData = barcode.data.decode('utf-8')
        pts = np.array([barcode.polygon], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(frame, [pts], True, (255, 0, 255), 5)
        pts2 = barcode.rect
        akash = []
        akash.append(myData)
        cv2.putText(frame, myData, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 99, 71), 2)

p1 = multiprocessing.Process(target=barcode)
cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
vid = cv2.VideoCapture(0)

if __name__ == '__main__':
    while(True):
        ret, frame = vid.read()
        if frame is not None:
            p1.start()
            cv2.imshow('Result', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vid.release()
    vid.destroyAllWindows()
and the error is
AssertionError Traceback (most recent call last)
<ipython-input-1-df50d7c70cda> in <module>
27 ret, frame = vid.read()
28 if frame is not None:
---> 29 p1.start()
30 cv2.imshow('Result',frame)
31 if cv2.waitKey(1) & 0xFF == ord('q'):
C:\ProgramData\Anaconda3\lib\multiprocessing\process.py in start(self)
113 '''
114 self._check_closed()
--> 115 assert self._popen is None, 'cannot start a process twice'
116 assert self._parent_pid == os.getpid(), \
117 'can only start a process object created by current process'
AssertionError: cannot start a process twice
Try not to create processes inside loops. The best way to use processes is to create n processes up front and then, with the help of Queues, push data to them and read back the results.
In the following code, I create 5 processes which run forever and try to fetch data from the inQ queue.
Then they do all the processing that you were doing.
After that, each result is pushed to the outQ queue, which we'll use later to show the results.
In the main block, I simply read frames from the OpenCV vid object and push them to inQ, which our processes use to fetch frames.
Next, I just fetch the results. This way seems better to me: we don't have to create a process in every iteration, and we have multiple processes ready to process the data at all times.
You can also set a buffer limit for the queue if you want. Also, with live streams, try to have a skipFrame parameter to skip a few frames; that will boost the fps (see the sketch after the code below).
import cv2
import numpy as np
from pyzbar.pyzbar import decode
from pyzbar.pyzbar import ZBarSymbol
import time
from multiprocessing import Process, Queue

inQ = Queue()
outQ = Queue()

def barcode():
    global inQ
    global outQ
    while True:  # keep each worker alive, pulling frames from inQ forever
        try:
            print("Solving..")
            frame = inQ.get()
            for barcode in decode(frame, symbols=[ZBarSymbol.CODE128]):
                myData = barcode.data.decode('utf-8')
                pts = np.array([barcode.polygon], np.int32)
                pts = pts.reshape((-1, 1, 2))
                cv2.polylines(frame, [pts], True, (255, 0, 255), 5)
                pts2 = barcode.rect
                akash = []
                akash.append(myData)
                cv2.putText(frame, myData, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 99, 71), 2)
            outQ.put(frame)
        except Exception as e:
            print(e)

for _ in range(5):  # configure yourself
    Process(target=barcode).start()

cv2.namedWindow("Result", cv2.WINDOW_NORMAL)

if __name__ == '__main__':
    print("Inside main")
    vid = cv2.VideoCapture(0)
    while vid.isOpened():
        print("While...")
        ret, frame = vid.read()
        if ret:
            try:
                inQ.put(frame)
            except Exception as e:
                print(e)
        try:
            output = outQ.get()
            cv2.imshow("Result", output)
            cv2.waitKey(1)  # needed so the imshow window actually refreshes
        except Exception as e:
            print(e)
    vid.release()
    cv2.destroyAllWindows()
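As a rough sketch of the buffer limit and skipFrame ideas mentioned above (the names maxsize=32 and skipFrames are my own choices, not part of the code above), the capture side could look like this:

import cv2
from multiprocessing import Queue

inQ = Queue(maxsize=32)   # bounded buffer: put() blocks once 32 frames are waiting
skipFrames = 2            # hypothetical parameter: decode only every 3rd frame

vid = cv2.VideoCapture(0)
frame_idx = 0
while vid.isOpened():
    ret, frame = vid.read()
    if not ret:
        break
    frame_idx += 1
    if frame_idx % (skipFrames + 1) != 0:
        continue          # drop this frame instead of queueing it
    inQ.put(frame)        # blocks when the queue is full, throttling capture
vid.release()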
So I want to write code in Python where I extract photos as frames from an RTSP camera (live streaming).
I would also like these photos to be stored with a timestamp and date, which I think I have done. My only challenge is that I want these photos to be saved to my local computer automatically every minute, stopping after 24 hours.
How do I go about this?
This is my current code:
import cv2
import math
from datetime import datetime

imagesFolder = "C:/Users/<user>/documents"
cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
frameRate = cap.get(5) #frame rate
count = 0
while cap.isOpened():
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    if (ret != True):
        break
    if (frameId % math.floor(frameRate) == 0):
        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)
cap.release()
print("Done!")
cv2.destroyAllWindows()
You may simply wait 60 seconds between frame capturing, and break the loop after 24*60 cycles.
I tried testing my code using a public RTSP stream, but I am getting black frames, so I can't fully test it.
Here is the code:
import cv2
import time
from datetime import datetime
import getpass

#imagesFolder = "C:/Users/<user>/documents"
# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing, but I am getting black frames!
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
frameRate = cap.get(5) #frame rate
count = 0

while cap.isOpened():
    start_time = time.time()
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    if (ret != True):
        break
    filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
    cv2.imwrite(filename, frame)
    # Show frame for testing
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    count += 1
    # Break loop after 24*60 minutes
    if count > 24*60:
        break
    elapsed_time = time.time() - start_time
    # Wait for 60 seconds (subtract elapsed_time in order to be accurate).
    time.sleep(60 - elapsed_time)

cap.release()
print("Done!")
cv2.destroyAllWindows()
Update:
The code sample above is not working - the first frame is repeated every minute.
Suggested solution:
Grab all the video frames, and save a frame once every minute.
The one-minute time delta is going to be accurate to within about 0.2 seconds in the case of 5 Hz video, because consecutive frames are 0.2 seconds apart.
Use a separate timer for measuring 24 hours.
Here is the updated code (reading from public RTSP):
import cv2
import time
from datetime import datetime
import getpass

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing:
cap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
#cap = cv2.VideoCapture("test2.mp4")
frameRate = cap.get(5) #frame rate

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 59  # Subtract 59 seconds for start grabbing first frame after one second (instead of waiting a minute for the first frame).

while cap.isOpened():
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    if (ret != True):
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time)  # Sleeping is a bad idea - we need to grab all the frames.

cap.release()
print("Done!")
cv2.destroyAllWindows()
Now the images from the public RTSP look OK:
Update:
You may try capturing the video stream using FFmpeg (instead of OpenCV).
Read the following blog: Read and Write Video Frames in Python Using FFMPEG
In case you are using Windows OS, download the latest stable 64-bit static version from here (currently 4.2.2).
Extract the zip file, and place ffmpeg.exe in the same folder as your Python script.
Here is the code (capturing using FFmpeg as sub-process and stdout as a PIPE):
import cv2
import time
from datetime import datetime
import getpass
import numpy as np
import subprocess as sp

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://username:password#cameraIP/axis-media/media.amp")
# Use public RTSP Streaming for testing:
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
cap = cv2.VideoCapture(in_stream)
#cap = cv2.VideoCapture("test2.mp4")
frameRate = cap.get(5) #frame rate

# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting video resolution
cap.release()

#in_stream = "rtsp://xxx.xxx.xxx.xxx:xxx/Streaming/Channels/101?transportmode=multicast",
#Use public RTSP Streaming for testing
in_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"

# http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/
FFMPEG_BIN = "ffmpeg" # on Linux and Mac OS (also works on Windows when ffmpeg.exe is in the path)
#FFMPEG_BIN = "ffmpeg.exe" # on Windows

command = [ FFMPEG_BIN,
            '-i', in_stream,
            '-f', 'image2pipe',
            '-pix_fmt', 'bgr24',
            '-vcodec', 'rawvideo', '-an', '-']

# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds for start grabbing first frame after 30 seconds (instead of waiting a minute for the first frame).

while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)

    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the bytes read into a numpy array, and reshape it to video frame dimensions
        frame = np.fromstring(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time)  # Sleeping is a bad idea - we need to grab all the frames.

print("Done!")
pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
I tried using the apscheduler API but to no avail. Maybe someone can look at this differently and make it work.
import cv2
import math
import getpass
import datetime
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password#CameraIP/axis-media/media.amp")
frameRate = cap.get(5) #frame rate
count = 0

def some_job():
    while cap.isOpened():
        frameId = cap.get(1)
        ret, frame = cap.read()
        if (ret != True):
            break
        if (frameId % math.floor(frameRate) == 0):
            filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
            cv2.imwrite(filename, frame)

scheduler = BlockingScheduler()
scheduler.add_job(some_job, 'interval', seconds=60, start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
scheduler.start()

cap.release()
print("Done!")

# Closes all the frames
cv2.destroyAllWindows()
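One possible variation (an untested sketch of my own, not from the posts above) is to let the scheduler do all the timing and grab exactly one frame per job, instead of running a while loop inside the job, which blocks the scheduler after its first run. Note that with a live RTSP stream this can still return stale buffered frames, which is why the other solutions grab every frame:

import cv2
import getpass
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
cap = cv2.VideoCapture("rtsp://username:password#CameraIP/axis-media/media.amp")

def grab_one_frame():
    ret, frame = cap.read()   # read a single frame each time the job fires
    if ret:
        filename = imagesFolder + "/image_" + datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p") + ".jpg"
        cv2.imwrite(filename, frame)

scheduler = BlockingScheduler()
scheduler.add_job(grab_one_frame, 'interval', seconds=60,
                  start_date='2020-04-07 16:23:00', end_date='2020-04-08 16:23:00')
try:
    scheduler.start()         # blocks here; stop with Ctrl+C or scheduler.shutdown()
except (KeyboardInterrupt, SystemExit):
    pass
cap.release()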
This works somewhat better than the other solutions. The only challenge is that the photos stop saving after the first 3 minutes (the first 3 photos are grabbed within seconds but saved one per minute). What is still needed is to make it keep saving a photo every minute for a full 24 hours before it stops.
import cv2
import time
import getpass
import numpy as np
import subprocess as sp
from datetime import datetime

imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"

# RTSP Streaming:
in_stream = "rtsp://username:password#cameraIP/axis-media/media.amp"
cap = cv2.VideoCapture(in_stream)
frameRate = cap.get(5) #frame rate

# Get resolution of input video
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Release VideoCapture - it was used just for getting video resolution
cap.release()

in_stream = "rtsp://username:password#cameraIP/axis-media/media.amp"

FFMPEG_BIN = "ffmpeg.exe" # on Windows

# Suspecting camera supports TCP protocol hence added: '-rtsp_transport', 'tcp'
command = [ FFMPEG_BIN,
            '-rtsp_transport', 'tcp',
            '-i', in_stream,
            '-f', 'image2pipe',
            '-pix_fmt', 'bgr24',
            '-vcodec', 'rawvideo', '-an', '-']

# Open sub-process that gets in_stream as input and uses stdout as an output PIPE.
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

cur_time = time.time()  # Get current time

# start_time_24h measures 24 hours
start_time_24h = cur_time

# start_time_1min measures 1 minute
start_time_1min = cur_time - 30  # Subtract 30 seconds for start grabbing first frame after 30 seconds (instead of waiting a minute for the first frame).

while True:
    # read width*height*3 bytes from stdout (= 1 frame)
    raw_frame = pipe.stdout.read(width*height*3)

    if len(raw_frame) != (width*height*3):
        print('Error reading frame!!!')  # Break the loop in case of an error (too few bytes were read).
        break

    cur_time = time.time()  # Get current time
    elapsed_time_1min = cur_time - start_time_1min  # Time elapsed from previous image saving.

    # If 60 seconds were passed, reset timer, and store image.
    if elapsed_time_1min >= 60:
        # Reset the timer that is used for measuring 60 seconds
        start_time_1min = cur_time

        # Transform the byte read into a numpy array, and reshape it to video frame dimensions
        frame = np.fromstring(raw_frame, np.uint8)
        frame = frame.reshape((height, width, 3))

        filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        #filename = "image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
        cv2.imwrite(filename, frame)

        # Show frame for testing
        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    elapsed_time_24h = time.time() - start_time_24h

    #Break loop after 24*60*60 seconds
    if elapsed_time_24h > 24*60*60:
        break

    #time.sleep(60 - elapsed_time)  # Sleeping is a bad idea - we need to grab all the frames.

print("Done!")
pipe.kill()  # Kill the sub-process after 24 hours
cv2.destroyAllWindows()
I have a Raspberry Pi and a few IP cameras, and I would like to get a fresh image from all these cameras every 5 minutes. I have the following script, which opens the RTSP feed and grabs images ALL THE TIME, taking 10-25 every second it runs.
Is there a way to open the video feed and take only 1 image?
import cv2
import time

cap = cv2.VideoCapture('rtsp://192.168.86.81:554/11') # it can be rtsp or http $
ret, frame = cap.read()
while ret:
    cv2.imwrite('images/{}.jpg'.format(time.time()), frame)
    ret, frame = cap.read()
This solved my problem. I removed time as I do not need it. I will let the above code stand in case anybody wants to play around with this.
import cv2

cap = cv2.VideoCapture('rtsp://192.168.86.81:554/11') # it can be rtsp or http stream
ret, frame = cap.read()
if cap.isOpened():
    _, frame = cap.read()
    cap.release() #releasing camera immediately after capturing picture
    if _ and frame is not None:
        cv2.imwrite('images/latest.jpg', frame)
import cv2
import time
from datetime import datetime
import getpass

#imagesFolder = "C:/Users/<user>/documents"
# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python
imagesFolder = "C:/Users/" + getpass.getuser() + "/documents"
#cap = cv2.VideoCapture("rtsp://192.168.86.81:554/11")
# Use public RTSP Streaming for testing, but I am getting black frames!
cap = cv2.VideoCapture("rtsp://192.168.86.81:554/11")
frameRate = cap.get(5) #frame rate
count = 0

while cap.isOpened():
    start_time = time.time()
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    if (ret != True):
        break
    filename = imagesFolder + "/image_" + str(datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")) + ".jpg"
    cv2.imwrite(filename, frame)
    # Show frame for testing
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    count += 1
    # Break loop after 5*60 minutes
    if count > 5*60:
        break
    elapsed_time = time.time() - start_time
    # Wait for 60 seconds (subtract elapsed_time in order to be accurate).
    time.sleep(60 - elapsed_time)

cap.release()
print("Done!")
cv2.destroyAllWindows()
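To tie this back to the goal of one fresh image every 5 minutes, a minimal sketch (my own, reusing the stream URL from the question) would open the feed, grab a single frame, release the capture, and then sleep:

import time
import cv2

while True:
    cap = cv2.VideoCapture('rtsp://192.168.86.81:554/11')  # stream URL from the question
    ret, frame = cap.read()          # grab a single frame
    cap.release()                    # close the feed right away so frames don't pile up
    if ret and frame is not None:
        cv2.imwrite('images/{}.jpg'.format(time.time()), frame)
    time.sleep(5 * 60)               # wait five minutes before the next grab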
I'm using OpenCV and Python to take images. However, currently I can only take one picture at a time. I would like OpenCV to take multiple pictures. This is my current code.
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    cv.SaveImage('pic.jpg', img)
    if cv.WaitKey(10) == 27:
        break
Your code overwrites the same file. Save to a different file each time.
For example:
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

i = 0
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    cv.SaveImage('pic{:>05}.jpg'.format(i), img)
    if cv.WaitKey(10) == 27:
        break
    i += 1
A minimal example of what you'd like to do, based on the C++-bound interface.
import cv2
import sys

cpt = 0
maxFrames = 5 # if you want 5 frames only.

try:
    vidStream = cv2.VideoCapture(0) # index of your camera
except:
    print "problem opening input stream"
    sys.exit(1)

while cpt < maxFrames:
    ret, frame = vidStream.read() # read frame and return code.
    if not ret: # if return code is bad, abort.
        sys.exit(0)
    cv2.imshow("test window", frame) # show image in window
    cv2.imwrite("image%04i.jpg" %cpt, frame)
    cpt += 1
A full example of a script, able to read from a camera index or a file. It includes some failsafes and some information about the read device.
usage: record.py [source] [target folder]
#!/usr/bin/env python

import cv2
import sys
import os

cpt = 0
maxFrames = 30

try:
    targetDir = sys.argv[2]
except:
    targetDir = "" # if no argument, then use current directory

try: # read input. eval if to transform video index to int
    vidStream = cv2.VideoCapture(eval(sys.argv[1]))
except:
    print "problem opening input stream"
    sys.exit(1)

if not vidStream.isOpened():
    print "capture stream not open"
    sys.exit(1)

# informations in case the input is a video file.
nFrames = vidStream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
print "frame number: %s" %nFrames
fps = vidStream.get(cv2.cv.CV_CAP_PROP_FPS)
print "FPS value: %s" %fps

# note that we could use frame number here, or "while 1"
# so we could read from a live written file or capture device.
while cpt < maxFrames:
    ret, frame = vidStream.read() # read frame and return code.
    if not ret:
        print "end of stream"
        sys.exit(0)
    cv2.imshow("test window", frame) # show image in window
    cv2.imwrite(os.path.join(targetDir, "image_%04i.jpg" %cpt), frame)
    cpt += 1
    keyPressed = cv2.waitKey(1) # time to wait between frames
    if keyPressed != -1: # if user pressed a key, stop recording.
        sys.exit(0)
Change the name of the image to be saved to "[image name][a number which increases after every loop]".
By doing this, your image will be stored with a new name after every loop; otherwise all the images will overwrite the same name!
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

num = 0
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    cv.SaveImage('pic'+str(num)+'.jpg', img)
    if cv.WaitKey(10) == 27:
        break
    num += 1
Now your images will be saved as pic0.jpg, pic1.jpg, pic2.jpg and so on.
I think this will be helpful:
import cv2

vid = cv2.VideoCapture("video.mp4")
d = 0
ret, frame = vid.read()
while ret:
    filename = "images/file_%d.jpg" % d
    cv2.imwrite(filename, frame)  # save the current frame before reading the next one
    d += 1
    ret, frame = vid.read()
This will save every frame with a different name.
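One small additional note (my own, not part of the answer above): cv2.imwrite returns False instead of raising an error when the images/ folder does not exist, so it is worth creating the folder first:

import os
os.makedirs("images", exist_ok=True)  # create the output folder if it is missing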