How to convert videos into frames with OpenCV Python? [duplicate]

So I've followed this tutorial but it doesn't seem to do anything. Simply nothing. It waits a few seconds and closes the program. What is wrong with this code?
import cv2
vidcap = cv2.VideoCapture('Compton.mp4')
success,image = vidcap.read()
count = 0
success = True
while success:
    success,image = vidcap.read()
    cv2.imwrite("frame%d.jpg" % count, image)    # save frame as JPEG file
    if cv2.waitKey(10) == 27:                    # exit if Escape is hit
        break
    count += 1
Also, a comment there says that this limits the extraction to 1000 frames. Why?
EDIT:
I tried doing success = True first but that didn't help. It only created one image that was 0 bytes.

From here download this video so we have the same video file for the test. Make sure to have that mp4 file in the same directory as your Python code. Then also make sure to run the Python interpreter from the same directory.
Then modify the code: ditch waitKey, which wastes time and, without a window, cannot capture keyboard events anyway. Also print the success value to make sure the frames are being read successfully.
import cv2
vidcap = cv2.VideoCapture('big_buck_bunny_720p_5mb.mp4')
success,image = vidcap.read()
count = 0
while success:
    cv2.imwrite("frame%d.jpg" % count, image)    # save frame as JPEG file
    success,image = vidcap.read()
    print('Read a new frame: ', success)
    count += 1
How does that go?

To extend on this question (and the answer by @user2700065) for a slightly different case: if you do not want to extract every frame, but want to extract a frame every second, then a 1-minute video will give 60 frames (images).
import sys
import argparse
import cv2
print(cv2.__version__)

def extractImages(pathIn, pathOut):
    count = 0
    vidcap = cv2.VideoCapture(pathIn)
    success,image = vidcap.read()
    success = True
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC,(count*1000))    # added this line
        success,image = vidcap.read()
        print('Read a new frame: ', success)
        cv2.imwrite(pathOut + "\\frame%d.jpg" % count, image)    # save frame as JPEG file
        count = count + 1

if __name__=="__main__":
    a = argparse.ArgumentParser()
    a.add_argument("--pathIn", help="path to video")
    a.add_argument("--pathOut", help="path to images")
    args = a.parse_args()
    print(args)
    extractImages(args.pathIn, args.pathOut)

This is a function which will convert most video formats into the number of frames there are in the video. It works on Python 3 with OpenCV 3+.
import cv2
import time
import os

def video_to_frames(input_loc, output_loc):
    """Function to extract frames from input video file
    and save them as separate frames in an output directory.
    Args:
        input_loc: Input video file.
        output_loc: Output directory to save the frames.
    Returns:
        None
    """
    try:
        os.mkdir(output_loc)
    except OSError:
        pass
    # Log the time
    time_start = time.time()
    # Start capturing the feed
    cap = cv2.VideoCapture(input_loc)
    # Find the number of frames
    video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
    print("Number of frames: ", video_length)
    count = 0
    print("Converting video..\n")
    # Start converting the video
    while cap.isOpened():
        # Extract the frame
        ret, frame = cap.read()
        if not ret:
            break   # no frame returned; stop instead of looping forever
        # Write the results back to output location.
        cv2.imwrite(output_loc + "/%#05d.jpg" % (count+1), frame)
        count = count + 1
        # If there are no more frames left
        if (count > (video_length-1)):
            # Log the time again
            time_end = time.time()
            # Release the feed
            cap.release()
            # Print stats
            print("Done extracting frames.\n%d frames extracted" % count)
            print("It took %d seconds for conversion." % (time_end-time_start))
            break

if __name__=="__main__":
    input_loc = '/path/to/video/00009.MTS'
    output_loc = '/path/to/output/frames/'
    video_to_frames(input_loc, output_loc)
It supports .mts and normal files like .mp4 and .avi. Tried and tested on .mts files. Works like a charm.

This is a tweak of the previous answer for Python 3.x from @GShocked. I would post it as a comment, but don't have enough reputation.
import sys
import argparse
import cv2
print(cv2.__version__)

def extractImages(pathIn, pathOut):
    vidcap = cv2.VideoCapture(pathIn)
    success,image = vidcap.read()
    count = 0
    success = True
    while success:
        success,image = vidcap.read()
        print('Read a new frame: ', success)
        cv2.imwrite(pathOut + "\\frame%d.jpg" % count, image)    # save frame as JPEG file
        count += 1

if __name__=="__main__":
    a = argparse.ArgumentParser()
    a.add_argument("--pathIn", help="path to video")
    a.add_argument("--pathOut", help="path to images")
    args = a.parse_args()
    print(args)
    extractImages(args.pathIn, args.pathOut)

The previous answers lose the first frame. Also, it is nice to store the images in a folder.
# create a folder to store extracted images
import os
folder = 'test'
os.mkdir(folder)

# use opencv to do the job
import cv2
print(cv2.__version__)    # my version is 3.1.0
vidcap = cv2.VideoCapture('test_video.mp4')
count = 0
while True:
    success,image = vidcap.read()
    if not success:
        break
    cv2.imwrite(os.path.join(folder,"frame{:d}.jpg".format(count)), image)    # save frame as JPEG file
    count += 1
print("{} images are extracted in {}.".format(count,folder))
By the way, you can check the frame rate with VLC: go to Window -> Media Information -> Codec Details.
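You can also read the frame rate programmatically; a minimal sketch with OpenCV itself (assuming the same test_video.mp4 as above):
import cv2

vidcap = cv2.VideoCapture('test_video.mp4')
fps = vidcap.get(cv2.CAP_PROP_FPS)   # frame rate reported by the container
print("Frame rate:", fps)
vidcap.release()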

After a lot of research on how to convert frames to video, I have created this function; hope this helps. We require OpenCV for this:
import cv2
import numpy as np
import os
from os.path import isfile, join

def frames_to_video(inputpath, outputpath, fps):
    image_array = []
    files = [f for f in os.listdir(inputpath) if isfile(join(inputpath, f))]
    files.sort(key=lambda x: int(x[5:-4]))
    for i in range(len(files)):
        img = cv2.imread(inputpath + files[i])
        size = (img.shape[1], img.shape[0])
        img = cv2.resize(img, size)
        image_array.append(img)
    fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
    out = cv2.VideoWriter(outputpath, fourcc, fps, size)
    for i in range(len(image_array)):
        out.write(image_array[i])
    out.release()

inputpath = 'folder path'
outpath = 'video file path/video.mp4'
fps = 29
frames_to_video(inputpath, outpath, fps)
Change the value of fps (frames per second), the input folder path, and the output path according to your own local locations.

This code extracts frames from the video and saves them in .jpg format:
import cv2
import numpy as np
import os

# set video file path of input video with name and extension
vid = cv2.VideoCapture('VideoPath')

if not os.path.exists('images'):
    os.makedirs('images')

# for frame identity
index = 0
while(True):
    # Extract images
    ret, frame = vid.read()
    # end of frames
    if not ret:
        break
    # Saves images
    name = './images/frame' + str(index) + '.jpg'
    print('Creating...' + name)
    cv2.imwrite(name, frame)
    # next frame
    index += 1

In 2022 you also have the option to use ImageIO to do this, which IMHO is much more hassle-free and readable.
import imageio.v3 as iio

for idx, frame in enumerate(iio.imiter("imageio:cockatoo.mp4")):
    iio.imwrite(f"extracted_images/frame{idx:03d}.jpg", frame)
Sidenote 1: "imageio:cockatoo.mp4" is a standard image provided by ImageIO for testing and demonstration purposes. You can simply replace it with "path/to/your/video.mp4".
Sidenote 2: You will have to install one of ImageIO's optional dependencies to read video data, which can be done via pip install imageio-ffmpeg or pip install av.
You can time this against OpenCV, and you will find that there isn't much to gain from OpenCV on this front either:
Read-Only Timings
=================
OpenCV: 0.453
imageio_ffmpeg: 0.765
imageio_pyav: 0.272
Read + Write Timings
====================
OpenCV: 3.237
imageio_ffmpeg: 1.597
imageio_pyav: 1.506
By default, OpenCV and ImageIO+av are about equally fast when reading. Both bind directly into the FFmpeg libraries under the hood, so this is rather unsurprising. However, ImageIO allows you to tweak FFmpeg's default threading model (thread_type="FRAME"), which is much faster when bulk reading.
More importantly, ImageIO is much faster at writing JPEG compared to OpenCV. This is because Pillow, which ImageIO capitalizes on, is faster than OpenCV here. Writing images dominates the runtime in this scenario, so you end up with an overall 2x improvement when using ImageIO instead of OpenCV.
Here is the code for reference:
import imageio.v3 as iio
import cv2
import timeit
from pathlib import Path

# create a common local file for benchmarking
video_file = "shared_video.mp4"
if not Path(video_file).exists():
    frames = iio.imread("imageio:cockatoo.mp4")
    meta = iio.immeta("imageio:cockatoo.mp4", exclude_applied=False)
    iio.imwrite(video_file, frames, fps=meta["fps"])

repeats = 10

def read_cv2():
    vidcap = cv2.VideoCapture(video_file)
    success, image = vidcap.read()
    idx = 0
    while success:
        cv2.imwrite(f"extracted_images/frame{idx:03d}.jpg", image)
        success, image = vidcap.read()
        idx += 1

def read_imageio_ffmpeg():
    for idx, frame in enumerate(iio.imiter(video_file, plugin="FFMPEG")):
        iio.imwrite(f"extracted_images/frame{idx:03d}.jpg", frame)

def read_imageio_pyav():
    for idx, frame in enumerate(
        iio.imiter(video_file, plugin="pyav", format="rgb24", thread_type="FRAME")
    ):
        iio.imwrite(f"extracted_images/frame{idx:03d}.jpg", frame)

time_cv2 = (
    timeit.timeit("read_cv2()", setup="from __main__ import read_cv2", number=repeats)
    / repeats
)
time_imageio_ffmpeg = (
    timeit.timeit(
        "read_imageio_ffmpeg()",
        setup="from __main__ import read_imageio_ffmpeg",
        number=repeats,
    )
    / repeats
)
time_imageio_pyav = (
    timeit.timeit(
        "read_imageio_pyav()",
        setup="from __main__ import read_imageio_pyav",
        number=repeats,
    )
    / repeats
)

print(
    f"""
Timings
=======
OpenCV:         {time_cv2:<3.3f}
imageio_ffmpeg: {time_imageio_ffmpeg:<3.3f}
imageio_pyav:   {time_imageio_pyav:<3.3f}
"""
)

The following script will extract frames every half second from all videos in a folder. (Works on Python 3.7.)
import cv2
import os

listing = os.listdir(r'D:/Images/AllVideos')
count = 1
for vid in listing:
    vid = r"D:/Images/AllVideos/" + vid
    vidcap = cv2.VideoCapture(vid)

    def getFrame(sec):
        vidcap.set(cv2.CAP_PROP_POS_MSEC, sec*1000)
        hasFrames, image = vidcap.read()
        if hasFrames:
            cv2.imwrite("D:/Images/Frames/image"+str(count)+".jpg", image)    # Save frame as JPG file
        return hasFrames

    sec = 0
    frameRate = 0.5    # Change this number to 1 for each 1 second
    success = getFrame(sec)
    while success:
        count = count + 1
        sec = sec + frameRate
        sec = round(sec, 2)
        success = getFrame(sec)

This function extracts images from video at 1 fps; in addition, it identifies the last frame and stops reading at that point:
import cv2
import numpy as np

def extract_image_one_fps(video_source_path):
    vidcap = cv2.VideoCapture(video_source_path)
    count = 0
    success = True
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, (count*1000))
        success, image = vidcap.read()

        ## Stop when last frame is identified
        image_last = cv2.imread("frame{}.png".format(count-1))
        if np.array_equal(image, image_last):
            break

        cv2.imwrite("frame%d.png" % count, image)    # save frame as PNG file
        print('{}.sec reading a new frame: {} '.format(count, success))
        count += 1

I am using Python via Anaconda's Spyder software. Using the original code listed in the question of this thread by @Gshocked, the code does not work (Python won't read the mp4 file). So I downloaded OpenCV 3.2 and copied "opencv_ffmpeg320.dll" and "opencv_ffmpeg320_64.dll" from the "bin" folder. I pasted both of these dll files into Anaconda's "DLLs" folder.
Anaconda also has a "pkgs" folder... I copied and pasted the entire "OpenCV 3.2" folder that I downloaded into the Anaconda "pkgs" folder.
Finally, Anaconda has a "Library" folder which has a "bin" subfolder. I pasted the "opencv_ffmpeg320.dll" and "opencv_ffmpeg320_64.dll" files into that folder.
After closing and restarting Spyder, the code worked. I'm not sure which of the three methods did the trick, and I'm too lazy to go back and figure it out. But it works, so cheers!
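For anyone debugging the same thing, a quick sanity check is to inspect OpenCV's build information and look for the FFMPEG entry, which is what those opencv_ffmpeg*.dll files provide on Windows:
import cv2

# print only the FFMPEG-related lines from the build report
build_info = cv2.getBuildInformation()
for line in build_info.splitlines():
    if "FFMPEG" in line:
        print(line.strip())   # expect something like: FFMPEG: YES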

I might be late here, but you can use this pip package to quickly generate images from videos. You can also extract images at a specific fps.
pip install videoToImages
Then type the following command in the terminal:
videoToimages --videoFolder [pathToVideosFolder]
Example: videoToimages --videoFolder "c:/videos"
For a specific output fps, set --fps 10 or any other required value. --fps 1 means one image per second of the video.
Full commands:
videoToimages --videoFolder "c:/videos"
videoToimages --videoFolder "c:/videos" --fps 10 --img_size (512, 512)

This code is simple and guarantees reliable execution.
import cv2

# path of video file
video_path = "path/to/video.mp4"

# Open video file
video = cv2.VideoCapture(video_path)

# number of frames in video
frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

# Convert each frame to an image and save it to a file
for i in range(frame_count):
    ret, frame = video.read()
    if ret:
        image_path = f"path/to/image_{i}.jpg"
        cv2.imwrite(image_path, frame)

# Close video file
video.release()

There are several reasons to extract slides/frames from a video presentation, especially in the case of education or conference-related videos. It allows you to access the study notes without watching the whole video.
I have faced this issue several times, so I decided to create a solution for it myself using Python. I have made the code open source; you can easily set up this tool and run it in a few simple steps.
Refer to this YouTube video tutorial.
Steps on how to use this tool.
Clone this project video2pdfslides
Set up your environment by running "pip install -r requirements.txt"
Copy your video path
Run "python video2pdfslides.py <video_path>"
Boom! The PDF slides will be available in the output folder
Make notes and enjoy!

Related

Concatenate a video, image and audio using ffmpeg

I am trying to concatenate a group of images with associated audio with a video clip at the start and end of the video. Whenever I concatenate the image with the associated audio, it doesn't play back correctly in VLC media player: it only displays the image for a frame before cutting to black while the audio keeps playing. I came across this GitHub issue: https://github.com/kkroening/ffmpeg-python/issues/274 where the accepted solution was the one I implemented, but one of the comments mentioned this issue of incorrect playback and an error on YouTube.
import os
import ffmpeg

'''
Generates a clip from an image and a wav file, helper function for export_video
'''
def generate_clip(img):
    transition_cond = os.path.exists("static/transitions/" + img + ".mp4")
    chart_path = os.path.exists("charts/" + img + ".png")
    if transition_cond:
        clip = ffmpeg.input("static/transitions/" + img + ".mp4")
    elif chart_path:
        clip = ffmpeg.input("charts/" + img + ".png")
    else:
        clip = ffmpeg.input("static/transitions/Transition.jpg")
    audio_clip = ffmpeg.input("audio/" + img + ".wav")
    clip = ffmpeg.concat(clip, audio_clip, v=1, a=1)
    clip = ffmpeg.filter(clip, "setdar", "16/9")
    return clip

'''
Combines the charts from charts/ and the audio from audio/ to generate one final video that will be uploaded to Youtube
'''
def export_video(CHARTS):
    clips = []
    intro = generate_clip("Intro")
    clips.append(intro)
    for key in CHARTS.keys():
        value = CHARTS.get(key)
        value.insert(0, key)
        subclip = []
        for img in value:
            subclip.append(generate_clip(img))
        concat_clip = ffmpeg.concat(*subclip)
        clips.append(concat_clip)
    outro = generate_clip("Outro")
    clips.append(outro)
    concat_clip = ffmpeg.concat(*clips)
    concat_clip.output("export/export.mp4").run(overwrite_output=True)
It is unfortunate that the concat filter does not offer a shortest option like overlay does. Anyway, the issue here is that the image2 demuxer uses 25 fps by default, so a video stream made from a single image only lasts 1/25 of a second. There are several ways to address this, but you first need to get the duration of the paired audio files. To incorporate the duration information into the ffmpeg command, you can:
1. Use the tpad filter for each video (in series with setdar) to pad the video duration to match the audio. The padded amount should be 1/25 seconds less than the audio duration.
2. Specify the -loop 1 input option so the image loops (indefinitely), then specify an additional -t {duration} input option to limit the number of loops (see the sketch after this list). Caution: the video duration may not be exact.
3. Specify -r {1/duration} so the image lasts as long as the audio, and use the fps filter on each input to set the output frame rate.
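For illustration, a minimal sketch of the second option, calling plain ffmpeg from Python's subprocess (the file names and the duration value are placeholders, not from the question):
import subprocess

duration = 4.2  # placeholder; in practice, probe the paired wav file for its duration
subprocess.run([
    "ffmpeg", "-y",
    "-loop", "1", "-t", str(duration), "-i", "charts/example.png",  # looped still image
    "-i", "audio/example.wav",                                      # paired audio
    "-vf", "setdar=16/9",
    "clip.mp4",
], check=True)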
I'm not familiar with ffmpeg-python so I cannot provide its solution, but if you're interested, I'd be happy to post an equivalent code with my ffmpegio package.
[edit]
ffmpegio Solution
Here is how I'd code the 3rd solution with ffmpegio:
from os import path
import ffmpegio

def generate_clip(img):
    """
    Generates a clip from an image and a wav file,
    helper function for export_video
    """
    transition_cond = path.exists("static/transitions/" + img + ".mp4")
    chart_path = path.exists("charts/" + img + ".png")
    if transition_cond:
        video_file = "static/transitions/" + img + ".mp4"
    elif chart_path:
        video_file = "charts/" + img + ".png"
    else:
        video_file = "static/transitions/Transition.jpg"
    audio_file = "audio/" + img + ".wav"

    video_opts = {}
    if not transition_cond:
        # audio_streams_basic() returns audio duration in seconds as Fraction
        # set the "framerate" of the video to be the reciprocal
        info = ffmpegio.probe.audio_streams_basic(audio_file)
        video_opts["r"] = 1 / info[0]["duration"]

    return [(video_file, video_opts), (audio_file, None)]

def export_video(CHARTS):
    """
    Combines the charts from charts/ and the audio from audio/
    to generate one final video that will be uploaded to Youtube
    """
    # get all input files (video/audio pairs)
    clips = [
        generate_clip("Intro"),
        *(generate_clip(img) for key, value in CHARTS.items() for img in value),
        generate_clip("Outro"),
    ]

    # number of clips
    nclips = len(clips)

    # filter chains to set DAR and fps of all video streams
    vfilters = (f"[{2*n}:v]setdar=16/9,fps=30[v{n}]" for n in range(nclips))

    # concatenation filter input: [v0][1:a][v1][3:a][v2][5:a]...
    concatfilter = "".join((f"[v{n}][{2*n+1}:a]" for n in range(nclips))) + f"concat=n={nclips}:v=1:a=1[vout][aout]"

    # form the full filtergraph
    fg = ";".join((*vfilters, concatfilter))

    # set output file and options
    output = ("export/export.mp4", {"map": ["[vout]", "[aout]"]})

    # run ffmpeg
    ffmpegio.ffmpegprocess.run(
        {
            "inputs": [input for pair in clips for input in pair],
            "outputs": [output],
            "global_options": {"filter_complex": fg},
        },
        overwrite=True,
    )
Since this code does not use the read/write features, ffmpegio-core package suffices:
pip install ffmpegio-core
Make sure that the FFmpeg binary can be found by ffmpegio. See the installation doc.
Here are the direct links to the documentations of the functions used:
ffmpegprocess.run
ffmpeg_args dict argument
probe.audio_streams_basic (ignore the documentation error: duration and start_time are both of Fraction type)
The code has not been fully validated. If you encounter a problem, it might be easiest to post it on the GitHub Discussions to proceed.

OpenCV (Python) VideoCapture.read() on missing frames

New to Python, new to OpenCV, which I'm going to use for my master's thesis, and I already have some problems using the VideoCapture object of OpenCV.
Situation:
I've got 2 folders containing corresponding images (taken with RGB and infrared cameras). I want to display them side by side in a window using a while-loop. The problem arises when some images are missing from one of the image sequences (due to problems while recording or whatever; I don't really know, but that should be of no importance). My idea was to use the bool return value of the .read() function to check whether there is a frame to be read and, if not, replace the image with a black one. This is what I did:
Code:
import cv2
import numpy as np

pathRGB = "Bilder/RGB"
pathIR = "Bilder/IR"
# the paths to the folders containing the images

capRGB = cv2.VideoCapture(pathRGB + "/frame_%06d.jpg")
capIR = cv2.VideoCapture(pathIR + "/frame_%06d.jpg")
# setting up the VideoCapture-elements with the according format

shapeRGB = capRGB.read()[1].shape
shapeIR = capIR.read()[1].shape
# get the shape of the first image in each folder to later create the black
# dummy-image

dtypeRGB = capRGB.read()[1].dtype
dtypeIR = capIR.read()[1].dtype
# get the type of the first image in each folder to later create the black
# dummy-image

if (capRGB.isOpened() is False):
    print("Error opening RGB images")
if (capIR.isOpened() is False):
    print("Error opening IR images")

cv2.namedWindow("frames", cv2.WINDOW_NORMAL)
while capRGB.isOpened() and capIR.isOpened() is True:
    retRGB, imgRGB = capRGB.read()
    retIR, imgIR = capIR.read()
    # read both images
    if retRGB is True and retIR is False:
        imgIR = np.zeros(shapeIR, dtype=dtypeIR)
        # if there is no IR image, create a dummy one
    if retIR is True and retRGB is False:
        imgRGB = np.zeros(shapeRGB, dtype=dtypeRGB)
        # if there is no RGB image, create a dummy one
    if retRGB is False and retIR is False:
        break
    imgCombined = np.hstack((imgRGB, imgIR))
    # put both images together
    cv2.imshow("frames", imgCombined)
    k = cv2.waitKey(1)
    if k == ord("q"):
        break

capRGB.release()
capIR.release()
cv2.destroyAllWindows()
Problem:
From my understanding, the problem arises as capIR.read() attempts to read a missing image (in my case the 527th) and, instead of just returning false/None, it attempts to read the same image over and over again. Up to the missing frame everything works fine, the right "IR" image even turns black, but then the video playback begins to slow down and, while I can still close the window by pressing 'q', the Spyder IDE freezes, and if I wait "too long" I even have to shut it down. The console prints "[image2 # 000002a7af8f0480] Could not open file : Bilder/IR/frame_000527.jpg" over and over again, so much that I can't scroll to the top.
I guess what I'm asking is: is there any way to make the .read() function attempt just one read and, if it fails, continue with the next frame?
Best regards and thank you very much in advance!
Simulated for testing with different files and directory names.
The script below retrieves the largest frame number from both directories and afterwards iterates over all frame numbers, reading the files from both directories.
import os
import cv2
import re
import glob

image_dir1 = 'test1'
image_dir2 = 'test2'

# retrieve all frames in both directories
frames_cap1 = glob.glob(os.path.join(image_dir1, "frame_*.jpg"))
frames_cap2 = glob.glob(os.path.join(image_dir2, "frame_*.jpg"))

# sort ascending
frames_cap1.sort()
frames_cap2.sort()

# retrieve last frame No for both directories
last_frame_cap1 = frames_cap1[-1]
last_frame_cap2 = frames_cap2[-1]

# extract integer counter as a group
# modify regex to match file name if required
match_cap1 = re.search(r'frame_(\d+)\.jpg', last_frame_cap1)
match_cap2 = re.search(r'frame_(\d+)\.jpg', last_frame_cap2)
last_frame_no_cap1 = int(match_cap1.group(1))
last_frame_no_cap2 = int(match_cap2.group(1))

# retrieve max frame No
max_frame_no = max(last_frame_no_cap1, last_frame_no_cap2)

for i in range(max_frame_no + 1):
    # adapt formatting of frame number to digit count in file name
    # here: 6 digits with leading zeros
    image_path_cap1 = os.path.join(image_dir1, f"frame_{i:06d}.jpg")
    image_path_cap2 = os.path.join(image_dir2, f"frame_{i:06d}.jpg")

    if not os.path.isfile(image_path_cap1):
        print(f"handle missing file: '{image_path_cap1}'")
        # ...
    else:
        img1 = cv2.imread(image_path_cap1)
        # ...

    if not os.path.isfile(image_path_cap2):
        print(f"handle missing file: '{image_path_cap2}'")
        # ...
    else:
        img2 = cv2.imread(image_path_cap2)
        # ...

    # ...
Assuming the images in directory1 have the same names as the images in directory2, but knowing that some images may not be present in both directories...
import glob, os, cv2
import numpy as np

path1 = "folder1/"
path2 = "folder2/"

# change directory to path1
os.chdir(path1)
l1 = glob.glob("*.jpg")    # get a list of image names
os.chdir("../")            # go one directory up
blackimg = cv2.imread("blackimg.jpg")

for fname in l1:
    # check if image1 exists, then read it; otherwise im1 = blackimg
    if os.path.isfile(path1+fname):
        im1 = cv2.imread(path1+fname)
    else:
        im1 = blackimg
    # check if image2 exists, then read it; otherwise im2 = blackimg
    if os.path.isfile(path2+fname):
        im2 = cv2.imread(path2+fname)
    else:
        im2 = blackimg
    imgCombined = np.hstack((im1, im2))
    cv2.imshow("Combined", imgCombined)
    print("press any key to continue, q to exit")
    k = cv2.waitKey(0)
    if k == ord("q"):
        break

cv2.destroyAllWindows()

What is different between splitting video with ffmpeg vs OpenCV?

I used OpenCV and ffmpeg to split a video into frames.
opencv
import cv2

# VideoCapture lets us grab the video's images one after another
vidcap = cv2.VideoCapture("D:/godzillakingofmonster/GodzillaKingOfMonsters_clip.mkv")
count = 0
while(vidcap.isOpened()):
    # read() combines the two functions grab() and retrieve() in a single call;
    # they are combined so that, when no frame is left,
    # grab() can hand back false or a NULL value
    ret, image = vidcap.read()
    if not ret:
        break   # stop once no more frames can be read
    # save the captured image
    print("D:/godzillakingofmonster/frame/frame%d.jpg" % count)
    cv2.imwrite("D:/godzillakingofmonster/frame/frame%d.jpg" % count, image)
    print('Saved frame%d.jpg' % count)
    count += 1
vidcap.release()
ffmpeg
ffmpeg -i \"{target_video}\" \"{save_folder_path}/{media_name}_%08d.{exp}\"
I am wondering which of the two methods gives more accurate results.
When the same video is split, the saved frames differ between the two methods. Why are the results different?
Which method, ffmpeg or OpenCV, is more accurate and produces results closer to the original?
I'm assuming ffmpeg is also storing jpg files. In both methods you don't specify the amount of JPEG compression, so you're running with default values, and they are likely different.
Output to a lossless format such as .png to get 100% accurate images from both ffmpeg and OpenCV.
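Alternatively, you can pin the quality down explicitly; a minimal sketch on the OpenCV side (the input path is a placeholder, and ffmpeg's -q:v option plays the analogous role there):
import cv2

vidcap = cv2.VideoCapture("input.mkv")   # placeholder path
ret, frame = vidcap.read()
if ret:
    # explicit JPEG quality (0-100) instead of the library default
    cv2.imwrite("frame0.jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
    # or a lossless PNG for pixel-exact output
    cv2.imwrite("frame0.png", frame)
vidcap.release()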

I need the Python script to select all AVI files

The idea is the following: the script needs to recognize all AVI files recursively in the folder and, using OpenCV, convert each one to JPEGs at 5-second intervals. So far everything is working. However, the script lists the AVI files but converts only 1 file, not all that were listed.
import os
import cv2

path = 'C:\\Users\\coleta 1\\Desktop\\SNAPSHOT'
files = []
for r, d, f in os.walk(path):
    for file in f:
        if '.avi' in file:
            files.append(os.path.join(r, file))
for f in files:
    print(f)

vidcap = cv2.VideoCapture(f)

def Printar(sec):
    vidcap.set(cv2.CAP_PROP_POS_MSEC, sec*10000)
    hasFrames, image = vidcap.read()
    if hasFrames:
        cv2.imwrite("image"+str(count)+".jpg", image)
    return hasFrames

sec = 0
frameRate = 0.5
count = 1
success = Printar(sec)
while success:
    count = count + 1
    sec = sec + frameRate
    sec = round(sec, 2)
    success = Printar(sec)
    continue
I understand. I tried both options, but in each case the error was the same: "It is not necessary to do _Stuff (f)".
The problem with your code is that the lines which use f, vidcap, etc. are outside of the loop, so will not change each time. You need to restructure so that the f, sec, count and vidcap change with each iteration of the loop. You should also try to avoid functions relying too much on global variables, as it becomes harder to know what their values will be at the time the function is executed - generally the values should instead be passed to the function as parameters. A couple of possible ways to reorganize the code would be like this:
...
def do_stuff(path):
print(path)
vidcap = cv2.VideoCapture(path)
frameRate = 0.5
i = 0
while True:
success = printar(frameRate*i, vidcap, i+1)
if not success:
return # add a return value if needed
i += 1
def printar(sec, vidcap, count):
vidcap.set(cv2.CAP_PROP_POS_MSEC,sec*10000)
hasFrames,image = vidcap.read()
if hasFrames:
cv2.imwrite("image"+str(count)+".jpg", image)
return hasFrames
for f in files:
do_stuff(f)
Or possibly simpler and better:
from itertools import count
...

def do_stuff(path):
    vidcap = cv2.VideoCapture(path)
    frameRate = 0.5
    for i in count():
        vidcap.set(cv2.CAP_PROP_POS_MSEC, i * frameRate * 10000)
        has_frames, image = vidcap.read()
        if has_frames:
            cv2.imwrite("image{}.jpg".format(i+1), image)
        else:
            return

for f in files:
    print(f)
    do_stuff(f)

How to read the mask of an image using opencv in python

I am working on a challenge called Carvana Segmentation on Kaggle. The dataset consists of 5088 images; for each image there is a mask. For example, below is a single image (.jpg file) and its corresponding mask (.gif file).
I was able to read the .jpg files using cv2, but not the .gif files. The syntax I used to read a .gif file is
>>> image = cv2.imread('filename.gif', cv2.IMREAD_GRAYSCALE)
When I try to print the image, it returns None:
>>> print(image) -> None
Can someone suggest any other method, please?
imageio allows you to read gifs like this:
import imageio
img = imageio.imread('filename.gif')
Following this repo:
https://github.com/asharma327/Read_Gif_OpenCV_Python/blob/master/gif_to_pic.py
you can do the following to read the image
import cv2
import os

def convert_gif_to_frames(gif):
    # Initialize the frame number and create empty frame list
    frame_num = 0
    frame_list = []

    # Loop until there are no frames left
    while True:
        try:
            # Try to read a frame. Okay is a BOOL telling whether there are frames or not
            okay, frame = gif.read()
            # Append to empty frame list
            frame_list.append(frame)
            # Break if there are no other frames to read
            if not okay:
                break
            # Increment value of the frame number by 1
            frame_num += 1
        except KeyboardInterrupt:  # press ^C to quit
            break

    return frame_list

def output_frames_as_pics(frame_list):
    # Reduce the list of frames by half to make the list more manageable
    frame_list_reduce = frame_list[0::2]
    # Get the path of the current working directory
    path = os.getcwd()
    # Set the name of your folder
    '''Replace this name with what you want your folder name to be'''
    folder_name = 'Picturebook_Pics_Kiss'
    # If the folder does not exist, then make it
    if not os.path.exists(path + '/' + folder_name):
        os.makedirs(path + '/' + folder_name)

    for frames_idx in range(len(frame_list_reduce)):
        cv2.imwrite(os.path.join(path + '/' + folder_name, str(frames_idx+1) + '.png'), frame_list_reduce[frames_idx])
    return

gif = cv2.VideoCapture('/home/ahmedramzy/Documents/gif/giphy.gif')
# here you can get the frames and work on them
xx = convert_gif_to_frames(gif)
# here if you want to write them to disk using imwrite
output_frames_as_pics(xx)
You can't use imread(); there's no built-in codec for GIF (still a license problem: https://answers.opencv.org/question/185929/how-to-read-gif-in-python/).
Since you are interested in Python, you may use the PIL library, as mentioned here.
from PIL import Image

im = Image.open("animation.gif")
# To iterate through the entire gif
try:
    while 1:
        im.seek(im.tell()+1)
        # do something to im
except EOFError:
    pass    # end of sequence
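If you then want the frame as a NumPy array for OpenCV-style processing (e.g. as a grayscale mask), a minimal sketch of the conversion (the file name is a placeholder):
from PIL import Image
import numpy as np

im = Image.open("filename.gif")     # placeholder .gif mask file
mask = np.array(im.convert("L"))    # 2-D uint8 array, similar to cv2.IMREAD_GRAYSCALE
print(mask.shape, mask.dtype)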
