I want to ensure that an online video at example.com/video.mp4 wasn't filmed on a smartphone and has dimensions close to 1920 x 1080.
It's easy to get the dimensions once the video is downloaded:
import cv2

vcap = cv2.VideoCapture('video.mp4')  # 0 = camera
width = vcap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT)
But I don't want to download the whole mp4 file, and I also want to quickly find the file size -- which I can't do quickly if I have to download the entire file first.
I've managed to get the dimensions by downloading a 100 KB piece of the video file:
import cv2
import requests

def get_dimensions(url):
    r = requests.get(url, stream=True)
    with open('output', 'wb') as f:
        for chunk in r.iter_content(chunk_size=100000):
            if chunk:
                f.write(chunk)
            break
    vcap = cv2.VideoCapture('output')
    return int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
I've tested it on several files from the internet, and here's what I got:
>> get_dimensions('http://file-examples.com/wp-content/uploads/2017/04/file_example_MP4_1920_18MG.mp4')
(1920, 1080)
>> get_dimensions('http://file-examples.com/wp-content/uploads/2018/04/file_example_AVI_640_800kB.avi')
(640, 360)
>> get_dimensions('https://www.sample-videos.com/video123/mkv/720/big_buck_bunny_720p_1mb.mkv')
(1280, 720)
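For the file-size part, a minimal sketch of an approach I'd try (not something the snippet above does): read the Content-Length header from a HEAD request, and use a Range header so only the first ~100 KB is ever transferred, assuming the server supports range requests.

import cv2
import requests

def get_size_and_dimensions(url, probe_bytes=100_000):
    # Remote file size (bytes) from the Content-Length header of a HEAD request
    head = requests.head(url, allow_redirects=True)
    size = int(head.headers.get('Content-Length', -1))

    # Fetch only the first probe_bytes bytes (works when the server honors Range)
    r = requests.get(url, headers={'Range': f'bytes=0-{probe_bytes - 1}'})
    with open('probe', 'wb') as f:
        f.write(r.content)

    vcap = cv2.VideoCapture('probe')
    return size, int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))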
Related
I have produced a video using MoviePy and the audio works perfectly fine on PC, but when I try to watch it on an iPhone it has no audio. I played the uploaded clip on my PC too, so it's not the platform hosting the video. I also got a friend to listen on an iPhone and it had no audio for them either, so it's not my device. Edit: I also tried playing it on a Samsung tablet (Android) and it plays the audio fine.
These are the output video file's properties:
This is my code:
from moviepy.editor import ImageClip, AudioFileClip, VideoFileClip, CompositeVideoClip
clips = [] # list of clips to be composited
current_duration = 0 # how long the total clip is
bg = VideoFileClip("background.MOV", audio=False) # remove audio from the background
title_audio = AudioFileClip("audio/title.mp3") # title audio
title_clip = ImageClip("screenshots/post.png", duration=title_audio.duration).set_audio(title_audio) # image + audio
clips.append(title_clip.resize(width=bg.w).set_position("center")) # append the resized centred clip
current_duration += title_audio.duration # increase the duration
# loop through clips 1-5 doing the same thing
for comment in range(1, 6):
    com_audio = AudioFileClip("audio/voice" + str(comment) + ".mp3")
    com_clip = ImageClip("screenshots/comment" + str(comment) + ".png", duration=com_audio.duration).set_audio(com_audio)
    clips.append(com_clip.set_start(current_duration).resize(width=bg.w).set_position("center")) # start at current end
    current_duration += com_audio.duration
final = CompositeVideoClip([bg.subclip(0, current_duration)] + clips) # composite the clips on top of the background
final.write_videofile("test.mp4", fps=24) # output the file
Based on this, you produced a video file that's (probably) H.264 video with MP3 audio, which isn't a supported combination on iPhone - your file needs to be H.264/AAC to play on iPhones (and probably on any Apple device via QuickTime).
This is also an open issue for moviepy: https://github.com/Zulko/moviepy/issues/1709
You can specify an audio_codec when writing your file to make this work:
final.write_videofile("test.mp4", fps=24, audio_codec='aac') # output the file
I am trying to combine a .mp4 file with a .wav file. I am rendering my mp4 with cv2's VideoWriter, and I don't think it has any way of incorporating audio. I have tried moviepy.editor and ffmpeg: moviepy.editor kept messing up the video file, and ffmpeg repeatedly gave me an error that it couldn't edit existing files in-place. Combining the .mp4 with another audio file type is also fine, but if so it would be nice to also explain how to convert MIDI files to whatever file type you answer with. Thanks for the help!
moviepy.editor workflow:
video = mpe.VideoFileClip(mp4_path)
os.system(f"timidity {midi_path} -Ow -o {wav_path)}") # Convert .mid to .wav
video = video.set_audio(mpe.AudioFileClip(wav_path))
video.write_videofile(mp4_path, fps=fps)
ffmpeg workflow:
video = ffmpeg.input(mp4_path)
os.system(f"timidity {midi_path} -Ow -o {wav_path)}") # Convert .mid to .wav
audio = ffmpeg.input(wav_path)
video = ffmpeg.output(video, audio, path, vcodec='copy', acodec='aac', strict='experimental')
ffmpeg.run(video)
I tested both modules, and for moviepy I get a correct output video with audio even if I use the same name for the output file, so I don't know what could mess up your output.
For ffmpeg I had to use a different name for the output file to resolve the "couldn't edit existing files in-place" problem.
I also had to use .video and .audio on the input objects to replace the audio in the output file.
video = ffmpeg.input(video_path).video # get only video channel
audio = ffmpeg.input(audio_path).audio # get only audio channel
My testing code:
def test_moviepy(video_path, audio_path, output_path='output-moviepy.mp4', fps=24):
    import moviepy.editor as mpe

    print('--- moviepy ---')

    video = mpe.VideoFileClip(video_path)
    video = video.set_audio(mpe.AudioFileClip(audio_path))
    video.write_videofile(output_path, fps=fps)

def test_ffmpeg(video_path, audio_path, output_path='output-ffmpeg.mp4', fps=24):
    import ffmpeg

    print('--- ffmpeg ---')

    video = ffmpeg.input(video_path).video  # get only video channel
    audio = ffmpeg.input(audio_path).audio  # get only audio channel

    output = ffmpeg.output(video, audio, output_path, vcodec='copy', acodec='aac', strict='experimental')
    ffmpeg.run(output)

# --- main ---

video_path = 'movie.mp4'
audio_path = 'sound.wav'
output_path = 'output.mp4'

test_moviepy(video_path, audio_path)#, output_path)
test_ffmpeg(video_path, audio_path)#, output_path)
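To cover the MIDI part of the question: the question's own snippets already call timidity, and the same conversion can be wrapped in a small helper before either test function is run. A minimal sketch, assuming timidity is installed and using hypothetical file names:

import subprocess

def midi_to_wav(midi_path, wav_path):
    # Render the MIDI file to a RIFF WAVE file (-Ow = wave output)
    subprocess.run(["timidity", midi_path, "-Ow", "-o", wav_path], check=True)

midi_to_wav("sound.mid", "sound.wav")   # hypothetical paths
test_ffmpeg("movie.mp4", "sound.wav")   # then mux as shown above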
EDIT:
After installing the Python module graphviz and the graphviz program, I could run
ffmpeg.view(output, filename='output-ffmpeg.png')
to get an image of the processing graph.
I'm trying to write a script that gives me the dimensions of the cover art in an mp3 file. The furthest I've gotten is via Mutagen, doing:
import mutagen

audiofile = mutagen.File(wavefile, easy=False)
print(audiofile.tags)
but from that raw output, how can I extract the dimensions, e.g. 400x400?
You can use stagger and PIL, e.g.:
import stagger, io, traceback
from PIL import Image
try:
    mp3 = stagger.read_tag('Menuetto.mp3')
    im = Image.open(io.BytesIO(mp3[stagger.id3.APIC][0].data))
    print(im.size)
    # (300, 300)
    # im.save("cover.jpg") # save cover to file
except:
    print(traceback.format_exc())
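If you would rather stay with Mutagen, which the question already uses, a similar sketch (the file name is hypothetical; the idea is to pull the APIC picture frames out of the ID3 tag and measure the embedded image with PIL):

import io
import mutagen
from PIL import Image

audiofile = mutagen.File('Menuetto.mp3', easy=False)  # hypothetical file
for apic in audiofile.tags.getall('APIC'):            # attached picture frames
    im = Image.open(io.BytesIO(apic.data))
    print(im.size)                                    # e.g. (400, 400)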
I need to read in a video using OpenCV, do some processing to it, and export it. The code I have written is below. Right now the processing is converting the video to grayscale.
import cv2

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 25.0, (960, 540))

cap = cv2.VideoCapture('input.mp4')
while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        out.write(gray)
    else:
        break

cap.release()
out.release()
The input file 'input.mp4' has these details. I am new to this, so I don't know which attributes are actually needed, so I will post most of them. Right now the output video is only 5.55 KB and won't play if you click on it; Windows Media Player says it "encountered a problem while playing this file".
File
Item type: MP4 Video
Size: 2.51 MB
Video
Length: 00:00:08
Frame width: 960
Frame height: 540
Data rate: 2634kbps
Total bitrate: 2637kbps
Frame rate: 25 frames/second
Audio
Bit rate: 2kbps
Channels: 2(stereo)
Audio sample rate: 48 kHz
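One hedged guess at why the output is only a few kilobytes and won't play: cv2.VideoWriter defaults to isColor=True, so it expects 3-channel BGR frames, and writing the single-channel grayscale frame fails silently. A minimal sketch of a possible fix is to pass isColor=False (or convert the gray frame back to BGR before writing):

import cv2

fourcc = cv2.VideoWriter_fourcc(*'XVID')
# isColor=False tells the writer to expect single-channel (grayscale) frames
out = cv2.VideoWriter('output.avi', fourcc, 25.0, (960, 540), isColor=False)
cap = cv2.VideoCapture('input.mp4')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(gray)  # frame size must match (960, 540) exactly

cap.release()
out.release()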
Here is the more specific code of what I'm supposed to be doing, but I'm not sure if this is hacked-up/pseudocode, so I need to understand what I'm doing before I move to this step. If you understand how to make this work, please clarify this code as well. I just need to find a way to open, edit and save a working video file; it seems simple, but a week has already passed...
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image)
%time white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
I want to read a live stream from YouTube to perform some basic CV things. We probably have to somehow parse the YouTube URL to convert it into a format readable by OpenCV, something like:
cap = cv2.VideoCapture('https://www.youtube.com/watch?v=_9OBhtLA9Ig')
Has anyone done this?
I am sure you already know the answer by now, but I will answer for others searching for the same topic. You can do this by using Pafy (probably together with youtube_dl).
import pafy
import cv2
url = "https://www.youtube.com/watch?v=_9OBhtLA9Ig"
video = pafy.new(url)
best = video.getbest(preftype="mp4")
capture = cv2.VideoCapture(best.url)
while True:
    grabbed, frame = capture.read()
    # ...
And that should be it.
I've added YouTube URL source support to my VidGear Python library, which automatically pipelines a YouTube video into OpenCV when you provide just its URL. Here is a complete Python example:
# import libraries
from vidgear.gears import CamGear
import cv2

# YouTube video URL as input
stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', stream_mode=True, logging=True).start()

# infinite loop
while True:

    # read frames
    frame = stream.read()

    # check if frame is None
    if frame is None:
        # if True break the infinite loop
        break

    # do something with frame here

    # Show output window
    cv2.imshow("Output Frame", frame)

    key = cv2.waitKey(1) & 0xFF
    # check for 'q' key-press
    if key == ord("q"):
        # if 'q' key-pressed break out
        break

# close output window
cv2.destroyAllWindows()

# safely close video stream
stream.stop()
Code Source
After 100-120 frames, the answer from @lee hannigan was crashing on me for a live stream on YouTube.
I worked out a method with Pafy to just grab x number of frames and splice them together, but this stitched the chunks together poorly and gave choppy results. Pafy may not be designed for live streams; I couldn't find a way to stitch the frames together seamlessly.
What worked in the end is below, slightly modified from guttentag_liu's answer on this post. It takes a few more packages and is lengthy, but it works. Because the stream is live, it needs to be downloaded in chunks, hence saving to a temporary file. You could probably do your OpenCV work on each chunk, then save to a file at the end instead of re-opening it.
# pip install m3u8
# pip install streamlink
from datetime import datetime, timedelta, timezone
import time
import urllib.request
import m3u8
import streamlink
import cv2  # openCV

def get_stream(url):
    """
    Get upload chunk url
    input: youtube URL
    output: m3u8 object segment
    """
    # Try this line `tries` number of times; if it doesn't work,
    # then show the exception on the last attempt
    # Credit, theherk, https://stackoverflow.com/questions/2083987/how-to-retry-after-exception
    tries = 10
    for i in range(tries):
        try:
            streams = streamlink.streams(url)
        except:
            if i < tries - 1:  # i is zero indexed
                print(f"Attempt {i+1} of {tries}")
                time.sleep(0.1)  # Wait a moment, avoid overload
                continue
            else:
                raise
        break

    stream_url = streams["best"]  # Alternate, use '360p'
    m3u8_obj = m3u8.load(stream_url.args['url'])
    return m3u8_obj.segments[0]  # Parsed stream

def dl_stream(url, filename, chunks):
    """
    Download each chunk to file
    input: url, filename, and number of chunks (int)
    output: saves file at filename location
    returns none.
    """
    pre_time_stamp = datetime(1, 1, 1, 0, 0, tzinfo=timezone.utc)

    # Repeat for each chunk
    # Needs to be in chunks because
    # 1) it's live
    # 2) it won't let you leave the stream open forever
    i = 1
    while i <= chunks:
        # Open stream
        stream_segment = get_stream(url)
        # Get current time on video
        cur_time_stamp = stream_segment.program_date_time
        # Only get next time step, wait if it's not new yet
        if cur_time_stamp <= pre_time_stamp:
            # Don't increment counter until we have a new chunk
            print("NO pre: ", pre_time_stamp, "curr:", cur_time_stamp)
            time.sleep(0.5)  # Wait half a sec
        else:
            print("YES: pre: ", pre_time_stamp, "curr:", cur_time_stamp)
            print(f'#{i} at time {cur_time_stamp}')
            # Open file for writing stream
            file = open(filename, 'ab+')  # ab+ means keep adding to file
            # Write stream to file
            with urllib.request.urlopen(stream_segment.uri) as response:
                html = response.read()
                file.write(html)
            # Update time stamp
            pre_time_stamp = cur_time_stamp
            time.sleep(stream_segment.duration)  # Wait the segment duration
            i += 1  # only increment if we got a new chunk
    return None

def openCVProcessing(saved_video_file):
    '''View saved video with openCV
    Add your other steps here'''
    capture = cv2.VideoCapture(saved_video_file)

    while capture.isOpened():
        grabbed, frame = capture.read()  # read in single frame
        if grabbed == False:
            break

        # openCV processing goes here
        #
        cv2.imshow('frame', frame)  # Show the frame
        # Shown in a new window; to exit, push q on the keyboard
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()  # close the windows automatically

tempFile = "temp.ts"  # files are format ts, openCV can view them
videoURL = "https://www.youtube.com/watch?v=_9OBhtLA9Ig"

dl_stream(videoURL, tempFile, 3)
openCVProcessing(tempFile)
Probably because YouTube no longer provides the like/dislike counts, the first solution gives an error. As a workaround, you can comment out the 53rd and 54th lines of backend_youtube_dl.py in the pafy package; after that, the code in the first solution will work.
Secondly, you cannot get audio with OpenCV; it is a computer vision library, not a multimedia library. You should try other options for that.
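As an alternative to patching pafy (my own suggestion, not part of the answers above), the direct stream URL can also be resolved with yt-dlp and handed straight to OpenCV. A minimal sketch, assuming yt-dlp is installed and that the format selector below matches a progressive mp4 stream:

import cv2
import yt_dlp

url = "https://www.youtube.com/watch?v=_9OBhtLA9Ig"

# Resolve the direct media URL without downloading anything
with yt_dlp.YoutubeDL({"format": "best[ext=mp4]", "quiet": True}) as ydl:
    info = ydl.extract_info(url, download=False)

capture = cv2.VideoCapture(info["url"])
while True:
    grabbed, frame = capture.read()
    if not grabbed:
        break
    # ... OpenCV processing here ...
capture.release()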