OpenCV: record 1080p but show 720p - Python

I am recording 1080p video to a file, but I need to show the user a 1280x720 view. How can I do this? At 1080p the preview doesn't fit on the screen, so I want to display 720p to the user while recording in 1080p.
This is my source code:
import cv2
import numpy as np
# CFEVideoConf and image_resize are helper utilities defined elsewhere in the project

cap = cv2.VideoCapture('rtsp://secret:secret#192.168.1.64/1')

filename = user_id + '.mp4'  # .avi .mp4
frames_per_seconds = 25  # the standard frame rate for films
config = CFEVideoConf(cap, filepath=filename, res='1080p')
out = cv2.VideoWriter(filename, config.video_type, frames_per_seconds, config.dims)
print(config.dims)

img_path = 'ap_logo.png'
logo = cv2.imread(img_path, -1)
watermark = image_resize(logo, height=50)
watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2BGRA)

while True:
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    frame = cv2.flip(frame, 1)
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    frame_h, frame_w, frame_c = frame.shape

    # overlay with 4-channel BGR and alpha
    overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
    watermark_h, watermark_w, watermark_c = watermark.shape
    for i in range(0, watermark_h):
        for j in range(0, watermark_w):
            if watermark[i, j][3] != 0:
                h_offset = frame_h - watermark_h
                w_offset = frame_w - watermark_w
                overlay[h_offset + i, w_offset + j] = watermark[i, j]
    cv2.addWeighted(overlay, 0.25, frame, 1.0, 0, frame)

    # Display the resulting frame
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
    if ret:
        # cv2.resize(1280, 720)
        cv2.imshow('Frame', frame)
        # cv2.resize(frame, 720, 1280, interpolation=cv2.INTER_AREA)
        # cv2.namedWindow('Frame', cv2.WINDOW_KEEPRATIO)
        # cv2.resizeWindow('Frame', 720, 1280)
        out.write(frame)  # write the current frame to the output file

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
out.release()  # saved
cv2.destroyAllWindows()
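One way to get this behaviour, sketched below as a minimal example rather than the asker's actual setup (the stream URL and the 1080p/25 fps writer settings are assumptions), is to write the full-resolution frame to the VideoWriter and resize only the copy passed to cv2.imshow:

import cv2

cap = cv2.VideoCapture('rtsp://user:password@192.168.1.64/1')  # placeholder stream URL
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('recording.mp4', fourcc, 25, (1920, 1080))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    out.write(frame)  # record the full 1080p frame

    # downscale only the copy shown to the user
    preview = cv2.resize(frame, (1280, 720), interpolation=cv2.INTER_AREA)
    cv2.imshow('Frame', preview)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()

Alternatively, cv2.namedWindow('Frame', cv2.WINDOW_NORMAL) followed by cv2.resizeWindow('Frame', 1280, 720) scales the window itself without touching the frame data that is written to disk.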

Related

How to record audio while OpenCV has the camera enabled

I want to record and store audio while analyzing emotion using OpenCV. Unfortunately, the recorded audio is very short and the voice is also not clear. I want to analyze the emotion in the voice too, for which I need the audio. Could somebody help me resolve this?
import time
import wave

import cv2
import numpy as np
import pyaudio
from deepface import DeepFace
from tensorflow.keras.preprocessing.image import img_to_array

# assumed settings matching the calls below; face_cascade and draw_border are defined elsewhere
audio_format = pyaudio.paInt16
channels = 1
sample_rate = 16000
chunk = 1024
frames = []

def main():
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

    p = pyaudio.PyAudio()
    stream = p.open(format=audio_format, channels=channels, rate=16000,
                    input=True, frames_per_buffer=1024)
    start_time = time.time()

    aud = True
    while aud:
        ret, frame = cap.read()
        # only one 1024-sample chunk is read per (slow) video iteration,
        # which is why the recorded audio ends up so short
        data = stream.read(chunk, exception_on_overflow=False)
        frames.append(data)
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5,
                                              flags=cv2.CASCADE_SCALE_IMAGE)
        result = DeepFace.analyze(img_path=frame, actions=["emotion"],
                                  enforce_detection=False, detector_backend='ssd')
        for (x, y, w, h) in faces:
            if w > 130:  # trick: ignore small faces
                draw_border(frame, (x, y), (x + w, y + h), (255, 0, 105), 4, 15, 10)  # draw rectangle around face
                detected_face = frame[int(y):int(y + h), int(x):int(x + w)]  # crop detected face
                detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)  # transform to grayscale
                detected_face = cv2.resize(detected_face, (48, 48))  # resize to 48x48
                img_pixels = img_to_array(detected_face)
                img_pixels = np.expand_dims(img_pixels, axis=0)
                img_pixels /= 255  # pixels are in [0, 255]; normalize to [0, 1]
        emotion = result["dominant_emotion"]
        txt = str(emotion)
        cv2.putText(frame, txt, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        cv2.imshow(file, frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            aud = False
            break

    stream.stop_stream()
    stream.close()
    p.terminate()

    wf = wave.open('/Users/xyz/Documents/Audio/wv.wav', 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(p.get_sample_size(audio_format))
    wf.setframerate(sample_rate)
    wf.writeframes(b''.join(frames))
    wf.close()
    cap.release()
    cv2.destroyAllWindows()
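The audio comes out short because the loop reads only one 1024-sample chunk per video iteration, and the DeepFace call makes each iteration slow, so most of the microphone data is silently dropped. A common workaround, sketched below as an assumption rather than a tested fix, is to capture audio on its own thread so it keeps up regardless of how long each video frame takes:

import threading
import wave
import pyaudio

CHUNK = 1024
RATE = 16000
FORMAT = pyaudio.paInt16

def audio_worker(stream, frames, stop_event):
    # keep pulling audio chunks until the video loop asks us to stop
    while not stop_event.is_set():
        frames.append(stream.read(CHUNK, exception_on_overflow=False))

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE,
                input=True, frames_per_buffer=CHUNK)

frames = []
stop_event = threading.Event()
worker = threading.Thread(target=audio_worker, args=(stream, frames, stop_event))
worker.start()

# ... run the OpenCV/DeepFace video loop here; it no longer has to call stream.read() ...

stop_event.set()
worker.join()
stream.stop_stream()
stream.close()
p.terminate()

wf = wave.open('wv.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()

With this split, the video loop only handles frames, and the WAV written at the end covers the full capture duration.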

OpenCV - output video is not playing

I want to write a video using OpenCV and process its frames differently at different points in time.
I am using cv2.VideoWriter, but the output file shows only the first frame of my video and does not play. I wanted to add text to the first frame, and that part works, but the output doesn't continue with the rest of the video.
As you can see from the code below, it creates a new output file for the processed video.
It does create the mp4 file, but it only shows the first frame with the added text and does not play.
Any suggestion why this is happening?
Here is my code; I am using Spyder on Windows.
fps = int(round(cap.get(5)))
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_video_file, fourcc, fps, (frame_width, frame_height))

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret:
        if cv2.waitKey(28) & 0xFF == ord('q'):
            break
        if between(cap, 0, 10000):
            font = cv2.FONT_HERSHEY_COMPLEX_SMALL
            cv2.putText(frame, 'hello', org=(10, 600), fontFace=font, fontScale=10, color=(255, 0, 0), thickness=4)
            pass
        # Our operations on the frame come here
        out.write(frame)
        # Display the resulting frame
        cv2.imshow('frame', frame)
I could not pinpoint the exact problem in your code because you did not include the between function. However, you may try the following snippet:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
if cap.isOpened() == False:
    print("Unable to read camera feed")

frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
fps = int(round(cap.get(5)))
out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))

while True:
    ret, frame = cap.read()
    if ret == True:
        cv2.putText(frame, 'hello',
                    org=(10, 300),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=5,
                    color=(255, 0, 0),
                    thickness=4)
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
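For reference, the between helper from the question is not shown; presumably it checks whether the capture's current timestamp falls inside a millisecond range. A minimal guess at such a helper (an assumption, not the asker's actual code):

def between(cap, lower_ms, upper_ms):
    # True while the capture's current position is within [lower_ms, upper_ms)
    pos_ms = cap.get(cv2.CAP_PROP_POS_MSEC)
    return lower_ms <= pos_ms < upper_ms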

Resizing a video using OpenCV and saving it

I'm trying to resize a video using OpenCV and then save it back to my system. The code runs and does not give any error, but the output video file is corrupted. The fourcc I am using is mp4v, which works well with .mp4, but the output video is still corrupted. Need help.
import numpy as np
import cv2
import sys
import re

vid = ""
if len(sys.argv) == 3:
    vid = sys.argv[1]
    compress = int(sys.argv[2])
else:
    print("File not mentioned or compression not given")
    exit()

if re.search('.mp4', vid):
    print("Loading")
else:
    exit()

cap = cv2.VideoCapture(0)
ret, frame = cap.read()

def rescale_frame(frame, percent=75):
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

FPS = 15.0
FrameSize = (frame.shape[1], frame.shape[0])
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('Video_output.mp4', fourcc, FPS, FrameSize, 0)

while cap.isOpened():
    ret, frame = cap.read()
    # check for successfulness of cap.read()
    if not ret:
        break
    rescaled_frame = rescale_frame(frame, percent=compress)
    # Save the video
    out.write(rescaled_frame)
    cv2.imshow('frame', rescaled_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
The problem is the VideoWriter initialization.
You initialized it as:
out = cv2.VideoWriter('Video_output.mp4', fourcc, FPS, FrameSize, 0)
The last parameter, 0, means isColor = False: you are telling the writer that you will convert the frames to grayscale before saving them, but there is no such conversion in your code.
Also, you are resizing each frame based on the compress parameter, so the frames you write no longer match the FrameSize passed to the writer.
If I use the default compress parameter:
cap = cv2.VideoCapture(0)

if cap.isOpened():
    ret, frame = cap.read()
    rescaled_frame = rescale_frame(frame)
    (h, w) = rescaled_frame.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter('Video_output.mp4',
                             fourcc, 15.0,
                             (w, h), True)
else:
    print("Camera is not opened")
Now we have initialized the VideoWriter with the desired dimensions.
Full Code:
import time
import cv2

def rescale_frame(frame_input, percent=75):
    width = int(frame_input.shape[1] * percent / 100)
    height = int(frame_input.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame_input, dim, interpolation=cv2.INTER_AREA)

cap = cv2.VideoCapture(0)

if cap.isOpened():
    ret, frame = cap.read()
    rescaled_frame = rescale_frame(frame)
    (h, w) = rescaled_frame.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter('Video_output.mp4',
                             fourcc, 15.0,
                             (w, h), True)
else:
    print("Camera is not opened")

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    rescaled_frame = rescale_frame(frame)

    # write the output frame to file
    writer.write(rescaled_frame)

    cv2.imshow("Output", rescaled_frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

cv2.destroyAllWindows()
cap.release()
writer.release()
Possible question: I don't want to change my VideoWriter parameters; what should I do?
Answer: Then you need to convert your frames to grayscale:

while cap.isOpened():
    # grab the frame from the video stream
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
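For completeness, a minimal sketch of how that grayscale variant could continue, assuming the writer from the question is kept with isColor = False (the frames written must still match the FrameSize it was given):

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # single-channel frame, matching isColor=False
    # the written frame must still match FrameSize, so either skip the rescaling
    # or create the writer with the rescaled dimensions instead
    out.write(gray)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break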

How to resize the output frame from a YouTube live stream using OpenCV

I'm new to coding and to this site. I'm working on a project where I want to use OpenCV, but I've run into an issue. I need to resize the output frame for object recognition. I have read that the frame should be 416x416, but when I display the frame, it is still the original size.
Here's the code:
import pafy
import youtube_dl
import cv2
import numpy as np

url = "https://www.youtube.com/watch?v=WOn7m0_aYBw"
video = pafy.new(url)
best = video.getbest(preftype="mp4")

cap = cv2.VideoCapture()
cap.open(best.url)

net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))

while True:
    ret, frame = cap.read()
    # if ret == True:
    img = cv2.imshow('frame', frame)
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 416)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 416)
    width = 416
    height = 416
    dim = (width, height)
    img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    print(img.shape)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
print(img.shape) returns the correct size, but I think I'm displaying the wrong image. How can I change this code so the window shows the frame at the correct size?
You were showing the frame before resizing it; display the resized image instead:
while True:
    ret, frame = cap.read()
    width = 416
    height = 416
    dim = (width, height)
    img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    print(img.shape)
    cv2.imshow('frame', img)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
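If the resized image is also meant to feed the YOLO detection from the question, the forward pass can sit in the same loop. A rough sketch reusing the net and output_layers variables from the question's code (note that cv2.dnn.blobFromImage rescales its input to the requested size on its own, so the explicit resize mainly matters for what you display):

while True:
    ret, frame = cap.read()
    if not ret:
        break
    img = cv2.resize(frame, (416, 416), interpolation=cv2.INTER_AREA)
    cv2.imshow('frame', img)

    # blobFromImage normalizes and resizes to 416x416 itself
    blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), (0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    if cv2.waitKey(20) & 0xFF == ord('q'):
        break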

How can I change a single frame in a video using OpenCV?

I have a video that I built using OpenCV's VideoWriter.
I want to replace a specific frame with a different one.
Is there a way to do it without rebuilding the entire video?
I am changing the third frame by setting all of its pixels to zero.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

frame_number = -1
while True:
    frame_number += 1
    ret, frame = cap.read()
    if frame_number == 3:  # if this is the third frame, replace it with a blank frame
        change_frame_with = np.zeros_like(frame)
        frame = change_frame_with
    out.write(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
If you do not want to go through all the frames again:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

frame_number = -1
while True:
    frame_number += 1
    ret, frame = cap.read()
    if frame_number == 3:  # if this is the third frame, replace it with a blank frame
        change_frame_with = np.zeros_like(frame)
        frame = change_frame_with
        out.write(frame)
        break  # add a break here
    else:
        out.write(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
But the above solution stops after frame 3, so none of the later frames are written to the new video.
This might help you :)
import cv2 as cv

vid = cv.VideoCapture('input.mp4')
total_frames = last_frame_number = vid.get(cv.CAP_PROP_FRAME_COUNT)

# take the output properties from the input video
width = int(vid.get(cv.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv.CAP_PROP_FRAME_HEIGHT))
video_fps = vid.get(cv.CAP_PROP_FPS)

fourcc = cv.VideoWriter_fourcc(*'avc1')
writer = cv.VideoWriter("output.mp4", apiPreference=0, fourcc=fourcc,
                        fps=video_fps, frameSize=(width, height))

frame_number = -1
while True:
    frame_number += 1
    vid.set(cv.CAP_PROP_POS_FRAMES, frame_number)
    ret, frame = vid.read()
    if not ret or frame_number >= last_frame_number:
        break

    # check these two lines first: set the frame index to replace and the
    # image (same size as the video frames) to insert in its place
    if frame_number == changeable_frame_number:
        frame = new_img_to_be_inserted

    writer.write(frame)
    # frame = np.asarray(frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # cv.imshow('frame', gray)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

vid.release()
writer.release()
cv.destroyAllWindows()
