So I wanted to test the performance difference between the opencv-python package and the newest OpenCV built from source, on a Raspberry Pi 4 board. After this change cv2.resize() stopped working in my script and the output is always my webcam's maximum resolution. I also tried the imutils library, without success. I tried using both:
cv2.CAP_PROP_FRAME_WIDTH
cv2.CAP_PROP_FRAME_HEIGHT
But I only get a resized window, not a resized frame.
Additionally, I get this warning:
GStreamer warning: Cannot query video position: status=0, value=-1, duration=-1
What have I missed?
Update:
Minimal code
import cv2
from imutils.video import FPS

cap = cv2.VideoCapture(0)
#cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
#cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)

fps = FPS().start()
font = cv2.FONT_HERSHEY_DUPLEX

while cap.isOpened():
    ret, frame = cap.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.75, fy=0.75)
    fps.update()
    fps.stop()
    cv2.putText(small_frame, "FPS {:.1f}".format(fps.fps()),
                (10, 30), font, 1.0, (255, 255, 255), 1)
    cv2.imshow("Frame", small_frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
    if key == ord('p'):
        cv2.waitKey(-1)

cap.release()
cv2.destroyAllWindows()
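For reference, one way to check whether the backend actually applied a requested capture size is to read the properties back after setting them. A minimal sketch:

import cv2

cap = cv2.VideoCapture(0)
# set() may return False if the backend does not support the property
ok_w = cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
ok_h = cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
print("set accepted:", ok_w, ok_h)
print("actual size :", cap.get(cv2.CAP_PROP_FRAME_WIDTH),
      "x", cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cap.release()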
You are using a scale factor and zero dimensions together.
Try this:
import cv2
img = cv2.imread('/home/img/python.png', cv2.IMREAD_UNCHANGED)
print('Original Dimensions : ',img.shape)
scale_percent = 60 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
print('Resized Dimensions : ',resized.shape)
cv2.imshow("Resized image", resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
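The same percentage-based resize can be applied inside the capture loop instead of to a still image. A rough sketch under that assumption (the dimensions are taken from each live frame):

import cv2

cap = cv2.VideoCapture(0)
scale_percent = 75  # percent of the original frame size

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    width = int(frame.shape[1] * scale_percent / 100)
    height = int(frame.shape[0] * scale_percent / 100)
    small_frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
    cv2.imshow("Frame", small_frame)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()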
This is my code; I've looked at some tutorials but can't find what I'm looking for.
I want to overlay the Frame.png image on my webcam feed. I tried to add the image directly, but that didn't work either. If possible, is there a way to add an image, not as an overlay, but keeping it at a certain coordinate in the live webcam window?
import cv2
import numpy as np


def detect_and_save():
    alpha = 0.2
    beta = 1 - alpha
    cap = cv2.VideoCapture(0)
    sciframe = cv2.imread('Frame.png')
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    while True:
        ret, frame = cap.read()
        overlay = frame.copy()
        output = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(gray, 1.5, 5)
        cv2.putText(output, "HUD Test", (175, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 50, 50), 3)
        for face in faces:
            x, y, w, h = face
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 200, 0), -1)
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 0, 0), 1)
            cv2.rectangle(overlay, (x, y - 20), (x + w, y), (25, 20, 0), -1)
            cv2.addWeighted(overlay, alpha, output, beta, 0, output)
            cv2.putText(output, "Human", (x + 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)
        if not ret:
            continue
        cv2.imshow("HUD", output)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            cv2.imwrite('./images/CID_{}.png'.format(time.strftime('%d%m%y_%H_%M_%S')), output)
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    import time
    detect_and_save()
You can directly place one image on top of another at any coordinate in OpenCV quite easily, by writing it into a region of the frame.
cap = cv2.VideoCapture(0)

im_height = 50  # define your top image size here
im_width = 50
im = cv2.resize(cv2.imread("Frame.png"), (im_width, im_height))

while True:
    ret, frame = cap.read()
    # paste the image into the top-left corner; rows index height and columns index width,
    # so pick your own region like frame[200:250, 200:250]
    frame[0:im_height, 0:im_width] = im
    cv2.imshow("live camera", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
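If you want the image blended over the live frame rather than pasted opaquely, one option is to blend just that region with cv2.addWeighted. A sketch reusing the same im and region (the 0.4/0.6 weights are arbitrary):

# blend the overlay into the region instead of overwriting it
roi = frame[0:im_height, 0:im_width]
blended = cv2.addWeighted(im, 0.4, roi, 0.6, 0)  # 40% overlay, 60% live frame
frame[0:im_height, 0:im_width] = blended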
I'm trying to write a script that anonymizes faces in videos.
Here is my code (Python):
import cv2
from mtcnn.mtcnn import MTCNN

ksize = (101, 101)


def decode_fourcc(cc):
    return "".join([chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)])


def find_face_MTCNN(color, result_list):
    for result in result_list:
        x, y, w, h = result['box']
        roi = color[y:y+h, x:x+w]
        cv2.rectangle(color, (x, y), (x+w, y+h), (0, 155, 255), 5)
        detectedFace = cv2.GaussianBlur(roi, ksize, 0)
        color[y:y+h, x:x+w] = detectedFace
    return color


detector = MTCNN()
video_capture = cv2.VideoCapture("basic.mp4")
width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(video_capture.get(cv2.CAP_PROP_FPS))

video_out = cv2.VideoWriter(
    "mtcnn.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

while length:
    _, color = video_capture.read()
    faces = detector.detect_faces(color)
    detectFaceMTCNN = find_face_MTCNN(color, faces)
    video_out.write(detectFaceMTCNN)
    cv2.imshow("Video", detectFaceMTCNN)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

fourccIN = video_capture.get(cv2.CAP_PROP_FOURCC)
fourccOUT = video_out.get(cv2.CAP_PROP_FOURCC)
print(f"input fourcc is: {fourccIN, decode_fourcc(fourccIN)}")
print(f"output fourcc is: {fourccOUT, decode_fourcc(fourccOUT)}")

video_capture.release()
cv2.destroyAllWindows()
I get a perfectly working window with the anonymization, so imshow() works fine. But the newly saved video "mtcnn.mp4" can't be opened. I found out the problem is the fourcc format of the new video, since my output is:
input fourcc is: (828601953.0, 'avc1')
output fourcc is: (-1.0, 'ÿÿÿÿ')
'ÿÿÿÿ' stands for an unreadable code, so that's the core of the matter...
Can someone help me please?
They are probably facing the same problem:
Using MTCNN with a webcam via OpenCV
And I used this to encode the fourcc:
What is the opposite of cv2.VideoWriter_fourcc?
I've changed these lines:
video_out = cv2.VideoWriter(
    "mtcnn.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
to:
video_out = cv2.VideoWriter(
    "mtcnn.avi", cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height))
And now it works for me.
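As a side note, an unplayable output file usually means the writer never opened successfully, which can be caught right after construction. A minimal sketch:

video_out = cv2.VideoWriter(
    "mtcnn.avi", cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height))
if not video_out.isOpened():
    # the chosen fourcc/container combination is not supported by the installed backend
    raise RuntimeError("VideoWriter failed to open")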
I'm trying to resize a video using OpenCV and then save it back to my system. The code runs and does not give any error, but the output video file is corrupted. The fourcc I am using is mp4v, which works well with .mp4, but the output video is still corrupted. Need help.
import numpy as np
import cv2
import sys
import re

vid = ""
if len(sys.argv) == 3:
    vid = sys.argv[1]
    compress = int(sys.argv[2])
else:
    print("File not mentioned or compression not given")
    exit()

if re.search('.mp4', vid):
    print("Loading")
else:
    exit()

cap = cv2.VideoCapture(0)
ret, frame = cap.read()


def rescale_frame(frame, percent=75):
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)


FPS = 15.0
FrameSize = (frame.shape[1], frame.shape[0])
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('Video_output.mp4', fourcc, FPS, FrameSize, 0)

while cap.isOpened():
    ret, frame = cap.read()
    # check for successfulness of cap.read()
    if not ret:
        break
    rescaled_frame = rescale_frame(frame, percent=compress)
    # Save the video
    out.write(rescaled_frame)
    cv2.imshow('frame', rescaled_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
The problem is the VideoWriter initialization.
You initialized:
out = cv2.VideoWriter('Video_output.mp4', fourcc, FPS, FrameSize, 0)
The last parameter, 0, means isColor = False. You are telling the writer that you are going to convert the frames to grayscale before saving them, but there is no such conversion in your code.
Also, you are resizing each frame based on the compress parameter, so the frame size passed to the VideoWriter has to match the size of the rescaled frames you actually write.
If I use the default compress parameter:
cap = cv2.VideoCapture(0)

if cap.isOpened():
    ret, frame = cap.read()
    rescaled_frame = rescale_frame(frame)
    (h, w) = rescaled_frame.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter('Video_output.mp4',
                             fourcc, 15.0,
                             (w, h), True)
else:
    print("Camera is not opened")
Now we have initialized the VideoWriter with the desired dimensions.
Full Code:
import time
import cv2


def rescale_frame(frame_input, percent=75):
    width = int(frame_input.shape[1] * percent / 100)
    height = int(frame_input.shape[0] * percent / 100)
    dim = (width, height)
    return cv2.resize(frame_input, dim, interpolation=cv2.INTER_AREA)


cap = cv2.VideoCapture(0)

if cap.isOpened():
    ret, frame = cap.read()
    rescaled_frame = rescale_frame(frame)
    (h, w) = rescaled_frame.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter('Video_output.mp4',
                             fourcc, 15.0,
                             (w, h), True)
else:
    print("Camera is not opened")

while cap.isOpened():
    ret, frame = cap.read()
    rescaled_frame = rescale_frame(frame)
    # write the output frame to file
    writer.write(rescaled_frame)
    cv2.imshow("Output", rescaled_frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

cv2.destroyAllWindows()
cap.release()
writer.release()
Possible Question: I don't want to change my VideoWriter parameters, what should I do?
Answer: Then you need to convert your frames to grayscale before writing them:
while cap.isOpened():
    # grab the frame from the video stream
    ret, frame = cap.read()
    if not ret:
        break
    # convert the frame to grayscale so it matches isColor=False
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(frame)
It's my beginning with coding and with this site. I'm working on a project where I want to use OpenCV, but I've got an issue with it. I need to resize the output frame for recognizing objects. I have read that the frame should be 416x416 in size, but when I display the frame, it's still the regular size.
Here's the code:
import pafy
import youtube_dl
import cv2
import numpy as np

url = "https://www.youtube.com/watch?v=WOn7m0_aYBw"
video = pafy.new(url)
best = video.getbest(preftype="mp4")

cap = cv2.VideoCapture()
cap.open(best.url)

net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))

while True:
    ret, frame = cap.read()
    # if ret == True:
    img = cv2.imshow('frame', frame)
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 416)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 416)
    width = 416
    height = 416
    dim = (width, height)
    img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    print(img.shape)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
print(img.shape) returns the correct size, but I think I'm showing the wrong window. How do I change this code so that the window is displayed at the correct size?
You were showing the frame before resizing it:
while True:
    ret, frame = cap.read()
    width = 416
    height = 416
    dim = (width, height)
    img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    print(img.shape)
    cv2.imshow('frame', img)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
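As a side note, cv2.dnn.blobFromImage already rescales its input to the size you pass (416, 416), so the explicit cv2.resize is only needed to get the display window at that size; feeding the detector could stay roughly as it is:

# blobFromImage resizes the frame to 416x416 internally
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)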
So my code should detect an object with OpenCV, and once it detects it, it should take a snap of it. Which it does fine... However, it simply opens the webcam and doesn't show me the webcam feed. When it detects the object, it takes a snap and shows the image.
What I want is to see the webcam feed until it detects the object... How can I do that?
Here's my code:
import cv2

cascade = cv2.CascadeClassifier('xcascade.xml')
cap = cv2.VideoCapture(1)

num = 0
while num < 1000:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cas = cascade.detectMultiScale(gray, 10, 10)
    for (x, y, w, h) in cas:
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 5)
        cv2.putText(img, 'Something', (x, y - 120), font, 1.5, (0, 255, 255), 5, cv2.LINE_AA)
        num = num + 1
        cv2.imshow('img', img)
        cv2.waitKey(1000)
        cap.release()
        cv2.desrtoyAllWindows()
        break
You are only showing the image when a detection happens; you need to move your imshow call outside the for loop.
import cv2

cascade = cv2.CascadeClassifier('xcascade.xml')
cap = cv2.VideoCapture(1)

num = 0
while num < 1000:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cas = cascade.detectMultiScale(gray, 10, 10)
    for (x, y, w, h) in cas:
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 5)
        cv2.putText(img, 'Something', (x, y - 120), font, 1.5, (0, 255, 255), 5, cv2.LINE_AA)
        num = num + 1
        cv2.imshow('img', img)
        cv2.waitKey(1000)
        cap.release()
        cv2.destroyAllWindows()
        break
    # show the live feed on every frame, not only when something is detected
    cv2.imshow('img', img)
    cv2.waitKey(1000)
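If the goal is to keep watching the live feed and only save a snapshot once the object is detected, a minimal sketch could look like this (assuming the same cascade file; 'snap.png' is just a placeholder filename):

import cv2

cascade = cv2.CascadeClassifier('xcascade.xml')
cap = cv2.VideoCapture(1)

detected = False
while not detected:
    ret, img = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cas = cascade.detectMultiScale(gray, 10, 10)
    for (x, y, w, h) in cas:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 5)
        cv2.imwrite('snap.png', img)  # placeholder filename for the snapshot
        detected = True
    cv2.imshow('img', img)  # the live feed is shown on every frame
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()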