The code below is supposed to capture a video and save it.
import cv2
import numpy as np
from skimage.filters import gaussian

capture = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('My video.avi', fourcc, 10, (640,480))

while capture.isOpened:
    ret, frame = capture.read()
    if ret==True:
        frame = gaussian(frame, sigma=5, multichannel=True)
        out.write(frame)
        cv2.imshow('My video', frame)
        if cv2.waitKey(1) == 27:
            break

capture.release()
out.release()
cv2.destroyAllWindows()
However, I get the following error:
error: OpenCV(3.4.3) D:\Build\OpenCV\opencv-3.4.3\modules\videoio\src\cap_ffmpeg.cpp:296: error: (-215:Assertion failed) image.depth() == CV_8U in function 'cv::`anonymous-namespace'::CvVideoWriter_FFMPEG_proxy::write'
If I remove the gaussian blur, the code works. What is wrong?
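skimage.filters.gaussian returns a floating-point image with values in [0, 1], while cv2.VideoWriter.write expects 8-bit (CV_8U) frames, which is exactly what the assertion complains about. Scale the blurred frame back to uint8 before writing, as in the code below: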
import cv2
import numpy as np
from skimage.filters import gaussian

capture = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
videoWriter = cv2.VideoWriter(
    r'output_video_path', fourcc, 10.0, (640, 480))

while (True):
    ret, frame = capture.read()
    if ret:
        # gaussian() returns a float image in [0, 1]
        frame = gaussian(frame, sigma=5, multichannel=True)
        cv2.imshow('video', frame)
        # scale back to 8-bit before writing, otherwise VideoWriter rejects the frame
        frame = np.uint8(255 * frame)
        videoWriter.write(frame)
    if cv2.waitKey(1) == 27:
        break

capture.release()
videoWriter.release()
cv2.destroyAllWindows()
I'm new to image processing.
I'm trying to build traffic light detection in Python OpenCV, following one of the papers, but I got an error I can't understand.
Here is the code.
# TL_Detection.py
import cv2
import numpy as np

def Video():
    try:
        cap = cv2.VideoCapture(0)
        # cap = cv2.VideoCapture('/home/aicar/Downloads/tf_test.mp4')
    except:
        print('no cam error')
        return

    # cap.set(3, 480)
    # cap.set(4, 320)
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (frameWidth, frameHeight)
    cap.set(3, frameWidth)
    cap.set(4, frameHeight)

    cnt = 0

    # while cap.isOpened():
    while True:
        ret, frame = cap.read()
        # if not cap.isOpened():
        #     cap.open('/home/aicar/Downloads/tf_test.mp4')
        cv2.imshow('frame', frame)
        print(ret, cnt)
        if not ret:
            print('no ret error')
            break
        cnt += 1

        cap.release()
        cv2.destroyAllWindows()

Video()
This code produces the following output:
True 0
OpenCV Error: Assertion failed (size.width>0 && size.height>0) in imshow, file /home/aicar/opencv/opencv-3.4.0/modules/highgui/src/window.cpp, line 339
Traceback (most recent call last):
  File "/home/aicar/codes_juyeong/TL_detection.py", line 53, in <module>
    Video()
  File "/home/aicar/codes_juyeong/TL_detection.py", line 33, in Video
    cv2.imshow('frame', frame)
cv2.error: /home/aicar/opencv/opencv-3.4.0/modules/highgui/src/window.cpp:339: error: (-215) size.width>0 && size.height>0 in function imshow
Why can't it get any frames after the first one?
The webcam is connected correctly.
I need your help. Thanks.
You release your cap too early: it's inside the while True loop, so after the first iteration cap.read() returns (False, None) and imshow fails on the empty frame. Move the release out of the loop and your program will run without any problems.
To capture frame after frame, try this piece of code:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame',gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
# while cap.isOpened():
while True:
    ret, frame = cap.read()
    # if not cap.isOpened():
    #     cap.open('/home/aicar/Downloads/tf_test.mp4')
    cv2.imshow('frame', frame)
    print(ret, cnt)
    if not ret:
        print('no ret error')
        break
    cnt += 1

# take it out of the while loop
cap.release()
cv2.destroyAllWindows()
I have a face detection program.
I tried to run the code but it's not working.
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

cam = cv2.VideoCapture(0)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)

while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (225, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y+h, x:x+w])
        if (conf < 50):
            if (Id == 1):
                Id = "chandra"
            elif (Id == 2):
                Id = "vamsi"
        else:
            Id = "Unknown"
        cv2.putText(im, str(Id), (x, y+h), fontFace, 255)
    cv2.imshow('im', im)
    if (cv2.waitKey(10) == ord('q')):
        break

cam.release()
cv2.destroyAllWindows()
I got this error:
(-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
I'm using OpenCV (cv2) and Python 3.7.
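The !empty() assertion means the cascade never loaded: CascadeClassifier could not find haarcascade_frontalface_default.xml at the given path. A minimal sketch for loading and verifying the cascade, assuming the opencv-python package, which ships the Haar XML files under cv2.data.haarcascades:

import cv2

# Build the path from the data directory bundled with opencv-python
cascadePath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

# detectMultiScale asserts !empty(), so fail early with a clear message instead
if faceCascade.empty():
    raise IOError("Could not load cascade file: " + cascadePath)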
Maybe this will help:
try:
    ret, im = cam.read()
except:
    continue
Please use the code below to check whether you are getting any images from your camera:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame',gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
program:
import numpy as np
import cv2

cap = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc(*'VID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))

while(True):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(frame)
    cv2.imshow('frame',gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
error:
Traceback (most recent call last):
  File "D:/ANIKET/python projects/img_process.py", line 5, in <module>
    fourcc = cv2.VideoWriter_fourcc(*'VID')
TypeError: Required argument 'c4' (pos 4) not found
I'm trying to save the video file, but I'm getting this error.
Please tell me what I am doing wrong.
You can also pass -1 instead of a fourcc code (on Windows this typically opens a codec selection dialog); it worked in my case.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# Define the codec and create VideoWriter object
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', -1, 20.0, (640,480))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        gray = cv2.cvtColor(src=frame, code=cv2.COLOR_BGR2GRAY)
        out.write(gray)
        cv2.imshow('frame', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
This code addresses both your original issue (VideoWriter_fourcc takes exactly four characters, so *'VID' unpacks to only three and the fourth argument c4 is missing; replacing *'VID' with *'XVID' fixes it) as well as your subsequent issue in the comment to your question:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# isColor=False because single-channel (grayscale) frames are written below
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480), isColor=False)

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        gray = cv2.cvtColor(src=frame, code=cv2.COLOR_BGR2GRAY)
        out.write(gray)
        cv2.imshow('frame', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Feel free to tell me if this works as you intended and point out any remaining issues!
I have video streams and I'd like to convert them to foreground-detected videos, in which everything that moves is white and everything else is black.
When I run the script below, nothing happens; the Python IDE just waits. Should I wait while the video renders, or am I doing something wrong?
Thanks
import cv2
import numpy

cap = cv2.VideoCapture('2018_02_28_12_07_42.h264')
fgbg = cv2.createBackgroundSubtractorMOG2()
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))

#while(cap.isOpened()):
while True:
    ret, frame = cap.read()
    #if ret == True:
    fgmask = fgbg.apply(frame)
    out.write(frame)

    cv2.imshow('original', frame)
    cv2.imshow('fg', fgmask)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
out.release()
cv2.destroyWindows()
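For reference, a minimal sketch of the same idea with isOpened/ret guards, writing the foreground mask itself to the output file instead of the original frame. The output name 'fg_output.avi', taking the frame size from the capture, and isColor=False for the single-channel mask are assumptions:

import cv2

cap = cv2.VideoCapture('2018_02_28_12_07_42.h264')
if not cap.isOpened():
    raise IOError("Could not open the input video")

fgbg = cv2.createBackgroundSubtractorMOG2()

# Use the real frame size of the input instead of hard-coding 640x480
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# isColor=False because the mask is a single-channel image
out = cv2.VideoWriter('fg_output.avi', fourcc, 20.0, (w, h), isColor=False)

while True:
    ret, frame = cap.read()
    if not ret:            # end of stream or read error: stop instead of processing None
        break
    fgmask = fgbg.apply(frame)
    out.write(fgmask)      # write the foreground mask, not the original frame
    cv2.imshow('original', frame)
    cv2.imshow('fg', fgmask)
    if cv2.waitKey(30) & 0xFF == 27:
        break

cap.release()
out.release()
cv2.destroyAllWindows()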
Here is my code:
import cv2
import numpy as np

cap = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
framesize = (640,480)
out = cv2.VideoWriter('dump.avi',fourcc,60.0,framesize)
font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame',frame)
    cv2.rectangle(gray, (0,0),(640,480),(255,255,255),3)
    cv2.putText(gray, "gray", (0,130),font, 5,(255,255,255),2, cv2.LINE_AA)
    cv2.imshow('fr',gray)
Here I am trying to color a specific square area on the live image feed:

    #gray[100:105,110:115] = [255,255,255]
    io = gray[37:111,107:194]

Here I am cloning one area into another:

    gray[200:200,270:283] = io

    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
How can I color a specific area? My attempt is not working.
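For reference, a minimal sketch of coloring and cloning rectangular regions with NumPy slicing. Slices are image[y1:y2, x1:x2]; a slice like gray[200:200, ...] has zero height, so nothing is assigned, and the destination slice must match the source shape. The array sizes and coordinates below are illustrative assumptions:

import numpy as np

# Stand-ins for the frames from the question (a 480x640 BGR frame and its gray version)
frame = np.zeros((480, 640, 3), dtype=np.uint8)
gray = np.zeros((480, 640), dtype=np.uint8)

# Slicing is image[y1:y2, x1:x2]
gray[37:111, 107:194] = 255            # single-channel: assign a scalar
frame[37:111, 107:194] = (0, 0, 255)   # 3-channel BGR: assign a per-channel color (red here)

# Cloning a region: the destination slice must have the same shape as the source
io = gray[37:111, 107:194]             # a 74 x 87 patch
gray[200:274, 270:357] = io            # 274-200 == 74 rows, 357-270 == 87 columns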