How can a segmentation fault error be fixed in OpenCV Python?

I am trying to run simple object detection on a webcam using YOLOv5, but I keep getting the error below.
zsh: segmentation fault
The camera appears to open, then shuts off immediately, and the code exits with the above error.
Here is my code:
def object_detector():
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
    # mmocr = MMOCR(det='TextSnake', recog='SAR')
    cam = cv2.VideoCapture(0)
    while True:
        ret, frame = cam.read()
        # ocr_result = mmocr.readtext(frame, output='demo/cam.jpg', export='demo/', print_result=True, imshow=True)
        # print("RESULT \n ", ocr_result)
        frame = frame[:, :, [2, 1, 0]]
        frame = Image.fromarray(frame)
        frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
        # ocr_result = mmocr.readtext(frame, output='demo/cam.jpg', export='demo/', print_result=True, imshow=True)
        # print("RESULT \n ", ocr_result)
        result = model(frame, size=640)
        # Results
        # crops = result.crop(save=True)
        cv2.imshow('YOLO', np.squeeze(result.render()))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()
What am I doing wrong and how can I fix it?

You're not testing the return value from cam.read(). Ensure that ret indicates success and that frame is not None before you proceed.

You need to check that an image was returned in the first place. The first value returned by cam.read() tells you whether a frame has been received. This is how you can make use of it:
...
while True:
    ret, frame = cam.read()
    if ret:
        frame = frame[:, :, [2, 1, 0]]
        ...
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
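For completeness, a minimal sketch of the question's object_detector() with this check applied might look as follows. It assumes the same ultralytics/yolov5 hub model as in the question and simply skips frames that fail to arrive; it is an illustrative sketch, not a tested implementation.

import cv2
import numpy as np
import torch

def object_detector():
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
    cam = cv2.VideoCapture(0)
    while True:
        ret, frame = cam.read()
        if not ret or frame is None:
            # no frame arrived; skip instead of indexing into None
            continue
        # YOLOv5 expects RGB, OpenCV delivers BGR
        result = model(frame[:, :, ::-1], size=640)
        rendered = np.squeeze(result.render())
        cv2.imshow('YOLO', cv2.cvtColor(rendered, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()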

Related

failing to save video after applying saliency

I am attempting to save the output of the code proposed here: https://www.programmersought.com/article/34317272452/
The output video is created, but when I try to play the result, I get this message: 'an error occurred, could not demultiplex stream'. I have also tried several ways to save the video; for example, I have used the shape of the frames, but that is not recognized either. Could anyone help me figure out what I am doing wrong?
def read_video(video):
    # Saliency detection algorithm
    saliency_algorithm = "BinWangApr2014"
    start_frame = 0
    if saliency_algorithm is None or video is None:
        print('Please set saliency_algorithm and video')
        return
    cap = cv2.VideoCapture(video)
    video = cv2.VideoWriter('out_j.avi', cv2.VideoWriter_fourcc(*'XVID'), 5, (360, 360))
    # Set the video start frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    _, frame = cap.read()
    if frame is None:
        print('Please set saliency_algorithm and video')
        return
    image = frame.copy()
    if saliency_algorithm.find("BinWangApr2014") == 0:
        saliency_algorithm = cv2.saliency.MotionSaliencyBinWangApr2014_create()
        # set the size of the data structure
        saliency_algorithm.setImagesize(image.shape[1], image.shape[0])
        # Initialization
        saliency_algorithm.init()
        paused = False
        while True:
            if not paused:
                _, frame = cap.read()
                if frame is None:
                    break
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                start = cv2.getTickCount()
                success, saliencyMap = saliency_algorithm.computeSaliency(frame)
                duration = (cv2.getCPUTickCount() - start) / cv2.getTickFrequency()
                # print("computeBinaryMap cost time is: {} ms".format(duration * 1000))
                video.write(frame)
                # cv2.imshow('image', frame)
                cv2.imshow('saliencyMap', saliencyMap * 255)
            c = cv2.waitKey(2)
            c = chr(c) if c != -1 else 0
            if c == 'q':
                break
            if c == 'p':
                paused = not paused
            # video.write(frame)
        cv2.destroyAllWindows()
        video.release()
        # camera.stop()
    return
Thanks in advance

error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'

I'm doing a violence detection project and I have trained my model using ResNet, but when I try to print the probability it gives an error.
# import packages done
# load model
model = load_model('E:\Docs & Other\C-VS\ViolenceDetection\Resources\VDresnet152v2.h5')
img_width, img_hight = 224, 224
# start web cam
cap = cv2.VideoCapture(0)
# start reading images and prediction
while True:
    # read image from webcam
    responce, color_img = cap.read()
    # if responce is False, break the loop
    if responce == False:
        break
    # resize image to the model's input size
    color_img = cv2.resize(color_img, (224, 224))
    color_img = color_img.reshape(1, 224, 224, 3)
    pred_prob = model.predict(color_img)
    print(pred_prob[0][0].round(2))
    pred = np.argmax(pred_prob)
    # display image
    cv2.imshow('LIVE', color_img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
What am I doing wrong? When I run this code, it gives an error.
after this line:
color_img = color_img.reshape(1,224,224,3)
color_img is a 4D tensor (with separate color planes!), not usable with cv2.imshow().
Please do not "recycle" variable names, especially if their context changes:
color_img = cv2.resize(color_img,(224,224))
color_tensor = color_img.reshape(1,224,224,3)
pred_prob = model.predict(color_tensor)
...
cv2.imshow('LIVE', color_img)
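Putting that together, a minimal sketch of the corrected loop could look like this. It keeps the question's cap and model objects and variable names, which are taken as given from the original code.

while True:
    responce, color_img = cap.read()
    if not responce:
        break
    # keep the displayable 3-channel image and the 4D model input separate
    color_img = cv2.resize(color_img, (224, 224))
    color_tensor = color_img.reshape(1, 224, 224, 3)
    pred_prob = model.predict(color_tensor)
    print(pred_prob[0][0].round(2))
    cv2.imshow('LIVE', color_img)  # show the image, not the tensor
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()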

Why does the webcam not appear in python-opencv?

When I run my newbie OpenCV program in Python on a MacBook Air, it does not show anything. Also, the program exits after 0.5-1 seconds.
Here is the code; it is really pretty simple:
import cv2

cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)

while True:
    success, img = cap.read()
    cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
You should first list the available camera ids. Here you can use G M's answer:
def list_ports():
    is_working = True
    dev_port = 0
    working_ports = []
    available_ports = []
    while is_working:
        camera = cv2.VideoCapture(dev_port)
        if not camera.isOpened():
            is_working = False
            print("Port %s is not working." % dev_port)
        else:
            is_reading, img = camera.read()
            w = camera.get(3)
            h = camera.get(4)
            if is_reading:
                print("Port %s is working and reads images (%s x %s)" % (dev_port, h, w))
                working_ports.append(dev_port)
            else:
                print("Port %s for camera ( %s x %s) is present but does not reads." % (dev_port, h, w))
                available_ports.append(dev_port)
        dev_port += 1
    return available_ports, working_ports
Result will be:
Port 0 is working and reads images (720.0 x 1280.0)
OpenCV: out device of bound (0-0): 1
OpenCV: camera failed to properly initialize!
Port 1 is not working.
For instance, for my mac, camera id (cid) 0 is available but not 1.
Therefore I initialized the code with cid 0.
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)

while cap.isOpened():
    success, img = cap.read()
    if success:
        cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
You should not initialize the VideoCapture object with the default 0, since it may not work. If no working port is listed as a result, you may need an external camera.
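As a small usage sketch (assuming the list_ports() helper above is defined in the same script), the first working port can be picked automatically:

import cv2

available_ports, working_ports = list_ports()
if not working_ports:
    raise RuntimeError("No working camera found; an external camera may be needed.")

cap = cv2.VideoCapture(working_ports[0])
cap.set(3, 640)   # CAP_PROP_FRAME_WIDTH
cap.set(4, 480)   # CAP_PROP_FRAME_HEIGHT

while cap.isOpened():
    success, img = cap.read()
    if success:
        cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()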

How can I capture detected images of faces in OpenCV to make a database?

I am trying to enroll the faces of users in OpenCV for facial recognition. I have finished the detection part, but what I want to achieve is to save the detected faces. So basically, what I want to achieve is:
when I look into the webcam, it automatically captures 20-30 (or n) images and saves them locally.
Currently I am trying to simply save one image when some key is pressed. The program runs fine, but nothing saves locally. Here is the code:
import cv2
import os

cascade = cv2.CascadeClassifier("../haar-cascade-files-master/haar-cascade-files-master/haarcascade_frontalface_alt2.xml")
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read(0)
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(frame, 1.1, 5)
    orig = frame.copy()
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 4)
    cv2.imshow("Video ", frame)
    if cv2.waitKey(2) & 0xFF == 27:
        break
    elif cv2.waitKey(2) & 0xFF == ord('s'):
        faceimg = frame[y:y+h, x:x+w]
        cv2.imwrite("../images/facec.jpeg", faceimg)
        # cv2.imshow("Crop Image", faceimg)

cap.release()
cv2.destroyAllWindows()
So what's wrong in the code, and how can I save n images automatically?
If only one person is looking at the camera in a given scenario, you can use a counter.
N = 20
cnt = 0
while True:
    ...
    ...
    # If the frame contains only one face
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 4)
        faceimg = frame[y:y+h, x:x+w]
        cv2.imwrite("../images/face_" + str(cnt) + ".jpeg", faceimg)
        cnt = cnt + 1
        if cnt == N:
            cnt = 0
    ...
    ...
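For reference, a minimal end-to-end sketch of that idea (assuming the same cascade path as in the question, and stopping once N crops have been written instead of resetting the counter) could look like this:

import cv2

N = 20  # number of face crops to save
cnt = 0

cascade = cv2.CascadeClassifier("../haar-cascade-files-master/haar-cascade-files-master/haarcascade_frontalface_alt2.xml")
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    faces = cascade.detectMultiScale(frame, 1.1, 5)
    for (x, y, w, h) in faces:
        if cnt < N:
            # crop before drawing so the rectangle is not baked into the saved image
            faceimg = frame[y:y+h, x:x+w]
            cv2.imwrite("../images/face_" + str(cnt) + ".jpeg", faceimg)
            cnt += 1
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 4)
    cv2.imshow("Video", frame)
    if cv2.waitKey(2) & 0xFF == 27 or cnt >= N:
        break

cap.release()
cv2.destroyAllWindows()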

Python OpenCV Error(-215) in VideoWriter.write

I am trying to write Python 3 / OpenCV 3 code to read a color video, convert it to grayscale, and save it back.
(Trying exercises to learn Python & OpenCV.)
As I am working on Ubuntu, I found out that the cv2.VideoWriter isColor flag will not work (it works only on Windows).
import numpy as np
import cv2

cap = cv2.VideoCapture('output.avi')

ret, frame = cap.read()
print('ret =', ret, 'W =', frame.shape[1], 'H =', frame.shape[0], 'channel =', frame.shape[2])

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
height, width = gray.shape[:2]
print(height, width)
cv2.imshow('frame', gray)

FPS = 20.0
FrameSize = (frame.shape[1], frame.shape[0])
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter('Video_output.avi', fourcc, FPS, (width, height))

while ret:
    ret, frame = cap.read()
    if cv2.waitKey(1) & 0xFF == ord('q') or ret == False:
        break
    print('ret =', ret, 'W =', frame.shape[1], 'H =', frame.shape[0], 'channel =', frame.shape[2])
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Save the video
    out.write(gray)
    cv2.imshow('frame', gray)

cap.release()
out.release()
cv2.destroyAllWindows()
It gives me this error:
error: (-215) img.cols == width && img.rows == height*3 in function write
I am guessing the problem is with the frame size and the grayscale conversion, but I am not able to figure it out. I have tried different combinations of height & width, but none of them make the program run correctly.
Can someone help?
As requested in the comments:
Traceback (most recent call last):
File "/home/akash/Coded/VideoCode/Test3", line 70, in <module>
out.write(gray)
cv2.error: /home/travis/miniconda/conda-bld/conda_1486587069159/work/opencv-3.1.0/modules/videoio/src/cap_mjpeg_encoder.cpp:834: error: (-215) img.cols == width && img.rows == height*3 in function write
OpenCV error -215 means "assertion failed". The assertion itself is listed in the message (the phrase "Assertion failed" itself probably should be included, too; you can open a ticket about this in OpenCV's bug tracker). (Update: my pull request with this change was accepted on 06.03 and was released in OpenCV 3.4.2.)
As one can deduce from the assertion, the writer expects the frame to have 3 color channels. To make a grayscale video, you need to pass isColor=False to the VideoWriter constructor:
out = cv2.VideoWriter('Video_output.avi', fourcc, FPS, (width,height), False)
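A minimal sketch of the corrected write loop, assuming the same 'output.avi' input as in the question:

import cv2

cap = cv2.VideoCapture('output.avi')
ret, frame = cap.read()
height, width = frame.shape[:2]

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# isColor=False tells the writer to expect single-channel (grayscale) frames
out = cv2.VideoWriter('Video_output.avi', fourcc, 20.0, (width, height), False)

while ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(gray)
    ret, frame = cap.read()

cap.release()
out.release()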
