Why does the webcam not appear in python-opencv?

When I run my nooby OpenCV program in Python on a MacBook Air, it does not show anything, and the program exits after 0.5-1 second.
Here is the code; it is really pretty simple:
import cv2

cap = cv2.VideoCapture(0)
cap.set(3, 640)   # property 3 is cv2.CAP_PROP_FRAME_WIDTH
cap.set(4, 480)   # property 4 is cv2.CAP_PROP_FRAME_HEIGHT
while True:
    success, img = cap.read()
    cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

You should first list the available camera IDs.
Here you can use G M's answer:
def list_ports():
    is_working = True
    dev_port = 0
    working_ports = []
    available_ports = []
    while is_working:
        camera = cv2.VideoCapture(dev_port)
        if not camera.isOpened():
            is_working = False
            print("Port %s is not working." % dev_port)
        else:
            is_reading, img = camera.read()
            w = camera.get(3)   # CAP_PROP_FRAME_WIDTH
            h = camera.get(4)   # CAP_PROP_FRAME_HEIGHT
            if is_reading:
                print("Port %s is working and reads images (%s x %s)" % (dev_port, h, w))
                working_ports.append(dev_port)
            else:
                print("Port %s for camera (%s x %s) is present but does not read." % (dev_port, h, w))
                available_ports.append(dev_port)
        dev_port += 1
    return available_ports, working_ports
The result will be something like:
Port 0 is working and reads images (720.0 x 1280.0)
OpenCV: out device of bound (0-0): 1
OpenCV: camera failed to properly initialize!
Port 1 is not working.
For instance, on my Mac, camera ID (cid) 0 is available but 1 is not.
Therefore I initialized the capture with cid 0.
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
while cap.isOpened():
    success, img = cap.read()
    if success:
        cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
You should not blindly initialize the VideoCapture object with the default ID 0, since it may not work. If no working port is listed in the result, you may need an external camera.
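For reference, here is a minimal sketch (assuming the list_ports() helper above has been defined) that picks the first working port instead of hard-coding 0:

available_ports, working_ports = list_ports()
if not working_ports:
    raise RuntimeError("No working camera found; you may need an external camera.")

cap = cv2.VideoCapture(working_ports[0])  # first camera ID that actually reads frames
cap.set(3, 640)
cap.set(4, 480)
while cap.isOpened():
    success, img = cap.read()
    if success:
        cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()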

Related

How can a segmentation fault error be fixed in OpenCV (Python)?

I am trying to run simple object detection on a webcam using YOLOv5, but I keep getting the error below.
zsh: segmentation fault
The camera appears to open, then shuts off immediately, and the code exits with the above error.
Here is my code
def object_detector():
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
    # mmocr = MMOCR(det='TextSnake', recog='SAR')
    cam = cv2.VideoCapture(0)
    while True:
        ret, frame = cam.read()
        # ocr_result = mmocr.readtext(frame, output='demo/cam.jpg', export='demo/', print_result=True, imshow=True)
        # print("RESULT \n ", ocr_result)
        frame = frame[:, :, [2, 1, 0]]
        frame = Image.fromarray(frame)
        frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
        # ocr_result = mmocr.readtext(frame, output='demo/cam.jpg', export='demo/', print_result=True, imshow=True)
        # print("RESULT \n ", ocr_result)
        result = model(frame, size=640)
        # Results
        # crops = result.crop(save=True)
        cv2.imshow('YOLO', np.squeeze(result.render()))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()
What am I doing wrong and how can I fix it?
You're not testing the return value from cam.read(). Ensure that ret indicates success and that frame is not None before you proceed.
You need to check that an image was returned in the first place. The first value returned by cam.read() tells you whether a frame was received. This is how you can make use of it:
...
while True:
    ret, frame = cam.read()
    if ret:
        frame = frame[:, :, [2, 1, 0]]
        ...
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
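As an aside (an addition, not something from the question): if the camera cannot be opened at all, cam.read() will return False on every iteration, so it can help to fail fast right after constructing the capture:

cam = cv2.VideoCapture(0)
if not cam.isOpened():
    # bail out immediately instead of looping over failed reads
    raise RuntimeError("Could not open camera 0")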

failing to save video after applying saliency

I am attempting to save the output of the code proposed here: https://www.programmersought.com/article/34317272452/
The output video is created, but when I try to play it back, I get the message 'an error occurred, could not demultiplex stream'. I have also tried several ways to save the video; for example, I have used the shape of the frames, but that is not recognized either. Could anyone help me figure out what I am doing wrong?
def read_video(video):
    # Saliency detection algorithm
    saliency_algorithm = "BinWangApr2014"
    start_frame = 0
    if saliency_algorithm is None or video is None:
        print('Please set saliency_algorithm and video')
        return
    cap = cv2.VideoCapture(video)
    video = cv2.VideoWriter('out_j.avi', cv2.VideoWriter_fourcc(*'XVID'), 5, (360, 360))
    # Set the video start frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    _, frame = cap.read()
    if frame is None:
        print('Please set saliency_algorithm and video')
        return
    image = frame.copy()
    if saliency_algorithm.find("BinWangApr2014") == 0:
        saliency_algorithm = cv2.saliency.MotionSaliencyBinWangApr2014_create()
        # set the size of the data structure
        saliency_algorithm.setImagesize(image.shape[1], image.shape[0])
        # Initialization
        saliency_algorithm.init()
        paused = False
        while True:
            if not paused:
                _, frame = cap.read()
                if frame is None:
                    break
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                start = cv2.getTickCount()
                success, saliencyMap = saliency_algorithm.computeSaliency(frame)
                duration = (cv2.getTickCount() - start) / cv2.getTickFrequency()
                # print("computeBinaryMap cost time is: {} ms".format(duration * 1000))
                video.write(frame)
                # cv2.imshow('image', frame)
                cv2.imshow('saliencyMap', saliencyMap * 255)
            c = cv2.waitKey(2)
            c = chr(c) if c != -1 else 0
            if c == 'q':
                break
            if c == 'p':
                paused = not paused
            # video.write(frame)
        cv2.destroyAllWindows()
        video.release()
        # camera.stop()
    return
Thanks in advance
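One likely culprit, offered as an assumption rather than a confirmed fix: the VideoWriter is opened for 360 by 360 colour frames, but the loop writes single-channel grayscale frames at the capture's native size, and OpenCV's writer silently produces an unplayable file when the frame size or channel count does not match. A minimal sketch of a writer that matches the frames actually being written:

fps = cap.get(cv2.CAP_PROP_FPS) or 5         # fall back to 5 if the FPS is unknown
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))   # actual frame width of the input
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # actual frame height of the input
# isColor=False tells the writer to expect single-channel (grayscale) frames
video = cv2.VideoWriter('out_j.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h), isColor=False)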

OpenCV fails to save video when cropped using NumPy slicing

I'm using OpenCV (3.4.6) to capture video from a Raspberry Pi HQ camera, attached to a Raspberry Pi 4 running Raspbian 10. Bounding boxes suggested by an object detection algorithm are then drawn onto the captured frames, and the output saved to a video file.
Everything works great, except when I try to crop the video to my region of interest.
Using the following line successfully crops the image (as confirmed by cv2.imshow):
frame = frame[0:720, 280:1000]
But it also stops the output being saved by out.write; I just get an empty file. Being new to both Python and OpenCV, I'm struggling to work out why.
I've trimmed the code down to the relevant part below (apologies, it's still a bit long; I'm not entirely sure what might be important):
# begin video capture
try:
    vid = cv2.VideoCapture(int(video_path), cv2.CAP_V4L2)
except:
    vid = cv2.VideoCapture(video_path)
out = None

# set capture resolution
inputcodec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')  # forces MJPG codec
vid.set(6, inputcodec)  # sets inputcodec for video capture
vid.set(3, 1280)  # sets video capture width
vid.set(4, 720)  # sets video capture height

if FLAGS.output:
    # by default VideoCapture returns float instead of int
    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(vid.get(cv2.CAP_PROP_FPS))
    codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
    out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

while True:
    return_value, frame = vid.read()
    frame = frame[0:720, 280:1000]  # crop video to square region of interest
    if return_value:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(frame)
    else:
        print('Video has ended or failed, try a different video format!')
        break
    frame_size = frame.shape[:2]
    image_data = cv2.resize(frame, (input_size, input_size))
    image_data = image_data / 255.
    image_data = image_data[np.newaxis, ...].astype(np.float32)
    start_time = time.time()

    if FLAGS.framework == 'tflite':
        interpreter.set_tensor(input_details[0]['index'], image_data)
        interpreter.invoke()
        pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
        if FLAGS.model == 'yolov3' and FLAGS.tiny == True:
            boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
        else:
            boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
                                            input_shape=tf.constant([input_size, input_size]))
    else:
        batch_data = tf.constant(image_data)
        pred_bbox = infer(batch_data)
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]

    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
        scores=tf.reshape(
            pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
        max_output_size_per_class=50,
        max_total_size=50,
        iou_threshold=FLAGS.iou,
        score_threshold=FLAGS.score
    )
    pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
    image = utils.draw_bbox(frame, pred_bbox)
    fps = 1.0 / (time.time() - start_time)
    print("FPS: %.2f" % fps)
    result = np.asarray(image)
    cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
    result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    if not FLAGS.dont_show:
        cv2.imshow("result", result)
    if FLAGS.output:
        out.write(result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
Any suggestions for how to resolve this would be greatly appreciated.
You created the VideoWriter with its width and height set to 1280 by 720. Later you told it to save 720 by 720 crops, and that is the problem: frame = frame[0:720, 280:1000] is a 720 by 720 array.
Decide what dimensions your video should have, and use them consistently. A video stream can't change resolution.
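A minimal sketch of the fix, assuming the crop window stays fixed: derive the writer's dimensions from the crop instead of the capture.

# crop bounds (rows y0:y1, columns x0:x1); must match the slice used in the loop
y0, y1, x0, x1 = 0, 720, 280, 1000
crop_w, crop_h = x1 - x0, y1 - y0  # 720 x 720

codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (crop_w, crop_h))

# in the loop, write frames that match those dimensions:
#   frame = frame[y0:y1, x0:x1]
#   ...
#   out.write(result)  # result is now 720 by 720, matching the writer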

How can I capture detected face images in OpenCV to make a database?

I am trying to enroll the faces of users in OpenCV for facial recognition. I have finished the detection part, but what I want to achieve is to save the detected faces. So basically, what I want to achieve is:
when I look into the webcam, it automatically captures 20-30 (or n) images and saves them locally.
Currently I am trying to simply save 1 image when some key is pressed. The program runs fine, but nothing saves locally. Here is the code:
import cv2
import os

cascade = cv2.CascadeClassifier("../haar-cascade-files-master/haar-cascade-files-master/haarcascade_frontalface_alt2.xml")
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read(0)
    # gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(frame, 1.1, 5)
    orig = frame.copy()
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 4)
    cv2.imshow("Video ", frame)
    if cv2.waitKey(2) & 0xFF == 27:
        break
    elif cv2.waitKey(2) & 0xFF == ord('s'):
        faceimg = frame[y:y + h, x:x + w]
        cv2.imwrite("../images/facec.jpeg", faceimg)
        # cv2.imshow("Crop Image",faceimg)
cap.release()
cv2.destroyAllWindows()
So what's wrong in the code, and how can I save n images automatically?
If only one person is looking at the camera in a given scenario, you can use a counter.
N = 20
cnt = 0
while True:
    ...
    ...
    # If the frame contains only one face
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 4)
        faceimg = frame[y:y + h, x:x + w]
        cv2.imwrite("../images/face_" + str(cnt) + ".jpeg", faceimg)
        cnt = cnt + 1
        if cnt == N:
            cnt = 0
    ...
    ...
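Putting it together, a minimal self-contained sketch (assuming the same cascade path as the question, and stopping after N saves instead of wrapping the counter around). Note also that calling cv2.waitKey twice per iteration, as in the question, consumes key events; a key press checked by the first call is no longer seen by the second, which is one reason the 's' branch rarely fires:

import cv2

N = 20  # number of face crops to save
cnt = 0
cascade = cv2.CascadeClassifier("../haar-cascade-files-master/haar-cascade-files-master/haarcascade_frontalface_alt2.xml")
cap = cv2.VideoCapture(0)

while cnt < N:
    ret, frame = cap.read()
    if not ret:
        break
    faces = cascade.detectMultiScale(frame, 1.1, 5)
    for (x, y, w, h) in faces:
        # save the raw crop before the rectangle is drawn onto the frame
        cv2.imwrite("../images/face_%d.jpeg" % cnt, frame[y:y + h, x:x + w])
        cnt += 1
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 4)
    cv2.imshow("Video", frame)
    if cv2.waitKey(2) & 0xFF == 27:  # Esc quits early
        break

cap.release()
cv2.destroyAllWindows()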

txt file won't be written in Python on Windows, but it works on Mac

I wrote code that takes a video, recognizes a human face, and calculates the distance of the human from the camera (which is the eye of a robot). I need the data in txt format, so I am writing it to distance_data.txt, but my file is always empty.
I ran the code on a Mac and it worked perfectly fine; the file was not empty.
import cv2
import time
import numpy as np

person_cascade = cv2.CascadeClassifier('human_recognition.txt')
cap = cv2.VideoCapture('oct23_2.avi')
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('oct23_2out.avi', fourcc, 20.0, (640, 360))
distance_data = open('distance_data.txt', 'w')
timer = -1
while cap.isOpened():
    r, frame = cap.read()
    if r:
        frame = cv2.resize(frame, (640, 360))  # Downscale to improve frame rate
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # Haar-cascade classifier needs a grayscale image
        cv2.imshow('gray', gray_frame)
        # print(gray_frame.type())
        rects = person_cascade.detectMultiScale(gray_frame)
        max = 0
        # print(rects)
        (x, y, w, h) = rects[0]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # fshape = frame.shape
        # fheight = fshape[0]
        # fwidth = fshape[1]
        timer = timer + 1
        xpos = (0.03421) * (w * w) - 8.1183 * w + 521.841
        yn = 360 - y
        heightinvid = 0.0197377 * yn - 0.0194
        realhieght = ((-0.4901) * (heightinvid * heightinvid) + 4.4269 * heightinvid + 8.30356) / 0.927183854
        horizontalp = x + w / 2
        # print(timer+1, ",", xpos)
        # if (xpos<=41 and xpos>=40):
        #     print("height of the human is ", realhieght+149, "or", (-0.000184)*yn*yn+0.08506*yn+8.43461+149)
        cm_deviation_in_x = -0.02033898 * (320 - horizontalp)
        # print("Horizontal position of human is ", cm_deviation_in_x, " cm.")
        newxpos = format(xpos, '.3f')
        distance_data.write("Hi")
        distance_data.write(str(newxpos))
        distance_data.write(',')
        distance_data.write(str(format(cm_deviation_in_x, '.3f')))
        distance_data.write("\r\n")
        # print(w)
        # print("x position of the human is = ", xpos)
        # fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # out = cv2.VideoWriter('output25.avi', fourcc, 20.0, (fwidth, fheight))
        cv2.imshow("preview", frame)
        out.write(frame)
    k = cv2.waitKey(10)
    # print(w, h)
    if k & 0xFF == ord("q"):  # Exit condition
        break
distance_data.flush()
cap.release()
cv2.destroyAllWindows()
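One common reason the file stays empty, offered as an assumption rather than a confirmed diagnosis: if the cascade fails to load on Windows (a path issue, say) or rects comes back empty, rects[0] raises an exception before distance_data.flush() is ever reached, and the never-closed file is left empty. A minimal sketch using a context manager, which guarantees the file is flushed and closed even when the loop crashes (the write shown here is a placeholder for the question's distance output):

import cv2

person_cascade = cv2.CascadeClassifier('human_recognition.txt')
cap = cv2.VideoCapture('oct23_2.avi')

# the 'with' block closes (and therefore flushes) the file
# even if an exception is raised inside the loop
with open('distance_data.txt', 'w') as distance_data:
    while cap.isOpened():
        r, frame = cap.read()
        if not r:
            break
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = person_cascade.detectMultiScale(gray_frame)
        if len(rects) == 0:
            continue  # skip frames with no detection instead of crashing on rects[0]
        (x, y, w, h) = rects[0]
        distance_data.write("%d,%d,%d,%d\r\n" % (x, y, w, h))

cap.release()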
