Can't open file: './config/PixelBasedAdaptiveSegmenter.xml' in write mode - python

I am testing different background segmentation algorithms from the pybgs library. Unfortunately, I am facing an error that I don't understand.
The code is:
import cv2
import pybgs as bgs

video_path = "video.mp4"

# create VideoCapture object for further video processing
captured_video = cv2.VideoCapture(video_path)
# check video capture status
if not captured_video.isOpened():
    print("Unable to open: " + video_path)
    exit(0)

background_sub_method = bgs.SuBSENSE()

while True:
    # read video frames
    ret, frame = captured_video.read()
    # check whether the frames have been grabbed
    if not ret:
        break
    # pass the frame to the background subtractor
    foreground_mask = background_sub_method.apply(frame)
    # obtain the background without foreground mask
    img_bg_model = background_sub_method.getBackgroundModel()

    cv2.imshow("Initial Frame", frame)
    cv2.imshow("FG Mask", foreground_mask)
    cv2.imshow("Subtraction Result", img_bg_model)

    key = cv2.waitKey(10)
    if key == 27:
        break
Apart from the algorithm not working properly, I get this error that I don't understand.
[ERROR:0#0.002] global /home/usr/opencv-4.x/modules/core/src/persistence.cpp (505) open Can't open file: './config/SuBSense.xml' in write mode
Failed to open ./config/SuBSense.xml
In my pybgs library I have a config folder, but there is no SuBSense.xml file in it.
So I don't know where this error comes from, or where this SuBSense.xml file is supposed to be.
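The path in the error is relative ("./config/SuBSense.xml") and the file is being opened in write mode, which suggests the library is trying to create the XML in a config folder under the current working directory (where the script is run from), not inside the installed pybgs package. A minimal thing to try, assuming that is the cause (the makedirs call is my addition, not part of pybgs):
import os
import pybgs as bgs

# Assumption: pybgs writes its algorithm settings to ./config relative to the
# directory the script is launched from, so make sure that folder exists first.
os.makedirs("./config", exist_ok=True)

background_sub_method = bgs.SuBSENSE()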

Related

Why is Python OpenCV imwrite unable to save a simple image from my webcam?

My goal is to capture some images from my laptop's webcam to use them later. The main problem is that cv2.imwrite doesn't seem to work: the directory is created successfully, but I cannot find a way to save the image. I am currently on Manjaro Linux. I've already checked that the frame is not empty, and in other Python scripts I've been able to show the image properly; the only problem seems to be when I try to save the image.
Is there any other way to save the image or is something wrong with my code?
I have the following Python code:
import cv2  # opencv
import os
import time
import uuid

IMAGES_PATH = 'Tensorflor/workspace/images/collectedimages'
labels = ['hola', 'gracias', 'si', 'no', 'tequiero']
img_number = 15

for label in labels:
    !mkdir {'Tensorflow/workspace/images/collectedimages/'+label}
    cap = cv2.VideoCapture(0)
    print('Collecting images for {}'.format(label))
    time.sleep(5)
    for numimg in range(img_number):
        ret, frame = cap.read()
        img_name = os.path.join(IMAGES_PATH, label, label+'.'+'{}.jpg'.format(str(uuid.uuid1())))
        cv2.imwrite(img_name, frame)
        cv2.imshow('Frame', frame)
        time.sleep(2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
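Not part of the original post, but one thing worth checking: cv2.imwrite does not raise an error when the destination directory is missing, it just returns False, and in the code above IMAGES_PATH is spelled 'Tensorflor' while the mkdir line creates 'Tensorflow/...', so the save path may simply not exist. A minimal sketch that creates the directory from Python and checks the return value (the consistent spelling and the makedirs call are my assumptions):
import os
import cv2

IMAGES_PATH = 'Tensorflow/workspace/images/collectedimages'  # one consistent spelling
label = 'hola'

label_dir = os.path.join(IMAGES_PATH, label)
os.makedirs(label_dir, exist_ok=True)  # create the folder from Python instead of !mkdir

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cap.release()

img_name = os.path.join(label_dir, 'test.jpg')
ok = cv2.imwrite(img_name, frame)      # cv2.imwrite returns False instead of raising
print('saved' if ok else 'imwrite failed for ' + img_name)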

How to save a captured webcam image to USB using the Python OpenCV function "imwrite" on Linux

I tried to save a webcam capture image to a USB drive using Python on Linux.
cv2.imwrite works in a local file directory but does not work in the USB directory.
I tried using the 'os' package and building the path with it.
Is there another method for doing this?
path='/media/odroid/MYUSB/savefolder/'
capture_img=/demo/capture.jpg
image=cv2.imread(capture_img)
cv2.imwrite(os.path.join(path, resave.jpg),image)
The whole code runs without error, but the jpg file is not saved in MYUSB.
Perhaps you don't need to use os.path.join(); try this:
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
savePath = 'output.jpg' #Replace this with your own path say /media/odroid/MYUSB/savefolder/output.jpg
ret, frame = cap.read()
cv2.imwrite(savePath,frame)
If you want to save the whole video, please refer to this answer.
Here is the code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
savePath = 'output.avi'  # Replace this with your own path, e.g. /media/odroid/MYUSB/savefolder/output.avi
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(savePath, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))

while True:
    newret, newframe = cap.read()
    cv2.imshow('orig', newframe)
    out.write(newframe)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Also, your code has some issues, here is a fixed version of it which should work:
path='/media/odroid/MYUSB/savefolder/'
capture_img='/demo/capture.jpg' #it seems path should be demo/capture.jpg
image=cv2.imread(capture_img)
cv2.imwrite(os.path.join(path, 'resave.jpg'),image)
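Not part of the original answer, but since cv2.imwrite fails silently (it returns False rather than raising), a quick check like the following can tell you whether the write actually happened and whether the USB folder is really there; the path is the one from the question:
import os
import cv2

path = '/media/odroid/MYUSB/savefolder/'
image = cv2.imread('/demo/capture.jpg')

if image is None:
    print('could not read the source image')
elif not os.path.isdir(path):
    print('destination folder does not exist (is the USB drive mounted?)')
else:
    ok = cv2.imwrite(os.path.join(path, 'resave.jpg'), image)
    print('saved' if ok else 'imwrite returned False')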

cv2.VideoCapture freezes on this specific video

For some reason, it hangs on this video:
Here's the code
import cv2
import time

cap = cv2.VideoCapture("http://192.65.213.243/mjpg/video.mjpg")

while cap.isOpened():
    ret, img = cap.read()
    current_time_in_milliseconds = "%.5f" % time.time()
    filename = "{}.jpg".format(current_time_in_milliseconds)
    cv2.imwrite(filename, img)
Any ideas why? Is it something about this video format?
This code works on other mjpg streams, but something about this feed makes Python freeze at cv2.VideoCapture().
I do get this funny error too:
warning: Error opening file
(/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:792) warning:
http://192.65.213.243/mjpg/video.mjpg
(/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:793)
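This is not from the original post, but one way to sidestep cv2.VideoCapture entirely for an MJPEG feed like this is to fetch the stream over HTTP and decode each JPEG yourself, so a stalled connection hits a timeout instead of freezing. A sketch, assuming the feed is a standard multipart MJPEG stream (the chunk size and timeout values are arbitrary choices):
import cv2
import numpy as np
import requests

# Read the MJPEG feed over plain HTTP and decode frames manually, so a stalled
# connection raises a timeout instead of hanging inside VideoCapture.
resp = requests.get("http://192.65.213.243/mjpg/video.mjpg", stream=True, timeout=10)
buf = b""
for chunk in resp.iter_content(chunk_size=4096):
    buf += chunk
    start = buf.find(b"\xff\xd8")   # JPEG start-of-image marker
    end = buf.find(b"\xff\xd9")     # JPEG end-of-image marker
    if start != -1 and end != -1 and end > start:
        jpg = buf[start:end + 2]
        buf = buf[end + 2:]
        img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        if img is not None:
            cv2.imshow("frame", img)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

resp.close()
cv2.destroyAllWindows()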

Face Detection in Video using Google Cloud API

I'm trying to do face detection in a video using Google Vision API. I'm using the following code:
import argparse

import cv2
from google.cloud import vision
from PIL import Image, ImageDraw


def detect_face(face_file, max_results=4):
    """Uses the Vision API to detect faces in the given file.

    Args:
        face_file: A file-like object containing an image with faces.

    Returns:
        An array of Face objects with information about the picture.
    """
    content = face_file.read()
    # [START get_vision_service]
    image = vision.Client().image(content=content)
    # [END get_vision_service]
    return image.detect_faces()


def highlight_faces(frame, faces, output_filename):
    """Draws a polygon around the faces, then saves to output_filename.

    Args:
        image: a file containing the image with the faces.
        faces: a list of faces found in the file. This should be in the format
            returned by the Vision API.
        output_filename: the name of the image file to be created, where the
            faces have polygons drawn around them.
    """
    im = Image.open(frame)
    draw = ImageDraw.Draw(im)

    for face in faces:
        box = [(bound.x_coordinate, bound.y_coordinate)
               for bound in face.bounds.vertices]
        draw.line(box + [box[0]], width=5, fill='#00ff00')

    # im.save(output_filename)


def main(input_filename, max_results):
    video_capture = cv2.VideoCapture(input_filename)

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        faces = detect_face(frame, max_results)
        highlight_faces(frame, faces)

        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Detects faces in the given image.')
    parser.add_argument(
        'input_image', help='the image you\'d like to detect faces in.')
    parser.add_argument(
        '--max-results', dest='max_results', default=4,
        help='the max results of face detection.')
    args = parser.parse_args()

    main(args.input_image, args.max_results)
But I'm getting the error:
content = face_file.read() AttributeError: 'numpy.ndarray' object has
no attribute 'read'
The "frames" are getting read as numpy array. But don't know how to bypass them.
Can anyone please help me?
The detect_face function is expecting a file-like object to read the data from. One possible way to do this is to convert frame (of type numpy.ndarray) into an image, and put it into a buffer, which can then be read like a file.
For example, try making the following changes to your code:
## Add some imports.
import io
import numpy as np
...

def main(input_filename, max_results):
    ...
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        ## Convert to an image, then write to a buffer.
        image_from_frame = Image.fromarray(np.uint8(frame))
        buffer = io.BytesIO()
        image_from_frame.save(buffer, format='PNG')
        buffer.seek(0)

        ## Use the buffer like a file.
        faces = detect_face(buffer, max_results)
        ...
Note: There should be a way to use image_from_frame.tobytes() as image content in the vision API client, but I could not make it work.
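Not from the original answer: since the frame already comes from OpenCV, another option is to skip PIL and get encoded bytes directly with cv2.imencode (reusing frame, detect_face, and max_results from the loop above). Whether the Vision client accepts the bytes this way is untested here, so treat it as a sketch:
import io

import cv2

## Encode the BGR frame straight to PNG bytes, then wrap them in a file-like buffer.
ok, encoded = cv2.imencode('.png', frame)
if ok:
    buffer = io.BytesIO(encoded.tobytes())
    faces = detect_face(buffer, max_results)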

How to read Youtube live stream using openCV python?

I want to read a live stream from YouTube to perform some basic CV things. Probably we have to somehow process the YouTube URL to convert it into a format that OpenCV can read, something like:
cap = cv2.VideoCapture('https://www.youtube.com/watch?v=_9OBhtLA9Ig')
Has anyone done this?
I am sure you already know the answer by now, but I will answer for others searching the same topic. You can do this by using Pafy
(probably together with youtube_dl).
import pafy
import cv2

url = "https://www.youtube.com/watch?v=_9OBhtLA9Ig"
video = pafy.new(url)
best = video.getbest(preftype="mp4")

capture = cv2.VideoCapture(best.url)

while True:
    grabbed, frame = capture.read()
    # ...
And that should be it.
I've added YouTube URL source support to my VidGear Python Library, which automatically pipelines a YouTube video into OpenCV given only its URL. Here is a complete Python example:
# import libraries
from vidgear.gears import CamGear
import cv2

# YouTube video URL as input
stream = CamGear(source='https://youtu.be/dQw4w9WgXcQ', stream_mode=True, logging=True).start()

# infinite loop
while True:
    # read frames
    frame = stream.read()

    # check if frame is None
    if frame is None:
        # if True, break the infinite loop
        break

    # do something with frame here

    # show output window
    cv2.imshow("Output Frame", frame)

    # check for 'q' key-press
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        # if 'q' key pressed, break out
        break

# close output window
cv2.destroyAllWindows()

# safely close video stream
stream.stop()
Code Source
After 100-120 frames, the answer from @lee hannigan was crashing on me for a live stream on YouTube.
I worked out a method with Pafy to just grab a set number of frames and splice them together. This ended up stitching the chunks together poorly, though, and gave choppy results; Pafy may not be designed for live streams, and I couldn't find a way to stitch the frames together seamlessly.
What worked in the end is below, slightly modified from guttentag_liu's answer on this question. It needs a few more packages and is lengthy, but it works. Because the stream is live, it has to be downloaded in chunks, hence saving to a temporary file. You could probably do your OpenCV work on each chunk and then save to a file at the end instead of re-opening it.
# pip install m3u8
# pip install streamlink
from datetime import datetime, timedelta, timezone
import time
import urllib.request
import m3u8
import streamlink
import cv2  # openCV


def get_stream(url):
    """
    Get upload chunk url
    input: youtube URL
    output: m3u8 object segment
    """
    # Try this line `tries` number of times; if it doesn't work,
    # then show the exception on the last attempt
    # Credit, theherk, https://stackoverflow.com/questions/2083987/how-to-retry-after-exception
    tries = 10
    for i in range(tries):
        try:
            streams = streamlink.streams(url)
        except:
            if i < tries - 1:  # i is zero indexed
                print(f"Attempt {i+1} of {tries}")
                time.sleep(0.1)  # Wait briefly, avoid overload
                continue
            else:
                raise
        break

    stream_url = streams["best"]  # Alternate, use '360p'

    m3u8_obj = m3u8.load(stream_url.args['url'])
    return m3u8_obj.segments[0]  # Parsed stream


def dl_stream(url, filename, chunks):
    """
    Download each chunk to file
    input: url, filename, and number of chunks (int)
    output: saves file at filename location
    returns none.
    """
    pre_time_stamp = datetime(1, 1, 1, 0, 0, tzinfo=timezone.utc)

    # Repeat for each chunk
    # Needs to be in chunks because
    #  1) it's live
    #  2) it won't let you leave the stream open forever
    i = 1
    while i <= chunks:
        # Open stream
        stream_segment = get_stream(url)
        # Get current time on video
        cur_time_stamp = stream_segment.program_date_time
        # Only get next time step, wait if it's not new yet
        if cur_time_stamp <= pre_time_stamp:
            # Don't increment counter until we have a new chunk
            print("NO pre: ", pre_time_stamp, "curr:", cur_time_stamp)
            time.sleep(0.5)  # Wait half a sec
            pass
        else:
            print("YES: pre: ", pre_time_stamp, "curr:", cur_time_stamp)
            print(f'#{i} at time {cur_time_stamp}')
            # Open file for writing stream
            file = open(filename, 'ab+')  # ab+ means keep adding to file
            # Write stream to file
            with urllib.request.urlopen(stream_segment.uri) as response:
                html = response.read()
                file.write(html)

            # Update time stamp
            pre_time_stamp = cur_time_stamp
            time.sleep(stream_segment.duration)  # Wait the chunk's duration
            i += 1  # only increment if we got a new chunk

    return None


def openCVProcessing(saved_video_file):
    '''View saved video with openCV
    Add your other steps here'''
    capture = cv2.VideoCapture(saved_video_file)

    while capture.isOpened():
        grabbed, frame = capture.read()  # read in single frame
        if grabbed == False:
            break

        # openCV processing goes here
        #
        cv2.imshow('frame', frame)  # Show the frame
        # Shown in a new window; to exit, push q on the keyboard
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()  # close the windows automatically


tempFile = "temp.ts"  # files are in ts format, which openCV can view
videoURL = "https://www.youtube.com/watch?v=_9OBhtLA9Ig"

dl_stream(videoURL, tempFile, 3)
openCVProcessing(tempFile)
Probably because YouTube no longer provides the like/dislike counts, the first solution gives an error. As a workaround, you can comment out lines 53 and 54 of backend_youtube_dl.py in the pafy package; after that, the code in the first solution will work.
Secondly, you cannot get audio with OpenCV; it is a computer vision library, not a multimedia library. You should try other options for that.
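For example (my suggestion, not part of the original answer), the audio track can be downloaded separately with yt-dlp and processed with an audio library; the output filename template here is an arbitrary choice:
# pip install yt-dlp
import yt_dlp

# Download only the best available audio stream of the video to audio.<ext>.
opts = {"format": "bestaudio/best", "outtmpl": "audio.%(ext)s"}
with yt_dlp.YoutubeDL(opts) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=_9OBhtLA9Ig"])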
