How to fix a problem with saving a video frame by frame - Python

The problem is that I am trying to save a video file from frames I receive one at a time from another function (I have checked that it is not the same frame every time), but the video that gets created contains only one frame. I run a loop outside the class Video_utility and send each frame to the function save_and_display_video.
import cv2

class Video_utility:
    def __init__(self, name_video, format_video, display, fps, size):
        self.name_video = name_video
        self.format_video = format_video
        self.display = display
        self.fps = fps
        self.size = size
        self.stream_frame = None
        self.flag_update = True
        self.display = True
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter(name_video, self.fourcc, fps, self.size)
        self.i = 0

    def save_and_display_video(self, frame):
        frame = cv2.resize(frame, self.size)
        self.out.write(frame)
        self.out.release()
        cv2.destroyAllWindows()

Don't close the file after every frame.
You have self.out.release() in your save_and_display_video() function, so the writer is closed right after the first frame is written.
You need to release the writer only once, after you've received the whole video.
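A minimal sketch of how the class could look with the release moved out of the per-frame method (names follow the question; the close() helper is an addition for illustration):

import cv2

class Video_utility:
    def __init__(self, name_video, format_video, display, fps, size):
        self.name_video = name_video
        self.format_video = format_video
        self.display = display
        self.fps = fps
        self.size = size
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter(name_video, self.fourcc, fps, self.size)

    def save_and_display_video(self, frame):
        # Only write here; do not release the writer per frame
        frame = cv2.resize(frame, self.size)
        self.out.write(frame)

    def close(self):
        # Call once, after the last frame has been written
        self.out.release()
        cv2.destroyAllWindows()

The loop outside the class would then call save_and_display_video(frame) for every frame and close() once at the end.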

Related

Camera in Kivy with OpenCV takes a lot of time to load

The title says it all: loading the camera video takes a long time, and the window stays in "Not Responding" while it loads.
It takes almost 1 to 2 minutes to open the camera.
This is my code.
class Attendance(Screen):
    def on_enter(self, *args):
        self.image = Image()
        self.capture = cv2.VideoCapture(0)
        Clock.schedule_interval(self.load_video, 1.0 / 30.0)
        self.add_widget(self.image)
        return super().on_enter(*args)

    def load_video(self, *args):
        ret, frame = self.capture.read()  # read() returns (ret, frame)
        if not ret:
            return
        self.image_frame = frame
        buffer = cv2.flip(frame, 0).tobytes()
        texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        texture.blit_buffer(buffer, colorfmt='bgr', bufferfmt='ubyte')
        self.image.texture = texture
I fixed this by changing self.capture to the following:
self.capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
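On Windows, the default backend can spend a long time probing the device, while cv2.CAP_DSHOW selects the DirectShow backend, which usually opens much faster. A minimal sketch of the changed on_enter, assuming the rest of the class from the question stays the same:

    def on_enter(self, *args):
        self.image = Image()
        # CAP_DSHOW forces the DirectShow backend on Windows, which typically
        # opens the device much faster than the default backend
        self.capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        Clock.schedule_interval(self.load_video, 1.0 / 30.0)
        self.add_widget(self.image)
        return super().on_enter(*args)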

Tkinter program works fine in the IDE (Visual Studio), but when compiled to an .exe with PyInstaller, threading does not work the same as it does in the IDE

When I run my Python project in my IDE, the GUI and everything is responsive and works great. But when I run it as an .exe, my threading components don't work like they do in the IDE. The program's goal is to grab a live feed via RTSP and use OpenCV to display the images. This is done in its own thread here.
import time
import threading
import cv2
import PIL.Image

"""TODO: add docstring"""

class VideoCapture:
    def __init__(self, xmlDict=None, width=None, height=None, fps=None):
        """TODO: add docstring"""
        self.xmlDict = xmlDict
        self.width = width
        self.height = height
        self.fps = int(self.xmlDict['FPS'])
        self.running = False
        # Open the video source
        self.vid = cv2.VideoCapture(self.xmlDict['IpAddress'])
        if not self.vid.isOpened():
            raise ValueError("[MyVideoCapture] Unable to open video source", xmlDict['IpAddress'])
        # Get video source width and height
        if not self.width:
            self.width = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))    # convert float to int
        if not self.height:
            self.height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))  # convert float to int
        if not self.fps:
            self.fps = int(self.vid.get(cv2.CAP_PROP_FPS))              # convert float to int
        # default value at start
        self.ret = False
        self.frame = None
        self.convert_color = cv2.COLOR_BGR2RGB
        #self.convert_color = cv2.COLOR_BGR2GRAY
        self.convert_pillow = True
        # start thread
        self.running = True
        self.thread = threading.Thread(target=self.process)
        self.thread.start()

    def process(self):
        """TODO: add docstring"""
        while self.running:
            ret, frame = self.vid.read()
            if ret:
                # process image
                frame = cv2.resize(frame, (self.width, self.height))
                # it has to record before converting colors
                if self.convert_pillow:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frame = PIL.Image.fromarray(frame)
            else:
                print('[MyVideoCapture] stream end:', self.video_source)
                # TODO: reopen stream
                self.running = False
                if self.recording:
                    self.stop_recording()
                break
            # assign new frame
            self.ret = ret
            self.frame = frame
            # sleep for next frame
            #if self.fps != "FULL":
            #    time.sleep(1/int(self.fps))
I have a Start button that runs inference on an image every 2 seconds and prints out the label and confidence. When I do this in the .exe, the live feed and GUI freeze while inference is being made, but when I run the program in the IDE it does not freeze. Here is the code that does this.
# Button to start inference
self.btn_snapshot = tk.Button(self.btnFrame, width=10, height=2, text="Start",
                              command=lambda: threading.Thread(target=self.snapshot).start())
self.btn_snapshot.grid(row=1, column=0)

# snapshot function
def snapshot(self):
    self.recording = True
    while self.recording:
        filename = self.vid.snapshot()
        result = self.predictImage(filename)
        output = self.calculatePassFail(result)
        if self.manager:
            self.manager.onClick(output)
        else:
            print('something')
        time.sleep(2)
The other two methods that the snapshot function calls are predictImage and calculatePassFail.
def predictImage(self, imageName):
    onnxModel = ImageModel.load(self.xmlDict['ModelPath'])
    result = onnxModel.predict_from_file(imageName)
    return result

def calculatePassFail(self, result):
    calcResult = result.labels[0]
    self.labelName = calcResult[0]
    self.imgScore = calcResult[1] * 100
    return f"{self.labelName} with score {self.imgScore}"
I found a workaround for this; I'm not sure it's a proper fix, but it works. For some reason, when I use PyInstaller to create the .exe with a console window, I have the issue, but when I pass the --noconsole flag to PyInstaller to build without a console, the issue goes away and the image inference runs in its own thread just like it does in my IDE. I'm not sure why, but it works.
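For reference, a build command along those lines might look like the following (the script name is a placeholder; --noconsole is equivalent to --windowed):

pyinstaller --noconsole --onefile your_app.py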

How can I preview streaming images in tkinter with Allied Vision camera that uses Vimba SDK?

I want to display images from an Allied Vision camera inside a tkinter frame using OpenCV and the SDK for the camera, VimbaPython.
The only possible way to initialize the camera is with a Python with statement:
with Vimba.get_instance() as vimba:
    cams = vimba.get_all_cameras()
    with cams[0] as camera:
        camera.get_frame()
        # Convert frame to opencv image, then use Image.fromarray and ImageTk.PhotoImage to
        # display it on the tkinter GUI
Everything works fine so far. But I don't only need a single frame. Instead, I need to continuously get frames and display them on the screen so that it is streaming.
I found that one way to do it is to call the .after(delay, function) method from a tkinter Label widget.
So, after obtaining one frame, I want to call the same function to get a new frame and display it again. The code would look like this:
with Vimba.get_instance() as vimba:
    cams = vimba.get_all_cameras()
    with cams[0] as camera:
        def show_frame():
            frame = camera.get_frame()
            frame = frame.as_opencv_image()
            im = Image.fromarray(frame)
            img = ImageTk.PhotoImage(im)
            lblVideo.configure(image=img)  # this is the Tkinter Label Widget
            lblVideo.image = img
        show_frame()
        lblVideo.after(20, show_frame)
Then this shows the first frame and stops, throwing an error saying that Vimba needs to be initialized with a with statement. I don't know much about Python, but it looks like by the time the function is called again via .after(), the with statement has already exited.
I would like to know if it is possible to keep executing this show_frame() function without the with block ending. Also, I can't re-initialize the camera every time, because that makes the program really slow.
Thank you
I know this question is pretty old, but I ran into a similar problem with the Allied Vision cameras and found the solution to be relatively robust. So I hope this helps someone, even if not the OP.
An alternative to using with statements is using __enter__ and __exit__ (see sample here). With this, I created a class for the Vimba camera and during the __init__ I used these functions twice: once to initialize the Vimba instance, and once to open the camera itself. An example as follows...
vimba_handle = Vimba.get_instance().__enter__()
camera = vimba_handle.get_all_cameras()[0].__enter__()
I'll include a longer snippet as code as well, but please note that my purpose was slightly different from the OP's intent. Hopefully, it is still useful.
class VimbaCam:
    def __init__(self, device_id=0):
        # Variables
        self.current_frame = np.array([])
        self.device = None
        self.device_id = device_id
        self.vimba_handle = Vimba.get_instance().__enter__()
        self.is_streaming = False
        self.scale_window = 4
        self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)
        # Default settings
        self.auto_exposure = "Off"
        self.auto_gain = "Off"
        self.acquisition = "Continuous"
        self.exposure_us = 200000
        self.fps = 6.763
        self.gain = 0
        self.gamma = 1
        self.open()

    def close(self):
        if self.device is not None:
            if self.is_streaming:
                self.stop_stream()
                time.sleep(1)
            self.device.__exit__(None, None, None)
            self.vimba_handle.__exit__(None, None, None)

    def open(self):
        cams = self.vimba_handle.get_all_cameras()
        if not cams:
            error_check(151, currentframe())
        else:
            self.device = cams[self.device_id].__enter__()
            self.set_defaults()
            self.start_stream()

    def start_stream(self):
        if self.device is not None:
            self.is_streaming = True
            self.stream_thread.start()
            time.sleep(1)

    def thread_stream(self):
        while self.is_streaming:
            current_frame = self.device.get_frame().as_opencv_image()
            h, w, _ = current_frame.shape
            self.current_frame = current_frame.reshape((h, w))
        self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)

    def stop_stream(self):
        if self.device is not None:
            self.is_streaming = False

    def live_video(self):
        if self.device is not None:
            window_name = "Allied Vision"
            h, w = self.current_frame.shape
            w = int(w / self.scale_window)
            h = int(h / self.scale_window)
            cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(window_name, w, h)
            while 1:
                cv2.imshow(window_name, self.current_frame)
                cv2.waitKey(1)
                if cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1:
                    break
            cv2.destroyAllWindows()

    def set_defaults(self):
        if self.device is not None:
            # Exposure time settings
            self.device.ExposureAuto.set(self.auto_exposure)
            self.device.ExposureTimeAbs.set(self.exposure_us)
            # Gain settings
            self.device.GainAuto.set(self.auto_gain)
            self.device.Gain.set(self.gain)
            # Gamma settings
            self.device.Gamma.set(self.gamma)
            self.device.AcquisitionMode.set(self.acquisition)
            self.device.AcquisitionFrameRateAbs.set(self.fps)
            # Try to adjust GeV packet size (available for GigE only)
            try:
                self.device.GVSPAdjustPacketSize.run()
                while not self.device.GVSPAdjustPacketSize.is_done():
                    pass
            except (AttributeError, VimbaFeatureError):
                pass
            # Color formatting (tries mono first, then color)
            cv_formats = intersect_pixel_formats(self.device.get_pixel_formats(), OPENCV_PIXEL_FORMATS)
            mono_formats = intersect_pixel_formats(cv_formats, MONO_PIXEL_FORMATS)
            color_formats = intersect_pixel_formats(cv_formats, COLOR_PIXEL_FORMATS)
            if mono_formats:
                self.device.set_pixel_format(mono_formats[0])
            elif color_formats:
                self.device.set_pixel_format(color_formats[0])


if __name__ == "__main__":
    dev = VimbaCam()
    dev.live_video()
    dev.close()
You need to use a thread to run the capture code and pass the frames it reads via a queue. The main tkinter application then reads the queue and shows the frames periodically using .after().
Below is an example based on your posted code:
import threading
from queue import SimpleQueue
import tkinter as tk
from PIL import Image, ImageTk
from vimba import Vimba


def camera_streaming(queue):
    global is_streaming
    is_streaming = True
    print("streaming started")
    with Vimba.get_instance() as vimba:
        with vimba.get_all_cameras()[0] as camera:
            while is_streaming:
                frame = camera.get_frame()
                frame = frame.as_opencv_image()
                im = Image.fromarray(frame)
                img = ImageTk.PhotoImage(im)
                queue.put(img)  # put the captured image into the queue
    print("streaming stopped")


def start_streaming():
    start_btn["state"] = "disabled"  # disable start button to avoid running the threaded task more than once
    stop_btn["state"] = "normal"     # enable stop button to allow user to stop the threaded task
    show_streaming()
    threading.Thread(target=camera_streaming, args=(queue,), daemon=True).start()


def stop_streaming():
    global is_streaming, after_id
    is_streaming = False  # terminate the streaming thread
    if after_id:
        lblVideo.after_cancel(after_id)  # cancel the showing task
        after_id = None
    stop_btn["state"] = "disabled"  # disable stop button
    start_btn["state"] = "normal"   # enable start button


# periodical task to show frames in queue
def show_streaming():
    global after_id
    if not queue.empty():
        image = queue.get()
        lblVideo.config(image=image)
        lblVideo.image = image
    after_id = lblVideo.after(20, show_streaming)


queue = SimpleQueue()  # queue for video frames
after_id = None

root = tk.Tk()

lblVideo = tk.Label(root, image=tk.PhotoImage(), width=640, height=480)
lblVideo.grid(row=0, column=0, columnspan=2)

start_btn = tk.Button(root, text="Start", width=10, command=start_streaming)
start_btn.grid(row=1, column=0)

stop_btn = tk.Button(root, text="Stop", width=10, command=stop_streaming, state="disabled")
stop_btn.grid(row=1, column=1)

root.mainloop()
Note that I don't have the camera or the SDK installed, so the above code may not work for you as-is. It just demonstrates how to use a thread, a queue, and .after().
Below is a testing vimba module (saved as vimba.py) I use to simulate VimbaPython module using OpenCV and a webcam:
import cv2


class Frame:
    def __init__(self, frame):
        self.frame = frame

    def as_opencv_image(self):
        return self.frame


class Camera:
    def __init__(self, cam_id=0):
        self.cap = cv2.VideoCapture(cam_id, cv2.CAP_DSHOW)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.cap.release()
        return self

    def get_frame(self):
        ret, frame = self.cap.read()
        if ret:
            return Frame(frame)


class Vimba:
    _instance = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = Vimba()
        return cls._instance

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return self

    def get_all_cameras(self):
        return (Camera(),)
I tried reading the frames with OpenCV and displaying them in a tkinter label. I was able to do so using the code below:
import tkinter as tk
import cv2
from PIL import ImageTk, Image

video_path = "SAMPLE/STORED_VIDEO/PATH"

root = tk.Tk()
base_img = Image.open("PATH/TO/DEFAULT/LABLE/IMAGE")
img_obj = ImageTk.PhotoImage(base_img)
lblVideo = tk.Label(root, image=img_obj)
lblVideo.pack()

cap = cv2.VideoCapture(video_path)
if cap.isOpened():
    def show_frame():
        _, frame = cap.read()
        im = Image.fromarray(frame)
        img = ImageTk.PhotoImage(im)
        lblVideo.configure(image=img)
        lblVideo.image = img
        lblVideo.after(1, show_frame)  # Need to create callback here
    show_frame()
root.mainloop()
Although this does not use the with statement, you can try placing the after() callback inside the show_frame function itself, as shown above.

PyQt widget keeps increasing in size and goes out the window

I have written an application in PyQt5. I am basically displaying a camera feed (in this case my web cam), but the problem is that the frame size keeps on increasing at run time and ultimately goes out of my laptop screen. I'm unable to figure out what the problem is.
Can anyone please explain what I'm doing wrong here?
Below is the code snippet.
from PyQt5 import QtCore, QtGui, QtWidgets
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils


class CameraWidget(QtWidgets.QWidget):
    """Independent camera feed
    Uses threading to grab IP camera frames in the background

    @param width - Width of the video frame
    @param height - Height of the video frame
    @param stream_link - IP/RTSP/Webcam link
    @param aspect_ratio - Whether to maintain frame aspect ratio or force into frame
    """

    def __init__(self, width=0, height=0, aspect_ratio=False, parent=None, deque_size=1):
        super(CameraWidget, self).__init__(parent)
        # Initialize deque used to store frames read from the stream
        self.deque = deque(maxlen=deque_size)
        self.maintain_aspect_ratio = aspect_ratio
        self.camera_stream_link = 0
        # Flag to check if camera is valid/working
        self.online = False
        self.capture = None
        self.video_frame = QtWidgets.QLabel()
        self.load_network_stream()
        # Start background frame grabbing
        self.get_frame_thread = Thread(target=self.get_frame, args=())
        self.get_frame_thread.daemon = True
        self.get_frame_thread.start()
        # Periodically set video frame to display
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.set_frame)
        self.timer.start(.5)
        print('Started camera: {}'.format(self.camera_stream_link))

    def load_network_stream(self):
        """Verifies stream link and open new stream if valid"""
        def load_network_stream_thread():
            if self.verify_network_stream(self.camera_stream_link):
                self.capture = cv2.VideoCapture(self.camera_stream_link)
                self.online = True
        self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
        self.load_stream_thread.daemon = True
        self.load_stream_thread.start()

    def verify_network_stream(self, link):
        """Attempts to receive a frame from given link"""
        cap = cv2.VideoCapture(link)
        if not cap.isOpened():
            return False
        cap.release()
        return True

    def get_frame(self):
        # time.sleep(5)
        """Reads frame, resizes, and converts image to pixmap"""
        while True:
            try:
                if self.capture.isOpened() and self.online:
                    # Read next frame from stream and insert into deque
                    status, frame = self.capture.read()
                    if status:
                        self.deque.append(frame)
                    else:
                        self.capture.release()
                        self.online = False
                else:
                    # Attempt to reconnect
                    print('attempting to reconnect', self.camera_stream_link)
                    self.load_network_stream()
                    self.spin(2)
                self.spin(.001)
            except AttributeError:
                pass

    def spin(self, seconds):
        """Pause for set amount of seconds, replaces time.sleep so program doesnt stall"""
        time_end = time.time() + seconds
        while time.time() < time_end:
            QtWidgets.QApplication.processEvents()

    def set_frame(self):
        """Sets pixmap image to video frame"""
        if not self.online:
            self.spin(1)
            return
        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]
            # Keep frame aspect ratio
            if self.maintain_aspect_ratio:
                self.frame = imutils.resize(frame, width=self.screen_width)
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))
            self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
            h, w, ch = self.frame.shape
            bytesPerLine = ch * w
            # Convert to pixmap and set to video frame
            self.img = QtGui.QImage(self.frame, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
            self.pix = QtGui.QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)

    def set_frame_params(self, width, height):
        self.screen_width = width
        self.screen_height = height

    def get_video_frame(self):
        self.video_frame.setScaledContents(True)
        return self.video_frame


class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        # Middle frame
        self.mid_frame = QtWidgets.QFrame()
        self.mid_frame.setStyleSheet("background-color: rgb(153, 187, 255)")
        self.camera = CameraWidget()
        # Create camera widgets
        print('Creating Camera Widgets...')
        self.video_frame = self.camera.get_video_frame()
        self.mid_layout = QtWidgets.QHBoxLayout()
        self.mid_layout.addWidget(self.video_frame)
        self.mid_frame.setLayout(self.mid_layout)
        self.widget = QtWidgets.QWidget()
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(self.mid_frame)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.widget.setLayout(self.layout)
        self.setCentralWidget(self.widget)

    def event(self, e):
        if e.type() in (QtCore.QEvent.Show, QtCore.QEvent.Resize):
            print("resize ", self.mid_frame.width(), self.mid_frame.height())
            self.camera.set_frame_params(self.mid_frame.width() - 10, self.mid_frame.height() - 10)
        return QtWidgets.QMainWindow.event(self, e)


if __name__ == '__main__':
    # Create main application window
    app = QtWidgets.QApplication([])
    app.setStyle(QtWidgets.QStyleFactory.create("Cleanlooks"))
    w = MainWindow()
    w.showMaximized()
    sys.exit(app.exec_())

webcam recording with python tkinter

I want to record video (not audio) from a webcam.
I have two buttons, for start recording and stop recording.
When the program starts, it picks up images from the camera and shows them on screen; that works perfectly.
My problem is that when I click start recording and, after some time, stop recording,
only an .avi file of 0K or 6K is created, with no recording in it.
import tkinter
import cv2
import PIL.Image, PIL.ImageTk

stopb = None


class App():
    def __init__(self, window, window_title):
        self.window = window
        self.window.title = window_title
        self.ok = False
        self.video = cv2.VideoCapture(0)
        self.width = self.video.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # create videowriter
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter('output.avi', self.fourcc, 10, (640, 480))
        # Create a canvas that can fit the above video source size
        self.canvas = tkinter.Canvas(window, width=self.width, height=self.height)
        self.canvas.pack()
        self.opencamera = tkinter.Button(window, text="open camera", command=self.open_camera)
        self.opencamera.pack()
        self.closecamera = tkinter.Button(window, text="close camera", command=self.close_camera)
        self.closecamera.pack()
        self.delay = 10
        self.update()
        # After it is called once, the update method will be automatically called every delay milliseconds
        self.window.mainloop()

    def update(self):
        ret, frame = self.video.read()
        if self.ok == 'T':
            self.out.write(frame)
        if ret:
            self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
        self.window.after(self.delay, self.update)

    def open_camera(self):
        self.ok = True
        print("camera opened")
        print(self.ok)

    def close_camera(self):
        print("camera closed")
        self.ok = False
        self.video.release()
        self.out.release()

    def __del__(self):
        if self.video.isOpened():
            self.video.release()
            self.out.release()


App(tkinter.Tk(), "mywindow")
Your problem is that you're never writing anything to your output, because the condition if self.ok == 'T' will never evaluate to true (self.ok is a boolean, never the string 'T'). You should change it to just if self.ok, the same as you did with ret.
def update(self):
    ret, frame = self.video.read()
    if self.ok:
        self.out.write(frame)
    if ret:
        self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
        self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
    self.window.after(self.delay, self.update)
One more issue I found was that after clicking the close camera button followed by the open camera button, a new recording won't start.
That means you can only record the first video captured before clicking the close camera button.
This is because the close camera function releases the video object and the output object, so video.read() in the update function no longer works.
A quick fix is to create a set_camera() method that (re)creates the video object, and call it every time in open_camera(); see the sketch after the link below.
https://replit.com/#AmitYadav4/video-record-in-tkinker-canvas
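A minimal sketch of that idea, assuming the App class from the question (set_camera() is a hypothetical helper name; the VideoWriter is recreated as well so a fresh output file can be written):

    def set_camera(self):
        # (Re)create the capture and the writer so recording can restart
        self.video = cv2.VideoCapture(0)
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter('output.avi', self.fourcc, 10, (640, 480))

    def open_camera(self):
        if not self.video.isOpened():
            self.set_camera()  # reopen after close_camera() released the objects
        self.ok = True
        print("camera opened")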
