I am running a GUI with Kivy while simultaneously processing some real-time 3D scanner data. I'm using a multiprocessing Process to handle the scan data while the Kivy GUI runs. The scanner data is read in by OpenCV, frame by frame.
My issue is that I can't share the scanner data with the Kivy instance in order to display it within the GUI. I have tried making the array "frame" global, but that doesn't seem to do the trick.
I have read about the multiprocessing Manager, but I'm unclear on how to use it to manage a numpy array.
import cv2
import numpy as np
from multiprocessing import Process

from kivy.app import App
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.gridlayout import GridLayout
from kivy.uix.image import Image

frame = np.zeros((480, 640, 3), dtype=np.uint8)

class CamApp(App):
    def update_slider(self, instance, value):
        #print(value)
        self.slider_value = value * 3

    def build(self):
        self.img1 = Image()
        layout = GridLayout(cols=1)
        layout.add_widget(self.img1)
        Clock.schedule_interval(self.update, 1.0 / 33.0)
        return layout

    def update(self, dt):
        global frame
        # format as texture
        buf1 = cv2.flip(frame, 0)
        buf = buf1.tobytes()
        texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        # display image from the texture
        self.img1.texture = texture1

def cam(message):
    global frame
    print(message)
    dc = DepthCamera()  # DepthCamera is my own wrapper around the 3D scanner (not shown)
    while True:
        ret, depth_frame, colour_frame = dc.get_frame()
        frame = cv2.applyColorMap(cv2.convertScaleAbs(depth_frame, alpha=0.2), cv2.COLORMAP_BONE)
        #cv2.imshow("Depth frame", frame)
        #cv2.imshow("Color frame", colour_frame)
        #key = cv2.waitKey(1)

if __name__ == '__main__':
    p = Process(target=cam, args=('beginning capture',))
    p.start()
    CamApp().run()
    cv2.destroyAllWindows()
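One way to get frames across the process boundary is multiprocessing.shared_memory (Python 3.8+). The sketch below is illustrative and not part of the original post: the capture process writes each frame into a shared block, and the GUI process wraps the same block in a numpy array and reads from it. The shape and dtype are assumptions and would have to match the real frames.

import numpy as np
from multiprocessing import Process, shared_memory

SHAPE, DTYPE = (480, 640, 3), np.uint8  # illustrative; match the real frame

def capture(shm_name):
    # attach to the existing block and wrap it in a numpy view
    shm = shared_memory.SharedMemory(name=shm_name)
    shared_frame = np.ndarray(SHAPE, dtype=DTYPE, buffer=shm.buf)
    # inside the real capture loop, copy each new frame in place:
    shared_frame[:] = 127  # stand-in for the cv2/DepthCamera output
    shm.close()

if __name__ == '__main__':
    shm = shared_memory.SharedMemory(create=True,
                                     size=int(np.prod(SHAPE)) * np.dtype(DTYPE).itemsize)
    shared_frame = np.ndarray(SHAPE, dtype=DTYPE, buffer=shm.buf)  # the GUI reads this view
    p = Process(target=capture, args=(shm.name,))
    p.start()
    p.join()
    print(shared_frame.mean())  # sees what the capture process wrote
    shm.close()
    shm.unlink()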
The title says it all: loading the camera video takes a lot of time, and the app stays in "Not Responding" during that time.
It takes almost 1 to 2 minutes to load the camera.
This is my code:
import cv2
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.image import Image
from kivy.uix.screenmanager import Screen

class Attendance(Screen):
    def on_enter(self, *args):
        self.image = Image()
        self.capture = cv2.VideoCapture(0)
        Clock.schedule_interval(self.load_video, 1.0 / 30.0)
        self.add_widget(self.image)
        return super().on_enter(*args)

    def load_video(self, *args):
        ret, frame = self.capture.read()
        self.image_frame = frame
        buffer = cv2.flip(frame, 0).tobytes()
        texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        texture.blit_buffer(buffer, colorfmt='bgr', bufferfmt='ubyte')
        self.image.texture = texture
I fixed this by changing self.capture to the following:
self.capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
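cv2.CAP_DSHOW selects the DirectShow backend, which on Windows typically opens far faster than the default backend. If opening the capture still blocks the UI, another option (my own suggestion, not part of the original answer) is to open it in a background thread and have the Clock callback skip frames until it is ready; a rough sketch based on the posted class:

import threading

import cv2
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.image import Image
from kivy.uix.screenmanager import Screen

class Attendance(Screen):
    def on_enter(self, *args):
        self.image = Image()
        self.capture = None
        # open the (potentially slow) capture off the UI thread
        threading.Thread(target=self._open_capture, daemon=True).start()
        Clock.schedule_interval(self.load_video, 1.0 / 30.0)
        self.add_widget(self.image)
        return super().on_enter(*args)

    def _open_capture(self):
        self.capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)

    def load_video(self, *args):
        if self.capture is None:
            return  # camera not ready yet, skip this tick
        ret, frame = self.capture.read()
        if not ret:
            return
        buffer = cv2.flip(frame, 0).tobytes()
        texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        texture.blit_buffer(buffer, colorfmt='bgr', bufferfmt='ubyte')
        self.image.texture = texture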
I want to display images from an Allied Vision camera inside a tkinter frame using OpenCV and the SDK for the camera, VimbaPython.
The only possible way to initialize the camera is with a Python with statement:
with Vimba.get_instance() as vimba:
    cams = vimba.get_all_cameras()
    with cams[0] as camera:
        camera.get_frame()
        # Convert frame to opencv image, then use Image.fromarray and ImageTk.PhotoImage to
        # display it on the tkinter GUI
Everything works fine so far. But I don't only need a single frame. Instead, I need to continuously get frames and display them on the screen so that it is streaming.
I found that one way to do it is to call the .after(delay, function) method from a tkinter Label widget.
So, after obtaining one frame, I want to call the same function to get a new frame and display it again. The code would look like this:
with Vimba.get_instance() as vimba:
    cams = vimba.get_all_cameras()
    with cams[0] as camera:
        def show_frame():
            frame = camera.get_frame()
            frame = frame.as_opencv_image()
            im = Image.fromarray(frame)
            img = ImageTk.PhotoImage(im)
            lblVideo.configure(image=img)  # this is the Tkinter Label Widget
            lblVideo.image = img

        show_frame()
        lblVideo.after(20, show_frame)
This shows the first frame and then stops, throwing an error saying that Vimba needs to be initialized with a with statement. I don't know much about Python, but it looks like by the time the function scheduled with .after() runs, the with statement has already ended.
I would like to know if it is possible to execute this show_frame() function without ending the with. Also, I can't initialize the camera every time, because then the program becomes really slow.
Thank you
I know this question is pretty old, but I ran into a similar problem with the Allied Vision cameras and found the solution to be relatively robust. So I hope this helps someone, even if not the OP.
An alternative to using with statements is using __enter__ and __exit__ (see sample here). With this, I created a class for the Vimba camera, and during __init__ I used these functions twice: once to initialize the Vimba instance, and once to open the camera itself. An example follows...
vimba_handle = Vimba.get_instance().__enter__()
camera = vimba_handle.get_all_cameras()[0].__enter__()
I'll include a longer snippet as well, but please note that my purpose was slightly different from the OP's intent. Hopefully it is still useful.
import threading
import time
from inspect import currentframe

import cv2
import numpy as np
from vimba import (Vimba, VimbaFeatureError, intersect_pixel_formats,
                   OPENCV_PIXEL_FORMATS, MONO_PIXEL_FORMATS, COLOR_PIXEL_FORMATS)

# error_check() is my own logging helper and is not shown here

class VimbaCam:

    def __init__(self, device_id=0):
        # Variables
        self.current_frame = np.array([])
        self.device = None
        self.device_id = device_id
        self.vimba_handle = Vimba.get_instance().__enter__()
        self.is_streaming = False
        self.scale_window = 4
        self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)
        # Default settings
        self.auto_exposure = "Off"
        self.auto_gain = "Off"
        self.acquisition = "Continuous"
        self.exposure_us = 200000
        self.fps = 6.763
        self.gain = 0
        self.gamma = 1

        self.open()

    def close(self):
        if self.device is not None:
            if self.is_streaming:
                self.stop_stream()
                time.sleep(1)
            self.device.__exit__(None, None, None)
        self.vimba_handle.__exit__(None, None, None)

    def open(self):
        cams = self.vimba_handle.get_all_cameras()
        if not cams:
            error_check(151, currentframe())
        else:
            self.device = cams[self.device_id].__enter__()
            self.set_defaults()
            self.start_stream()

    def start_stream(self):
        if self.device is not None:
            self.is_streaming = True
            self.stream_thread.start()
            time.sleep(1)

    def thread_stream(self):
        while self.is_streaming:
            current_frame = self.device.get_frame().as_opencv_image()
            h, w, _ = current_frame.shape
            self.current_frame = current_frame.reshape((h, w))
        self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)

    def stop_stream(self):
        if self.device is not None:
            self.is_streaming = False

    def live_video(self):
        if self.device is not None:
            window_name = "Allied Vision"
            h, w = self.current_frame.shape
            w = int(w / self.scale_window)
            h = int(h / self.scale_window)
            cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(window_name, w, h)
            while 1:
                cv2.imshow(window_name, self.current_frame)
                cv2.waitKey(1)
                if cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1:
                    break
            cv2.destroyAllWindows()

    def set_defaults(self):
        if self.device is not None:
            # Exposure time settings
            self.device.ExposureAuto.set(self.auto_exposure)
            self.device.ExposureTimeAbs.set(self.exposure_us)
            # Gain settings
            self.device.GainAuto.set(self.auto_gain)
            self.device.Gain.set(self.gain)
            # Gamma settings
            self.device.Gamma.set(self.gamma)
            self.device.AcquisitionMode.set(self.acquisition)
            self.device.AcquisitionFrameRateAbs.set(self.fps)
            # Try to adjust GeV packet size (available for GigE only)
            try:
                self.device.GVSPAdjustPacketSize.run()
                while not self.device.GVSPAdjustPacketSize.is_done():
                    pass
            except (AttributeError, VimbaFeatureError):
                pass
            # Color formatting (tries mono first, then color)
            cv_formats = intersect_pixel_formats(self.device.get_pixel_formats(), OPENCV_PIXEL_FORMATS)
            mono_formats = intersect_pixel_formats(cv_formats, MONO_PIXEL_FORMATS)
            color_formats = intersect_pixel_formats(cv_formats, COLOR_PIXEL_FORMATS)
            if mono_formats:
                self.device.set_pixel_format(mono_formats[0])
            elif color_formats:
                self.device.set_pixel_format(color_formats[0])

if __name__ == "__main__":
    dev = VimbaCam()
    dev.live_video()
    dev.close()
You need to use a thread to run the capture code and pass the frames read via a queue. The main tkinter application then reads the queue and shows the frames periodically using .after().
Below is an example based on your posted code:
import threading
from queue import SimpleQueue
import tkinter as tk

from PIL import Image, ImageTk
from vimba import Vimba

def camera_streaming(queue):
    global is_streaming
    is_streaming = True
    print("streaming started")
    with Vimba.get_instance() as vimba:
        with vimba.get_all_cameras()[0] as camera:
            while is_streaming:
                frame = camera.get_frame()
                frame = frame.as_opencv_image()
                im = Image.fromarray(frame)
                img = ImageTk.PhotoImage(im)
                queue.put(img)  # put the captured image into the queue
    print("streaming stopped")

def start_streaming():
    start_btn["state"] = "disabled"  # disable start button to avoid running the threaded task more than once
    stop_btn["state"] = "normal"     # enable stop button to allow user to stop the threaded task
    show_streaming()
    threading.Thread(target=camera_streaming, args=(queue,), daemon=True).start()

def stop_streaming():
    global is_streaming, after_id
    is_streaming = False  # terminate the streaming thread
    if after_id:
        lblVideo.after_cancel(after_id)  # cancel the showing task
        after_id = None
    stop_btn["state"] = "disabled"  # disable stop button
    start_btn["state"] = "normal"   # enable start button

# periodical task to show frames in queue
def show_streaming():
    global after_id
    if not queue.empty():
        image = queue.get()
        lblVideo.config(image=image)
        lblVideo.image = image
    after_id = lblVideo.after(20, show_streaming)

queue = SimpleQueue()  # queue for video frames
after_id = None

root = tk.Tk()

lblVideo = tk.Label(root, image=tk.PhotoImage(), width=640, height=480)
lblVideo.grid(row=0, column=0, columnspan=2)

start_btn = tk.Button(root, text="Start", width=10, command=start_streaming)
start_btn.grid(row=1, column=0)

stop_btn = tk.Button(root, text="Stop", width=10, command=stop_streaming, state="disabled")
stop_btn.grid(row=1, column=1)

root.mainloop()
Note that I don't have the camera or the SDK installed, so the above code may not work for you as-is; it just demonstrates how to use a thread, a queue and .after().
Below is a testing vimba module (saved as vimba.py) that I use to simulate the VimbaPython module using OpenCV and a webcam:
import cv2

class Frame:
    def __init__(self, frame):
        self.frame = frame

    def as_opencv_image(self):
        return self.frame

class Camera:
    def __init__(self, cam_id=0):
        self.cap = cv2.VideoCapture(cam_id, cv2.CAP_DSHOW)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.cap.release()
        return self

    def get_frame(self):
        ret, frame = self.cap.read()
        if ret:
            return Frame(frame)

class Vimba:
    _instance = None

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = Vimba()
        return cls._instance

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return self

    def get_all_cameras(self):
        return (Camera(),)
I tried to read the frames in OpenCV and display them in a tkinter label. I was able to do so using the code below:
import tkinter as tk
import cv2
from PIL import ImageTk, Image

video_path = "SAMPLE/STORED_VIDEO/PATH"

root = tk.Tk()
base_img = Image.open("PATH/TO/DEFAULT/LABEL/IMAGE")
img_obj = ImageTk.PhotoImage(base_img)
lblVideo = tk.Label(root, image=img_obj)
lblVideo.pack()

cap = cv2.VideoCapture(video_path)
if cap.isOpened():
    def show_frame():
        _, frame = cap.read()
        im = Image.fromarray(frame)
        img = ImageTk.PhotoImage(im)
        lblVideo.configure(image=img)
        lblVideo.image = img
        lblVideo.after(1, show_frame)  # Need to create callback here
    show_frame()
root.mainloop()
Although this does not use a with statement, you can try the same approach of scheduling the after() callback from inside the show_frame function itself.
I was trying to make my frame capture faster in OpenCV; it was very slow when used normally, so I asked about it here: Make faster videocapture opencv. The answer was to use multithreading to make it faster, so I coded it like this:
# The same genderrecognition.py code but with multi-threading to make it faster and fix the lag of the other one
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import os
import cvlib as cv

# open webcam and initiate the cam
webcam = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# opencv class
class VideoStream:
    def __init__(self):
        # read frame from webcam
        self.status, self.frame = webcam.read()
        webcam.set(cv2.CAP_PROP_FPS, 1000)
        self.frame = cv2.flip(self.frame, 1)
        print("videostream working")

# face detection class
class face_detection:
    def __init__(self):
        # use VideoStream class variables
        self.videostream = VideoStream()
        self.frame = self.videostream.frame

        # apply face detection
        self.face, self.confidence = cv.detect_face(self.frame)

        # loop through detected faces
        for self.idx, self.f in enumerate(self.face):
            # get the corner points of the rectangle
            self.startX, self.startY = self.f[0], self.f[1]
            self.endX, self.endY = self.f[2], self.f[3]
            cv2.rectangle(self.frame, (self.startX, self.startY), (self.endX, self.endY), (0, 255, 0), 2)
            self.face_crop = np.copy(self.frame[self.startY:self.endY, self.startX:self.endX])

            if self.face_crop.shape[0] < 10 or self.face_crop.shape[1] < 10:
                continue

            # preprocessing for gender detection model
            self.face_crop = cv2.resize(self.face_crop, (96, 96))
            self.face_crop = self.face_crop.astype("float") / 255.0
            self.face_crop = img_to_array(self.face_crop)
            self.face_crop = np.expand_dims(self.face_crop, axis=0)

            GFR()
        print("face_detection working")

# gender recognition class
class GFR:
    def __init__(self):
        self.model = load_model("C:/Users/berna/Desktop/Programming/AI_ML_DL/Projects/FaceGenderRecognition/gender_detection.model")
        self.facedetection = face_detection()
        self.face_crop = self.facedetection.face_crop
        self.classes = ['hombre', 'mujer']
        self.startX, self.startY = self.facedetection.startX, self.facedetection.startY
        self.endX, self.endY = self.facedetection.endX, self.facedetection.endY
        self.frame = self.facedetection.frame

        # apply gender detection to the face with the model
        self.conf = self.model.predict(self.face_crop)[0]

        # get label with max accuracy
        self.idx = np.argmax(self.conf)
        self.label = self.classes[self.idx]
        self.label = "{}: {:.2f}".format(self.label, self.conf[self.idx] * 100)
        self.Y = self.startY - 10 if self.startY - 10 > 10 else self.startY + 10

        # write label and confidence above the face rectangle
        cv2.putText(self.frame, self.label, (self.startX, self.Y), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 255, 0), 2)
        print("gender recognition working!")

# classes and webcam while loop
gender_detection = GFR()

# loop through frames
while webcam.isOpened():
    VideoStream()
    face_detection()

    # display output
    cv2.imshow("Gender Detection", gender_detection.frame)

    # press "Q" to stop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

webcam.release()
cv2.destroyAllWindows()
It gives me no errors, but compared to my other code from the other question, where the webcam window opens, with this one it doesn't open at all. Any idea?
Your VideoStream class's __init__ looks OK, but I think you might have better luck creating a cv2 VideoCapture object in the __init__ as well:
self.stream = cv2.VideoCapture(0)
I'm not really as familiar with webcam.set() but if you want to incorporate that, I'm sure you can.
Here you have grabbed the initial frames:
self.status, self.frame = webcam.read()
(Or using the new self.stream variable):
self.status, self.frame = self.stream.read()
Yet this will only grab a frame when the class is initialized, not in a loop. To achieve a loop, you have to add a few more class methods. One will be for continuously getting frames (I added a self.stopped attribute, although it's not in your code; it might be a good idea to have a True/False stop flag):
def read_stream(self):
    while not self.stopped:
        (self.grabbed, self.frame) = self.stream.read()
Then if you want to use multithreading, you can make a thread pointing to the read_stream method:
def start(self):
    Thread(target=self.read_stream, args=()).start()
    return self
You will have to call the start() method on the VideoStream before you start your CV2 imshow() loop.
video_stream = VideoStream().start()  # <------ Here --------

while webcam.isOpened():
    face_detection()

    # display output
    cv2.imshow("Gender Detection", gender_detection.frame)

    # press "Q" to stop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Hopefully this helps get the CV2 display to show. Whether your GFR class or face detection is grabbing the right frames from the VideoStream class... that's something else, and I can't debug all that code.
I have a project where I need to design a GUI in Qt. This design contains a widget where a live video feed will be displayed from a USB webcam using OpenCV. This project will detect faces and will also recognize them, which means a lot of processing will happen on each frame.
For this, I have created a thread which initializes the camera and takes frames from it using OpenCV. It then puts all the frames into a queue, and this queue is read by a function update_frame, which basically displays the frame on the Qt widget. This was working fine with no delay.
Inside the update_frame function, I added the face detection, which made it perform very slowly. So I created another thread, start_inferencing, which basically reads a frame from the queue and, after detecting the face, puts the frame into another queue q2, which is then read by update_frame and displayed, but it is still responding very slowly. Below is the code:
import queue
import sys
import threading

import cv2
from PyQt5 import QtCore, QtGui, QtWidgets  # PyQt5 assumed

# Ui_MainWindow, OwnImageWidget, face, set_initial_alert_temp and curr_temp_cel
# come from the rest of the project and are not shown here.

running = True  # flag checked by the grab thread
q = queue.Queue()
q2 = queue.Queue()

def grab(cam, qu, width, height):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    while running:
        frame = {}
        capture.grab()
        ret_val, img = capture.retrieve(0)
        frame["img"] = img

        if qu.qsize() < 100:
            qu.put(frame)
        else:
            print(qu.qsize())

class Logic(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        set_initial_alert_temp()

        self.window_width = self.ImgWidget.frameSize().width()
        self.window_height = self.ImgWidget.frameSize().height()
        self.ImgWidget = OwnImageWidget(self.ImgWidget)

        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(1)

        self.outside_temp_text_box.setText(str(curr_temp_cel))

    def update_frame(self):
        if not q2.empty():
            frame1 = q2.get()
            img = frame1["img"]

            img_height, img_width, img_colors = img.shape
            scale_w = float(self.window_width) / float(img_width)
            scale_h = float(self.window_height) / float(img_height)
            scale = min([scale_w, scale_h])

            if scale == 0:
                scale = 1

            img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            height, width, bpc = img.shape
            bpl = bpc * width
            image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
            self.ImgWidget.setImage(image)

def start_inferencing():
    while True:
        if not q.empty():
            frame = q.get()
            img = frame["img"]

            face_bbox = face.detect_face(img)
            if face_bbox is not None:
                (f_startX, f_startY, f_endX, f_endY) = face_bbox.astype("int")
                f_startX = f_startX + 10
                f_startY = f_startY + 10
                f_endX = f_endX - 10
                f_endY = f_endY - 10
                cv2.rectangle(img, (f_startX, f_startY), (f_endX, f_endY), (0, 255, 0), 2)

                frame1 = {"img": img}
                if q2.qsize() < 100:
                    q2.put(frame1)
                else:
                    print(q2.qsize())

def main():
    capture_thread = threading.Thread(target=grab, args=(0, q, 640, 480))
    capture_thread.start()

    infer_thread = threading.Thread(target=start_inferencing)
    infer_thread.start()

    app = QtWidgets.QApplication(sys.argv)
    w = Logic(None)
    w.setWindowTitle('Test')
    w.show()
    app.exec_()

main()
Below is a summary of what's happening in the code:
camera -> frame -> queue.put # (reading frame from camera and putting it in queue)
queue.get -> frame -> detect face -> queue2.put # (getting frame from queue, detecting face in it and putting the updated frames in queue2)
queue2.get -> frame -> display it on qt widget # (getting frame from queue2 and display it on qt widget)
The main reason the live video feed is slow is that the frames read in the grab function cannot be processed fast enough, so the queue size keeps growing and everything becomes very slow. Is there any good approach I can use that detects the face and also shows it without any delay? Please help. Thanks.
The queue accumulates the frames that the thread does not manage to process, so there is no chance of ever catching up; that's why the queue is useless here. The working clock here is defined by the arriving frames: each frame generates an event, which can be handled in its own thread (say, a processing thread); once the frame is processed, the processing thread generates another event, which is handled in another thread (say, the GUI thread) that shows the result to the user.
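As a rough illustration of that event-driven idea (my own sketch, not the answerer's code; PyQt5 is assumed since the question uses it, and logic.show_frame is a hypothetical slot on the GUI side), a worker object can live in its own QThread and hand each processed frame to the GUI thread through a signal:

from PyQt5 import QtCore

class FrameWorker(QtCore.QObject):
    # emitted once a frame has been processed; the GUI thread connects to it
    result_ready = QtCore.pyqtSignal(object)

    @QtCore.pyqtSlot(object)
    def process(self, img):
        # ... run the face detection on img here ...
        self.result_ready.emit(img)

# somewhere in the GUI setup (sketch):
# worker = FrameWorker()
# worker_thread = QtCore.QThread()
# worker.moveToThread(worker_thread)
# worker.result_ready.connect(logic.show_frame)  # queued connection, runs in the GUI thread
# worker_thread.start()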
If you really do need some buffering, look at a ring buffer: it has a finite length.
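A minimal sketch of such a ring buffer (my own example, not from the answer), using collections.deque so that old frames are silently dropped once the buffer is full:

from collections import deque
from threading import Lock

class FrameRingBuffer:
    def __init__(self, maxlen=2):
        self._frames = deque(maxlen=maxlen)  # deque discards the oldest item when full
        self._lock = Lock()

    def push(self, frame):
        with self._lock:
            self._frames.append(frame)

    def latest(self):
        # return the newest frame, or None if nothing has arrived yet
        with self._lock:
            return self._frames[-1] if self._frames else None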
You have a producer/consumer sequence...
grab frame and push on queue1
dequeue frame from queue1, process and enqueue results on queue2
dequeue results from queue2 and display
From what you've stated, stage 2 is the bottleneck. That being the case, you could try assigning more resources (i.e. threads) to that stage, so that stage 2 has multiple threads reading from queue1, processing, and pushing results onto queue2. You just need to ensure that the processed data popped from queue2 is sequenced correctly, presumably by tagging each initial frame with a sequence number or id; a sketch of this follows.
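A minimal sketch of that arrangement (my own example; heavy_face_detection and show are stand-ins for the asker's detection and display steps):

import heapq
import queue
import threading

q1, q2 = queue.Queue(maxsize=100), queue.Queue(maxsize=100)

def heavy_face_detection(img):
    # stand-in for the real stage-2 work (face detection + drawing)
    return img

def show(result):
    # stand-in for the display step (e.g. put the result on a queue the GUI polls)
    pass

def worker():
    # several of these run in parallel, all feeding off queue1
    while True:
        seq, img = q1.get()
        result = heavy_face_detection(img)
        q2.put((seq, result))

def display_in_order():
    # reorder results by sequence number before handing them to the GUI
    expected, pending = 0, []
    while True:
        heapq.heappush(pending, q2.get())
        while pending and pending[0][0] == expected:
            _, result = heapq.heappop(pending)
            show(result)
            expected += 1

for _ in range(4):  # give the bottleneck stage several threads
    threading.Thread(target=worker, daemon=True).start()
threading.Thread(target=display_in_order, daemon=True).start()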
I am trying to simply play a video in a GTK environment using OpenCV code in Python. To achieve this, I made a Glade file that contains a toplevel window, a file menu, a drawing area and a file chooser dialog. When the user selects a file, the code starts a thread that calls the function VideoPlayerDA, which starts reading the video and, after every frame, generates a queue_draw signal to display the frame in the drawing area. The problem, however, is that after a few frames the whole UI freezes and becomes unresponsive, and the video gets stuck.
Tools: I am using GTK version 3.22.11, Python 3.5.3 and OpenCV version 3.3.0 on Debian Stretch.
PS: cv2.waitKey also seems not to work.
import cv2
import time
import threading
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject, Gdk, GdkPixbuf

GObject.threads_init()
mymutex = threading.Lock()
dimg = GdkPixbuf.Pixbuf.new_from_file('test.jpg')

def VideoPlayerDA(filename, drawing_area):
    global dimg, dimg_available
    cap = cv2.VideoCapture(filename)
    while cap.isOpened():
        mymutex.acquire()
        ret, img = cap.read()
        if img is not None:
            boxAllocation = drawing_area.get_allocation()
            img = cv2.resize(img, (boxAllocation.width,
                                   boxAllocation.height))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR by default; Gtk expects RGB, hence the conversion
            dimg = GdkPixbuf.Pixbuf.new_from_data(img.tostring(),
                                                  GdkPixbuf.Colorspace.RGB, False, 8,
                                                  img.shape[1],
                                                  img.shape[0],
                                                  img.shape[2] * img.shape[1], None, None)
            #time.sleep(0.03)
            drawing_area.queue_draw()
            mymutex.release()
            time.sleep(0.03)
            #if ((cv2.waitKey(30) & 0xFF) == ord('q')):
            #    break
        else:
            mymutex.release()
            break
    print('end of file')

class video_player_gui:
    def on_main_window_destroy(self, object):
        Gtk.main_quit()

    def on_open_activate(self, widget):
        response = self.file_chooser.run()
        if response == 0:
            self.filename = self.file_chooser.get_filename()
            thread = threading.Thread(target=VideoPlayerDA, args=(self.filename, self.drawing_area,))
            thread.daemon = True
            thread.start()
            self.file_chooser.hide()
        else:
            pass

    def on_drawing_area_draw(self, widget, cr):
        global dimg
        Gdk.cairo_set_source_pixbuf(cr, dimg.copy(), 0, 0)
        cr.paint()

    def __init__(self):
        self.gladefile = '/home/nouman/Development/Glade/P2/OpenCv_integration_test.glade'
        self.builder = Gtk.Builder()
        self.builder.add_from_file(self.gladefile)
        self.builder.connect_signals(self)

        self.main_window = self.builder.get_object("main_window")
        self.file_chooser = self.builder.get_object("file_chooser")
        self.drawing_area = self.builder.get_object("drawing_area")
        self.main_window.show()

if __name__ == "__main__":
    main = video_player_gui()
    Gtk.main()
I found a solution, for anyone who runs into the same problem: I was using the global variable dimg without thread synchronization, and synchronizing the threads before using the variable solved it. Editing on_drawing_area_draw as follows fixes the issue:
def on_drawing_area_draw(self, widget, cr):
    global dimg
    mymutex.acquire()
    Gdk.cairo_set_source_pixbuf(cr, dimg.copy(), 0, 0)
    cr.paint()
    mymutex.release()