I am writing a webcam GUI that is supposed to take pictures and adjust the webcam parameters. After the video stream is activated on the main GUI window, an additional window can be opened to change the webcam parameters (Screenshot 1, Screenshot 2).
I am using QThread to stream onto a QLabel. I was also able to set the initial camera parameters in the camera-properties window. My problem is changing the Exposure parameter with a slider on the sub-window and seeing the result in real time on the main window.
Please see the code.
import sys
from PyQt5.QtWidgets import *
from PyQt5 import uic
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import cv2
class MainFrame(QMainWindow):
def __init__(self):
super(MainFrame, self).__init__()
# Loading UI
uic.loadUi("MainFrame.ui", self)
# Remove maximize button to prevent crashing
self.setWindowFlags(Qt.WindowCloseButtonHint | Qt.WindowMinimizeButtonHint)
# Define Widgets
self.Video_Feed = self.findChild(QLabel, 'Video_Feed')
self.Case_Name = self.findChild(QLineEdit, "Case_Name")
self.Pictures_List = self.findChild(QListWidget, "Pictures_List")
self.Start_Video = self.findChild(QAction, 'actionStart_Video')
self.Start_Video.setShortcut('Shift+S')
self.Stop_Video = self.findChild(QAction, 'actionStop_Video')
self.Stop_Video.setShortcut('Shift+F')
self.Take_a_Picture = self.findChild(QAction, 'actionTake_a_picture')
self.Take_a_Timed_Picture = self.findChild(QAction, 'actionTake_a_timed_picture')
self.Camera_Properties = self.findChild(QAction, 'actionProperties')
# Initializing Video
self.Start_Video.triggered.connect(self.Start_Video_Clicked)
self.Stop_Video.triggered.connect(self.Stop_Video_Clicked)
self.Camera_Properties.triggered.connect(self.Camera_Properties_Clicked)
def Video_Feed_Update(self, Image):
self.Video_Feed.setPixmap(QPixmap.fromImage(Image))
def Start_Video_Clicked(self):
self.Video_Feed_is_Running = True
self.thread = QThread()
self.Video_Thread = Worker()
self.Video_Thread.moveToThread(self.thread)
self.Video_Thread.ImageUpdate.connect(self.Video_Feed_Update)
self.thread.started.connect(self.Video_Thread.run)
self.thread.start()
def Stop_Video_Clicked(self):
self.Video_Thread.stop_video()
self.Video_Feed.setText("Your video starts here")
def Camera_Properties_Clicked(self):
self.CP = CameraParameters()
Initial_Exposure = self.Video_Thread.Camera_Initial_Parameters()
self.CP.Setup_Exposure(int(Initial_Exposure))
self.CP.Exposure_Calibration.connect(self.Video_Thread.Exposure_update)
self.CP.show()
class Worker(QObject):
ImageUpdate = pyqtSignal(QImage)
def run(self):
self.ThreadActive = True
self.Capture = cv2.VideoCapture(1, cv2.CAP_DSHOW)
self.Capture.set(3, 1920)
self.Capture.set(4, 1080)
while self.ThreadActive:
ret, frame = self.Capture.read()
if ret:
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Converting Video into QT5 readable format
qt_video_format = QImage(image.data, image.shape[1], image.shape[0], QImage.Format_RGB888)
qt_picture = qt_video_format.scaled(1280, 720, Qt.KeepAspectRatio)
self.ImageUpdate.emit(qt_picture)
def stop_video(self):
self.ThreadActive = False
self.Capture.release()
def Camera_Initial_Parameters(self):
return self.Capture.get(cv2.CAP_PROP_EXPOSURE)
def Exposure_update(self, value):
self.Capture.set(cv2.CAP_PROP_EXPOSURE, value)
class CameraParameters(QDialog):
Exposure_Calibration = pyqtSignal(int)
def __init__(self):
super().__init__()
uic.loadUi('Cam_Parameters.ui', self)
# Sliders
self.Exposure_Slider = self.findChild(QSlider, 'ExposureSlider')
self.Exposure_Slider.setRange(-10, 10)
self.White_Balance_Slider = self.findChild(QSlider, 'WBSlider')
self.White_Balance_Slider.setMinimum(-10)
self.White_Balance_Slider.setMaximum(10)
self.Brightness_Slider = self.findChild(QSlider, 'BrightnessSlider')
self.Brightness_Slider.setMinimum(0)
self.Brightness_Slider.setMaximum(300)
self.Saturation_Slider = self.findChild(QSlider, 'SaturationSlider')
self.Saturation_Slider.setMinimum(0)
self.Saturation_Slider.setMaximum(300)
self.Contrast_Slider = self.findChild(QSlider, 'ContrastSlider')
self.Contrast_Slider.setMinimum(-10)
self.Contrast_Slider.setMaximum(10)
self.Gamma_Slider = self.findChild(QSlider, 'GammaSlider')
self.Gamma_Slider.setMinimum(-10)
self.Gamma_Slider.setMaximum(10)
self.Sharpness_Slider = self.findChild(QSlider, 'SharpnessSlider')
self.Sharpness_Slider.setMinimum(0)
self.Sharpness_Slider.setMaximum(100)
# Sliders values
self.Exposure_Value = self.findChild(QLabel, 'Exposure_Value')
self.White_Balance_Value = self.findChild(QLabel, 'WB_value')
self.Brightness_Value = self.findChild(QLabel, 'Brightness_value')
self.Saturation_Value = self.findChild(QLabel, 'Saturation_value')
self.Contrast_Value = self.findChild(QLabel, 'Contrast_value')
self.Gamma_Value = self.findChild(QLabel, 'Gamma_value')
self.Sharpness_Value = self.findChild(QLabel, 'Sharpness_value')
# Connections
self.Exposure_Slider.valueChanged.connect(self.Exposure_sliding)
def Setup_Exposure(self, value):
self.Exposure_Slider.setValue(value)
self.Exposure_Value.setText(str(value))
def Exposure_sliding(self, value):
self.Exposure_Value.setText(str(value))
self.Exposure_Calibration.emit(value)
if __name__ == "__main__":
App = QApplication(sys.argv)
Root = MainFrame()
Root.show()
sys.exit(App.exec())
See Cam_Parameters.ui and MainFrame.ui for the GUI layouts.
I admit I don't know why this worked, but changing the connection to a lambda function did the trick.
self.CP.Exposure_Calibration.connect(lambda x: self.Video_Thread.Exposure_update(x))
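My best guess at why this works (I have not verified it): Worker.run() never returns to its thread's event loop, so a queued connection to Worker.Exposure_update (the default when the receiver lives in another thread) is never delivered, whereas a plain lambda has no QObject receiver and is called directly in the emitting (GUI) thread. If that is right, forcing a direct connection should behave the same way:

# Sketch (assumption): an explicit direct connection, so the slot runs in the
# emitting (GUI) thread instead of being queued behind the worker's busy loop.
self.CP.Exposure_Calibration.connect(self.Video_Thread.Exposure_update, Qt.DirectConnection)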
Related
I have written an application in PyQt5. I am basically displaying a camera feed (in this case my webcam), but the problem is that the frame size keeps increasing at run time and ultimately grows beyond my laptop screen. I'm unable to figure out what the problem is.
Can anyone please explain what I'm doing wrong here?
Below is the code snippet.
from PyQt5 import QtCore, QtGui, QtWidgets
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils
class CameraWidget(QtWidgets.QWidget):
"""Independent camera feed
Uses threading to grab IP camera frames in the background
@param width - Width of the video frame
@param height - Height of the video frame
@param stream_link - IP/RTSP/Webcam link
@param aspect_ratio - Whether to maintain frame aspect ratio or force into frame
"""
def __init__(self, width=0, height=0, aspect_ratio=False, parent=None, deque_size=1):
super(CameraWidget, self).__init__(parent)
# Initialize deque used to store frames read from the stream
self.deque = deque(maxlen=deque_size)
self.maintain_aspect_ratio = aspect_ratio
self.camera_stream_link = 0
# Flag to check if camera is valid/working
self.online = False
self.capture = None
self.video_frame = QtWidgets.QLabel()
self.load_network_stream()
# Start background frame grabbing
self.get_frame_thread = Thread(target=self.get_frame, args=())
self.get_frame_thread.daemon = True
self.get_frame_thread.start()
# Periodically set video frame to display
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.set_frame)
self.timer.start(.5)
print('Started camera: {}'.format(self.camera_stream_link))
def load_network_stream(self):
"""Verifies stream link and open new stream if valid"""
def load_network_stream_thread():
if self.verify_network_stream(self.camera_stream_link):
self.capture = cv2.VideoCapture(self.camera_stream_link)
self.online = True
self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
self.load_stream_thread.daemon = True
self.load_stream_thread.start()
def verify_network_stream(self, link):
"""Attempts to receive a frame from given link"""
cap = cv2.VideoCapture(link)
if not cap.isOpened():
return False
cap.release()
return True
def get_frame(self):
# time.sleep(5)
"""Reads frame, resizes, and converts image to pixmap"""
while True:
try:
if self.capture.isOpened() and self.online:
# Read next frame from stream and insert into deque
status, frame = self.capture.read()
if status:
self.deque.append(frame)
else:
self.capture.release()
self.online = False
else:
# Attempt to reconnect
print('attempting to reconnect', self.camera_stream_link)
self.load_network_stream()
self.spin(2)
self.spin(.001)
except AttributeError:
pass
def spin(self, seconds):
"""Pause for set amount of seconds, replaces time.sleep so program doesnt stall"""
time_end = time.time() + seconds
while time.time() < time_end:
QtWidgets.QApplication.processEvents()
def set_frame(self):
"""Sets pixmap image to video frame"""
if not self.online:
self.spin(1)
return
if self.deque and self.online:
# Grab latest frame
frame = self.deque[-1]
# Keep frame aspect ratio
if self.maintain_aspect_ratio:
self.frame = imutils.resize(frame, width=self.screen_width)
# Force resize
else:
self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
h, w, ch = self.frame.shape
bytesPerLine = ch * w
# Convert to pixmap and set to video frame
self.img = QtGui.QImage(self.frame, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
self.pix = QtGui.QPixmap.fromImage(self.img)
self.video_frame.setPixmap(self.pix)
def set_frame_params(self, width, height):
self.screen_width = width
self.screen_height = height
def get_video_frame(self):
self.video_frame.setScaledContents(True)
return self.video_frame
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
# Middle frame
self.mid_frame = QtWidgets.QFrame()
self.mid_frame.setStyleSheet("background-color: rgb(153, 187, 255)")
self.camera = CameraWidget()
# Create camera widgets
print('Creating Camera Widgets...')
self.video_frame = self.camera.get_video_frame()
self.mid_layout = QtWidgets.QHBoxLayout()
self.mid_layout.addWidget(self.video_frame)
self.mid_frame.setLayout(self.mid_layout)
self.widget = QtWidgets.QWidget()
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.mid_frame)
self.layout.setContentsMargins(0,0,0,0)
self.layout.setSpacing(0)
self.widget.setLayout(self.layout)
self.setCentralWidget(self.widget)
def event(self, e):
if e.type() in (QtCore.QEvent.Show, QtCore.QEvent.Resize):
print("resize ", self.mid_frame.width(), self.mid_frame.height())
self.camera.set_frame_params(self.mid_frame.width()-10, self.mid_frame.height()-10)
return QtWidgets.QMainWindow.event(self, e)
if __name__ == '__main__':
# Create main application window
app = QtWidgets.QApplication([])
app.setStyle(QtWidgets.QStyleFactory.create("Cleanlooks"))
w = MainWindow()
w.showMaximized()
sys.exit(app.exec_())
I'm trying to display an RTSP stream via the Kivy video player. This runs fine, but I get a 2 or 3 second delay in the stream, which I would ideally like to reduce to 0.5 to 1 seconds.
Here's what I have:
from kivy.app import App
from kivy.uix.video import Video
class TestApp(App):
def build(self):
video = Video(source='rtsp://my-stream-address', state='play')
video.size = (720, 320)
video.opacity = 0
video.state = 'play'
video.bind(texture=self._play_started)
return video
def _play_started(self, instance, value):
instance.opacity = 1
if __name__ == '__main__':
TestApp().run()
EDIT
I have a working solution for the video streaming, BUT I don't know how to get it into my Kivy GUI.
Here's my streaming solution:
from threading import Thread
import cv2, time
class ThreadedCamera(object):
def __init__(self, src=0):
self.capture = cv2.VideoCapture(src)
self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
self.FPS = 1/30
self.FPS_MS = int(self.FPS * 1000)
# Start frame retrieval thread
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
def update(self):
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
time.sleep(self.FPS)
def show_frame(self):
cv2.imshow('frame', self.frame)
cv2.waitKey(self.FPS_MS)
if __name__ == '__main__':
src = 'rtsp://my-stream-address'
threaded_camera = ThreadedCamera(src)
while True:
try:
threaded_camera.show_frame()
except AttributeError:
pass
EDIT 2
I have also found this implementation of a Kivy video widget that does not use the built-in Video widget. I'm still unsure how to combine my working solution with a Kivy widget, but perhaps this can help someone help me:
# imports needed by this snippet
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.properties import ObjectProperty, NumericProperty
import cv2
class KivyCamera(Image):
source = ObjectProperty()
fps = NumericProperty(30)
def __init__(self, **kwargs):
super(KivyCamera, self).__init__(**kwargs)
self._capture = None
if self.source is not None:
self._capture = cv2.VideoCapture(self.source)
Clock.schedule_interval(self.update, 1.0 / self.fps)
def on_source(self, *args):
if self._capture is not None:
self._capture.release()
self._capture = cv2.VideoCapture(self.source)
@property
def capture(self):
return self._capture
def update(self, dt):
ret, frame = self.capture.read()
if ret:
buf1 = cv2.flip(frame, 0)
buf = buf1.tostring()
image_texture = Texture.create(
size=(frame.shape[1], frame.shape[0]), colorfmt="bgr"
)
image_texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
self.texture = image_texture
My initial question was about the Kivy Video player widget, but the solution I am finding uses threading with OpenCV, so I have changed the tags on this question and will accept any Kivy implementation of this solution.
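In case it helps anyone trying to answer: here is a rough, untested sketch of how I imagine my ThreadedCamera could be combined with the KivyCamera idea above. The names StreamImage and StreamApp are placeholders I made up, and it assumes the ThreadedCamera class from my streaming solution is defined in the same file.

from kivy.app import App
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.image import Image
import cv2

class StreamImage(Image):
    # Placeholder widget: shows the latest frame grabbed by a ThreadedCamera
    def __init__(self, camera, fps=30, **kwargs):
        super(StreamImage, self).__init__(**kwargs)
        self.camera = camera
        Clock.schedule_interval(self.update, 1.0 / fps)

    def update(self, dt):
        frame = getattr(self.camera, 'frame', None)  # may not exist until the first read
        if frame is None:
            return
        buf = cv2.flip(frame, 0).tobytes()
        texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        self.texture = texture

class StreamApp(App):
    def build(self):
        # ThreadedCamera is the class from my streaming solution above
        return StreamImage(ThreadedCamera('rtsp://my-stream-address'))

if __name__ == '__main__':
    StreamApp().run()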
I am trying to retrieve the value of x, a counter in the class ShowVideo, and display it on a button in the ImageViewer class. The video runs perfectly, but I can't work out the logic for showing the counter once the video starts running. All the functions should work simultaneously; x is the counter variable declared inside the startVideo() method of the ShowVideo class. I want to update the value of the button button_in (class ImageViewer, method initUI()) as a counter.
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal,QRect,QThread
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QPixmap
filenameOpen =0
class ShowVideo(QtCore.QObject):  # it only runs the video
camera = cv2.VideoCapture(filenameOpen)
VideoSignal = QtCore.pyqtSignal(QtGui.QImage)
#############################################
# newValue=QtCore.pyqtSignal(int)
# stopped= pyqtSignal()
#################################################
def __init__(self, parent = None):
# super(ShowVideo, self).__init__(parent)
super().__init__()
@QtCore.pyqtSlot()
def startVideo(self):
run_video = True
# self.counterThread.startVideo()QImage
x=0
while run_video:
# ret, image = self.camera.read()
ret, image = self.camera.read()
height, width, channels = image.shape
frame=image.copy()
frameClone = frame.copy()
# frame = cv2.resize(frame, (1920, 1080))
color_swapped_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
height, width, _ = color_swapped_image.shape
qt_image = QtGui.QImage(color_swapped_image.data,
width,
height,
color_swapped_image.strides[0],
QtGui.QImage.Format_RGB888)
self.VideoSignal.emit(qt_image)
# return x
# print(x)
# x=x+1
class ImageViewer(QtWidgets.QMainWindow):
def __init__(self, parent = None):
super().__init__()
left=0
top=0
width=1920
height=1080
iconName="icon.png"
self.setWindowIcon(QtGui.QIcon(iconName))
self.setGeometry(left,top,width,height)
self.image = QtGui.QImage()
self.initUI()
self.show()
def initUI(self):
self.setWindowTitle('Emotion Analysis')
button_video = QPushButton(self)
button_file = QPushButton(self)
button_play = QPushButton(self)
self.button_in=QPushButton(self)
button_out=QPushButton(self)
button_total=QPushButton(self)
self.button_stop = QPushButton(self)
self.label_image=QLabel(self)
label_image_blank=QLabel(self)
label_in=QLabel(self)
label_out=QLabel(self)
label_total=QLabel(self)
# button definations>>>>>>>>>>>>>>>>>>>>
button_video.setGeometry((QRect(10,30,90,65)))# syntax(x,y,<>,^)
button_video.setIcon(QtGui.QIcon("securitycamera.png"))
button_video.setIconSize(QtCore.QSize(50,50))
# button.setToolTip("This is Click Me Button")
button_video.setToolTip("<h4>Live Stream<h4>")
button_video.clicked.connect(vid.startVideo)
button_file.setGeometry((QRect(110,30,90,65)))# syntax(x,y,<>,^)
button_file.setIcon(QtGui.QIcon("file.png"))
button_file.setIconSize(QtCore.QSize(50,50))
button_file.setToolTip("<h4>Add new connection<h4>")
button_file.clicked.connect(QtWidgets.qApp.quit) # this line is also working condition, the quit() method is defined above
button_play.setGeometry((QRect(1710,30,90,65)))# syntax(x,y,<>,^)
# button_play.setGeometry((QRect(1710,300,90,65)))# syntax(x,y,<>,^)
button_play.setIcon(QtGui.QIcon("play_red.png"))
button_play.setIconSize(QtCore.QSize(50,50))
button_play.setToolTip("<h4>Play video<h4>")
button_play.clicked.connect(vid.startVideo)
self.button_stop.setGeometry((QRect(1820,30,90,65)))# syntax(x,y,<>,^)
self.button_stop.setIcon(QtGui.QIcon("stop.png"))
self.button_stop.setIconSize(QtCore.QSize(50,50))
self.button_stop.setToolTip("<h4>Stop Video<h4>")
self.button_stop.clicked.connect(QApplication.instance().quit)
#############################################################
self.button_in.setGeometry((QRect(1710,500,90,45)))# syntax(x,y,<>,^)
self.button_in.setText("0")# it should be updated while counter runs
# self.button_in.setText(x)
self.button_in.setFont(QtGui.QFont("Sanserif",20))
# self.counterThread=QThread()
# self.counter=ShowVideo()
# self.counter.moveToThread(self.counterThread)
# self.button_in.clicked.connect(self.startCounting)
# self.vid.newValue.connect(self.button_in.setText)
# self.counterThread.started.connect(self.counter.startVideo)
#####################################################################
button_out.setGeometry((QRect(1710,550,90,45)))# syntax(x,y,<>,^)
button_out.setText("0")
button_out.setFont(QtGui.QFont("Sanserif",20))
button_total.setGeometry((QRect(1710,600,90,45)))# syntax(x,y,<>,^)
button_total.setText("0")
button_total.setFont(QtGui.QFont("Sanserif",20))
# label definations>>>>>>>>>>>>>>>>>>>>
self.label_image.setGeometry((QRect(10,110,1500,900))) # syntax(x,y,<>,^)
self.label_image.setPixmap(QPixmap("black.jpg"))
self.label_image.setScaledContents(True)
label_in.setGeometry((QRect(1600,500,100,50))) # syntax(x,y,<>,^)
label_in.setText("In")
label_in.setFont(QtGui.QFont("Sanserif",20))
label_out.setGeometry((QRect(1600,550,100,50))) # syntax(x,y,<>,^)
label_out.setText("Out")
label_out.setFont(QtGui.QFont("Sanserif",20))
label_total.setGeometry((QRect(1600,600,100,50))) # syntax(x,y,<>,^)
label_total.setText("Total")
label_total.setFont(QtGui.QFont("Sanserif",20))
@QtCore.pyqtSlot(QtGui.QImage)
def setImage(self, image):
self.label_image.setPixmap(QPixmap.fromImage(image))
# self.vid.newValue.connect(self.button_in.setText)
# #QtCore.pyqtSlot(int)
# def startCounting(self,x):
# # # if not self.counterThread.isRunning():
# # # self.counterThread.startVideo()
# # # pass
# self.vid.newValue.connect(self.button_in.setText)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
thread = QtCore.QThread() #thread declareation
thread.start()
vid = ShowVideo() # calling 1st class
vid.moveToThread(thread)
image_viewer = ImageViewer() #calling second class
vid.VideoSignal.connect(image_viewer.setImage)
# vid.VideoSignal.connect(image_viewer.startCounting)
sys.exit(app.exec_())
Probably the easiest is to emit the frame number with the image when emitting ShowVideo.VideoSignal in ShowVideo.startVideo, and then connect vid.VideoSignal to a lambda that updates image_viewer.button_in, i.e.
class ShowVideo(QtCore.QObject):
VideoSignal = QtCore.pyqtSignal(QtGui.QImage, int)
....
@QtCore.pyqtSlot()
def startVideo(self):
....
x += 1
self.VideoSignal.emit(qt_image, x)
....
if __name__ == '__main__':
....
vid.VideoSignal.connect(lambda img, frame: image_viewer.button_in.setText(str(frame)) )
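For completeness, here is a minimal, self-contained toy example of the same pattern (my own made-up names, not the asker's code): a QTimer-driven object emits a payload together with a counter, and a lambda unpacks the counter into a button's text.

import sys
from PyQt5 import QtCore, QtWidgets

class Producer(QtCore.QObject):
    # payload plus a counter, same shape as VideoSignal above
    valueChanged = QtCore.pyqtSignal(str, int)

    def __init__(self, parent=None):
        super().__init__(parent)
        self._x = 0
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._tick)
        self._timer.start(200)

    def _tick(self):
        self._x += 1
        self.valueChanged.emit('frame', self._x)

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    button = QtWidgets.QPushButton('0')
    button.show()
    producer = Producer()
    # unpack the extra argument in a lambda, as in the connection above
    producer.valueChanged.connect(lambda payload, n: button.setText(str(n)))
    sys.exit(app.exec_())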
I'm new to GUI programming and need help with a QThread application.
I designed a GUI program which records a signal from the microphone and plots it in a figure at the same time.
Now I want to evaluate the signal in another thread, so that it still records and plots in the GUI.
The streaming and plotting work fine, but every time I start the thread the GUI freezes and then exits.
Does somebody know what I did wrong in my code? I don't have that much programming experience.
# Imports ----------------------------
import sys
import time
import numpy as np
import pyaudio
from PyQt5 import QtGui, QtWidgets, QtCore
import matplotlib
from matplotlib.mlab import find
import matplotlib.gridspec as gridspec
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
matplotlib.use('Qt5Agg')
class Window(QtWidgets.QMainWindow):
def __init__(self): # template for rest of GUI,
super(Window, self).__init__()
self.setGeometry(50, 50, 1500, 900)
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.channels = 2 # StereoSignal
self.fs = 44100 # Samplingrate
self.Chunks = 4096 # Buffersize
self.streamstart = False
self.audiodata = [] # to buffer streaming-values in
self.tapeLength = 4 # seconds
self.tape = np.empty(self.fs * self.tapeLength) * np.nan # tape to store signal-chunks
self.home()
def home(self):
btn = QtWidgets.QPushButton("Stream and Plot", self) # Button to start streaming
btn.clicked.connect(self.plot)
btn.move(100, 100)
btn = QtWidgets.QPushButton("Stop", self) # Button to stop streaming
btn.clicked.connect(self.stop_signal)
btn.move(200, 100)
btn = QtWidgets.QPushButton("Evaluate", self) # Button for the Evaluation
btn.clicked.connect(self.evaluation)
btn.move(100, 140)
self.textEdit = QtWidgets.QTextEdit(self) # Show text of evaluation
self.textEdit.move(250, 170)
self.textEdit.resize(200, 200)
self.scrollArea = QtWidgets.QScrollArea(self) # Scroll-Area to plot signal (Figure) in
self.scrollArea.move(75, 400)
self.scrollArea.resize(600, 300)
self.scrollArea.setWidgetResizable(False)
self.figure = Figure((15, 2.8), dpi=100) # figure instance (to plot on) F(width, height, ...)
self.canvas = FigureCanvas(self.figure)
self.scrollArea.setWidget(self.canvas)
self.gs = gridspec.GridSpec(1, 1)
self.ax = self.figure.add_subplot(self.gs[0])
self.figure.subplots_adjust(left=0.05)
def start_stream(self, start=True):
"""start a Signal-Stream with pyAudio, with callback (to also play immediately)"""
if start is True:
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paFloat32, channels=self.channels, rate=self.fs, input=True,
output=True, frames_per_buffer=self.Chunks, stream_callback=self.callback)
self.streamstart = True
self.stream.start_stream()
print("Recording...")
def callback(self, in_data, frame_count, time_info, flag):
"""Callback-Function which stores the streaming data in a list"""
data = np.fromstring(np.array(in_data).flatten(), dtype=np.float32)
self.audiodata = data
print("appending...")
return data, pyaudio.paContinue
def tape_add(self):
"""add chunks from (callback)-list to tapes for left and right Signalparts"""
if self.streamstart:
self.tape[:-self.Chunks] = self.tape[self.Chunks:]
self.taper = self.tape # tape for right signal
self.tapel = self.tape # tape for left signal
self.tapel[-self.Chunks:] = self.audiodata[::2]
self.taper[-self.Chunks:] = self.audiodata[1::2]
print("taping...")
else:
print("No streaming values found")
def plot(self):
"""Start the streaming an plot the signal"""
print("(Stereo-)Signal streaming & plotting...")
if self.streamstart:
pass
else:
self.start_stream(start=True)
self.t1 = time.time()
time.sleep(0.5)
while self.streamstart:
QtWidgets.QApplication.processEvents() # does this still work with threads?
print("Plotting...")
self.tape_add()
self.timeArray = np.arange(self.taper.size)
self.timeArray = (self.timeArray / self.fs) * 1000 # scale to milliseconds
self.ax.clear()
self.ax.plot(self.timeArray, (self.taper / np.max(np.abs(self.taper))), '-b')
self.ax.grid()
self.ax.set_ylabel("Amplitude")
self.ax.set_xlabel("Samples")
self.canvas.draw()
def stop_signal(self):
print("Stopping Signal.")
if self.streamstart:
print("Stop Recording")
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
self.streamstart = False
else:
pass
def evaluation(self):
""" Start the evaluation in another Thread"""
threader = WorkerThread(self.taper, self.tapel)
thread = QtCore.QThread()
# threader.threadDone.connect(self.thread_done) # doesn't work yet
thread.started.connect(threader.run)
thread.start() # start thread
class WorkerThread(QtCore.QObject):
def __init__(self, taper, tapel): # take the tape-parts from the original thread
# super().__init__() # do I need this or next?
QtCore.QThread.__init__(self)
self.__taper = taper
self.__tapel = tapel
def run(self):
"""Do evaluation, later mor, for now just some calculations"""
print("Evaluating Signal")
self.tpr = self.__taper.astype(np.float32, order='C') / 32768 # here the GUI freezes and then exits
self.tpl = self.__tapel.astype(np.float32, order='C') / 32768
# cut nan-values if there are some
self.r = self.tpr[~np.isnan(self.tpr)]
self.l = self.tpl[~np.isnan(self.tpl)]
# normalize signals
self.left2 = (self.l / np.max(np.abs(self.l)))
self.right2 = (self.r / np.max(np.abs(self.r)))
self.norm_audio2 = np.array((self.left2, self.right2)) # like channels (in de_interlace)
# do some calculations
self.databew = """ Mute, Loudness and PSNR/MOS...
Dominant fundamental frequencies etc.
"""
print(self.databew)
# self.textEdit.append(self.databew) # would this work?
# self.threadDone.emit('Thread-Bewertung Done.') # later implemented
def main():
app = QtWidgets.QApplication(sys.argv)
GUI = Window()
GUI.show()
sys.exit(app.exec_())
main()
So the streaming parts work; maybe someone can tell me what's wrong with the threading part, where I want to do some simple calculations with the recorded signal?
The thread doesn't work while the signal is still recording, but it also doesn't work when I stop the recording and plotting and have the signal in a buffer.
I'm sorry I couldn't get a simpler program with similar values working where the same problem occurs.
I hope someone can still help me.
Thanks, Julia
After trying out a few different things I found a solution. The problem was indeed the QApplication.processEvents part. This is for completing loops in PyQt, but mine is an endless loop, only stopped after a button click. This is why the GUI froze every time I used it.
The solution was to put the plotting part into a new thread as well, which can access the GUI window.
Here is the new code, which works fine and is reasonably fast:
# Imports ----------------------------
import sys
import time
import numpy as np
import pyaudio
from PyQt5 import QtGui, QtWidgets, QtCore
import matplotlib
import matplotlib.gridspec as gridspec
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
matplotlib.use('Qt5Agg')
class Window(QtWidgets.QMainWindow):
def __init__(self): # template for rest of GUI,
super(Window, self).__init__()
self.setGeometry(50, 50, 1500, 900)
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.channels = 2 # StereoSignal
self.fs = 44100 # Samplingrate
self.Chunks = 4096 # Buffersize
self.streamstart = False
self.audiodata = [] # to buffer streaming-values in
self.tapeLength = 4 # seconds
self.tape = np.empty(self.fs * self.tapeLength) * np.nan # tape to store signal-chunks
self.home()
def home(self):
btn = QtWidgets.QPushButton("Stream and Plot", self) # Button to start streaming
btn.clicked.connect(self.plot)
btn.move(100, 100)
btn = QtWidgets.QPushButton("Stop", self) # Button to stop streaming
btn.clicked.connect(self.stop_signal)
btn.move(200, 100)
btn = QtWidgets.QPushButton("Evaluate", self) # Button for the Evaluation
btn.clicked.connect(self.evaluation)
btn.move(100, 140)
self.textEdit = QtWidgets.QTextEdit(self) # Show text of evaluation
self.textEdit.move(250, 170)
self.textEdit.resize(200, 200)
self.scrollArea = QtWidgets.QScrollArea(self) # Scroll-Area to plot signal (Figure) in
self.scrollArea.move(75, 400)
self.scrollArea.resize(600, 300)
self.scrollArea.setWidgetResizable(False)
self.figure = Figure((15, 2.8), dpi=100) # figure instance (to plot on) F(width, height, ...)
self.canvas = FigureCanvas(self.figure)
self.scrollArea.setWidget(self.canvas)
self.gs = gridspec.GridSpec(1, 1)
self.ax = self.figure.add_subplot(self.gs[0])
self.figure.subplots_adjust(left=0.05)
def start_stream(self, start=True):
"""start a Signal-Stream with pyAudio, with callback (to also play immediately)"""
if start is True:
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paFloat32, channels=self.channels, rate=self.fs, input=True,
output=True, frames_per_buffer=self.Chunks, stream_callback=self.callback)
self.streamstart = True
self.stream.start_stream()
print("Recording...")
def callback(self, in_data, frame_count, time_info, flag):
"""Callback-Function which stores the streaming data in a list"""
data = np.fromstring(np.array(in_data).flatten(), dtype=np.float32)
self.audiodata = data
print("appending...")
return data, pyaudio.paContinue
def tape_add(self):
"""add chunks from (callback)-list to tapes for left and right Signalparts"""
if self.streamstart:
self.tape[:-self.Chunks] = self.tape[self.Chunks:]
self.taper = self.tape # tape for right signal
self.tapel = self.tape # tape for left signal
self.tapel[-self.Chunks:] = self.audiodata[::2]
self.taper[-self.Chunks:] = self.audiodata[1::2]
print("taping...")
else:
print("No streaming values found")
def plot(self):
"""Start the streaming an plot the signal"""
print("(Stereo-)Signal streaming & plotting...")
self.plot_thread = PlotThread(self)
self.plot_thread.start()
def stop_signal(self):
print("Stopping Signal.")
if self.streamstart:
print("Stop Recording")
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
self.streamstart = False
self.plot_thread.stop()
else:
pass
def evaluation(self):
""" Start the evaluation in another Thread"""
self.thread = WorkerThread(self, self.taper, self.tapel)
self.thread.start() # start thread
class PlotThread(QtCore.QThread):
def __init__(self, window):
QtCore.QThread.__init__(self)
self.deamon = True
self.__is_running = True
self.window = window
def stop(self):
self.__is_running = False
def run(self):
if self.window.streamstart:
pass
else:
self.window.start_stream(start=True)
self.window.t1 = time.time()
time.sleep(0.5)
while self.window.streamstart and self.__is_running:
print("Plotting...")
self.window.tape_add()
self.window.timeArray = np.arange(self.window.taper.size)
self.window.timeArray = (self.window.timeArray / self.window.fs) * 1000 # scale to milliseconds
self.window.ax.clear()
self.window.ax.plot(self.window.timeArray, (self.window.taper / np.max(np.abs(self.window.taper))), '-b')
self.window.ax.grid()
self.window.ax.set_ylabel("Amplitude")
self.window.ax.set_xlabel("Samples")
self.window.canvas.draw()
class WorkerThread(QtCore.QThread):
def __init__(self, window, taper, tapel): # take the tape-parts from the original thread
QtCore.QThread.__init__(self)
self.__taper = taper
self.__tapel = tapel
self.deamon = True
self.window = window
def run(self):
"""Do evaluation, later mor, for now just some calculations"""
print("Evaluating Signal")
self.tpr = self.__taper.astype(np.float32, order='C') / 32768  # this is where the GUI used to freeze and exit
self.tpl = self.__tapel.astype(np.float32, order='C') / 32768
# cut nan-values if there are some
self.r = self.tpr[~np.isnan(self.tpr)]
self.l = self.tpl[~np.isnan(self.tpl)]
# normalize signals
self.left2 = (self.l / np.max(np.abs(self.l)))
self.right2 = (self.r / np.max(np.abs(self.r)))
self.norm_audio2 = np.array((self.left2, self.right2)) # like channels (in de_interlace)
# do some calculations
self.databew = """ Mute, Loudness and PSNR/MOS...
Dominant fundamental frequencies etc.
"""
print(self.databew)
self.window.textEdit.append(self.databew) # would this work?
def main():
app = QtWidgets.QApplication(sys.argv)
GUI = Window()
GUI.show()
sys.exit(app.exec_())
main()
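One more idea I have not tried yet: the commented-out threadDone signal from the first version could be used to send the result back instead of appending to textEdit from inside run(), so that all widget access stays in the main thread. A rough sketch, under the assumption that the rest of the class stays the same:

class WorkerThread(QtCore.QThread):
    threadDone = QtCore.pyqtSignal(str)  # carries the evaluation text

    def __init__(self, taper, tapel, parent=None):
        QtCore.QThread.__init__(self, parent)
        self.__taper = taper
        self.__tapel = tapel

    def run(self):
        # ... same calculations as above ...
        databew = """ Mute, Loudness and PSNR/MOS...
        Dominant fundamental frequencies etc.
        """
        self.threadDone.emit(databew)

# and in Window.evaluation():
#     self.thread = WorkerThread(self.taper, self.tapel, self)
#     self.thread.threadDone.connect(self.textEdit.append)  # delivered in the GUI thread
#     self.thread.start()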
I have been trying to create a tkinter toplevel window that streams video from the webcam and does a QR scan. I got this QR scan code from SO, and another piece of code that just updates images from the webcam instead of streaming the video onto a tkinter label.
I tried to combine both of these so that a toplevel window has a label updating images from the webcam and a close button to close the toplevel window. While it streams the images, it can scan for a QR code, and if a scan is successful, the webcam and the toplevel window get closed.
Here is what I tried.
import cv2
import cv2.cv as cv
import numpy
import zbar
import time
import threading
import Tkinter
from PIL import Image, ImageTk
class BarCodeScanner(threading.Thread, Tkinter.Toplevel):
def __init__(self):
# made global so the image can be accessed outside this thread,
# to update the image on the tkinter window
global imgtk
imgtk = None
threading.Thread.__init__(self)
self.WINDOW_NAME = 'Camera'
self.CV_SYSTEM_CACHE_CNT = 5 # Cv has 5-frame cache
self.LOOP_INTERVAL_TIME = 0.2
cv.NamedWindow(self.WINDOW_NAME, cv.CV_WINDOW_NORMAL)
self.cam = cv2.VideoCapture(-1)
self.confirm = 0
def scan(self, aframe):
imgray = cv2.cvtColor(aframe, cv2.COLOR_BGR2GRAY)
# to show the coloured image, as in the other code mentioned above
imgcol = cv2.cvtColor(aframe, cv2.COLOR_BGR2RGBA)
imgcol_array = Image.fromarray(imgcol)
imgtk = ImageTk.PhotoImage(image=imgcol_array)
raw = str(imgray.data)
scanner = zbar.ImageScanner()
scanner.parse_config('enable')
width = int(self.cam.get(cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(self.cam.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
imageZbar = zbar.Image(width, height,'Y800', raw)
scanner.scan(imageZbar)
for symbol in imageZbar:
print 'decoded', symbol.type, 'symbol', '"%s"' % symbol.data
return symbol.data
def run(self):
self.datalst = []
print 'BarCodeScanner run', time.time()
while True:
for i in range(0,self.CV_SYSTEM_CACHE_CNT):
self.cam.read()
img = self.cam.read()
self.data = self.scan(img[1])
cv2.imshow(self.WINDOW_NAME, img[1])
cv.WaitKey(1)
time.sleep(self.LOOP_INTERVAL_TIME)
if self.data:
self.datalst.append(self.data)
# I added this section so that it waits for a scan;
# if the same value is read in 2 consecutive scans,
# the webcam has to stop
if len(self.datalst) == 2 and len(set(self.datalst)) <= 1:
# I want to close the webcam before closing the toplevel window
#self.cam.release()
#cv2.destroyAllWindows()
break
self.cam.release()
def Video_Window():
video_window = Tkinter.Toplevel()
video_window.title('QR Scan !!')
img_label = Tkinter.Label(video_window)
img_label.pack(side=Tkinter.TOP)
close_button = Tkinter.Button(video_window, text='close', command = video_window.destroy)
close_button.pack(side=Tkinter.TOP)
def update_frame():
global imgtk
img_label.configure(image=imgtk)
img_label.after(10,update_frame)
update_frame()
def main():
root = Tkinter.Tk()
button_scanQr = Tkinter.Button(root, text='QR Scan', command=start_scan)
button_scanQr.pack()
root.mainloop()
def start_scan():
scanner = BarCodeScanner()
scanner.start()
Video_Window()
#scanner.join()
main()
The problem is:
I actually wanted to display the video in the Toplevel window, not the OpenCV window,
and at the same time do a QR scan; if a read is successful, the Toplevel window should close without abruptly closing the webcam (because when I try to use self.cam.release() or cv2.destroyAllWindows(), my webcam's light stays on even if I forcefully terminate the program).
What I get now is a separate window created by OpenCV that streams the video. But I don't want that window; instead, I want the video displayed on the tkinter Toplevel window. Also, when there is a successful read, the webcam gets stuck on the final image it reads.
I tried to remove the line responsible for the OpenCV window inside the run method of the BarCodeScanner class:
cv2.imshow(self.WINDOW_NAME, img[1])
but it still showed a different window with no output, and if I try to close that window, another similar one is created, recursively.
UPDATE:
As I noticed I made some silly mistakes without understanding some lines in cv2, I changed the code by adding the toplevel window code into the run method of the class (I'm not sure if this is the right way).
import cv2
import cv2.cv as cv
import numpy
import zbar
import time
import threading
import Tkinter
from multiprocessing import Process, Queue
from Queue import Empty
from PIL import Image, ImageTk
class BarCodeScanner(threading.Thread, Tkinter.Toplevel):
def __init__(self):
threading.Thread.__init__(self)
#self.WINDOW_NAME = 'Camera'
self.CV_SYSTEM_CACHE_CNT = 5 # Cv has 5-frame cache
self.LOOP_INTERVAL_TIME = 0.2
#cv.NamedWindow(self.WINDOW_NAME, cv.CV_WINDOW_NORMAL)
self.cam = cv2.VideoCapture(-1)
# check if webcam device is free
self.proceede = self.cam.isOpened()
if not self.proceede:
return
self.confirm = 0
def scan(self, aframe):
imgray = cv2.cvtColor(aframe, cv2.COLOR_BGR2GRAY)
raw = str(imgray.data)
scanner = zbar.ImageScanner()
scanner.parse_config('enable')
width = int(self.cam.get(cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(self.cam.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
imageZbar = zbar.Image(width, height,'Y800', raw)
scanner.scan(imageZbar)
for symbol in imageZbar:
print 'decoded', symbol.type, 'symbol', '"%s"' % symbol.data
return symbol.data
def run(self):
if not self.proceede:
return
def show_frame():
_, img = self.cam.read()
img = cv2.flip(img,1)
cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
img_label.imgtk = imgtk
img_label.configure(image=imgtk)
video_window.after(250, show_frame)
def destroy_video_window():
self.cam.release()
video_window.destroy()
# Toplevel GUI
video_window = Tkinter.Toplevel()
video_window.title('QR Scan !!')
img_label = Tkinter.Label(video_window)
img_label.pack(side=Tkinter.TOP)
close_button = Tkinter.Button(video_window, text='close', command = destroy_video_window)
close_button.pack(side=Tkinter.RIGHT)
show_frame()
self.datalst = []
print 'BarCodeScanner run', time.time()
while True:
for i in range(0,self.CV_SYSTEM_CACHE_CNT):
self.cam.read()
img = self.cam.read()
self.data = self.scan(img[1])
time.sleep(self.LOOP_INTERVAL_TIME)
if self.data:
self.datalst.append(self.data)
if len(self.datalst) == 2 and len(set(self.datalst)) <= 1:
video_window.destroy()
break
self.cam.release()
def main():
root = Tkinter.Tk()
button_scanQr = Tkinter.Button(root, text='QR Scan', command=scaner)
button_scanQr.pack()
root.mainloop()
def scaner():
scanner = BarCodeScanner()
scanner.start()
main()
Now I can get the image on the Toplevel window, but I don't know how to close the webcam.
Condition 1: when I show a QR code to scan, it reads it successfully and the webcam quits without any error.
Condition 2: when I click the close button on the toplevel window (say, if the user doesn't want to do any scan and just wants to close the webcam), I get an error saying:
libv4l2: error dequeuing buf: Invalid argument
VIDIOC_DQBUF: Invalid argument
select: Bad file descriptor
VIDIOC_DQBUF: Bad file descriptor
select: Bad file descriptor
VIDIOC_DQBUF: Bad file descriptor
Segmentation fault (core dumped)
I am writing this application for Linux, Mac and Windows machines. How can I close or terminate the webcam safely?
Your program has two threads, the main thread and the worker thread that reads frames from the camera. When the close button is clicked, it happens in the main thread. After self.cam.release() the object self.cam is probably in an unusable state, and when a method of self.cam is called by the worker thread, there may be some trouble. Maybe the implementation of cv2.VideoCapture is faulty and it should throw some exception when that happens.
Accessing tkinter widgets from other thread than the main thread may also cause problems.
For clean program termination, creating an instance of threading.Event and then checking event.is_set() at some point in the worker thread could work. For example:
def destroy_video_window():
self.stop_event.set()
video_window.destroy()
and then in the worker thread
while True:
if self.stop_event.is_set():
break
for i in range(0, self.CV_SYSTEM_CACHE_CNT):
self.cam.read()
There are several things that could be done in another way; the following is a modified version of the code. It avoids calling tkinter methods from any thread other than the main thread, event_generate() being the only tkinter method called by the worker thread. Explicit polling is avoided by emitting virtual events, for example <<ScannerQuit>>, which are placed in the tkinter event queue.
import cv2
import cv2.cv as cv
import zbar
import time
import threading
import Tkinter as tk
from PIL import Image, ImageTk
class Scanner(object):
def __init__(self, handler, *args, **kw):
self.thread = threading.Thread(target=self.run)
self.handler = handler
self.CV_SYSTEM_CACHE_CNT = 5 # Cv has 5-frame cache
self.LOOP_INTERVAL_TIME = 0.2
self.cam = cv2.VideoCapture(-1)
self.scanner = zbar.ImageScanner()
self.scanner.parse_config('enable')
self.cam_width = int(self.cam.get(cv.CV_CAP_PROP_FRAME_WIDTH))
self.cam_height = int(self.cam.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
self.last_symbol = None
def start(self):
self.thread.start()
def scan(self, aframe):
imgray = cv2.cvtColor(aframe, cv2.COLOR_BGR2GRAY)
raw = str(imgray.data)
image_zbar = zbar.Image(self.cam_width, self.cam_height, 'Y800', raw)
self.scanner.scan(image_zbar)
for symbol in image_zbar:
return symbol.data
def run(self):
print 'starting scanner'
while True:
if self.handler.need_stop():
break
# explanation for this in
# http://stackoverflow.com/a/35283646/5781248
for i in range(0, self.CV_SYSTEM_CACHE_CNT):
self.cam.read()
img = self.cam.read()
self.handler.send_frame(img)
self.data = self.scan(img[1])
if self.handler.need_stop():
break
if self.data is not None and (self.last_symbol is None
or self.last_symbol != self.data):
# print 'decoded', symbol.type, 'symbol', '"%s"' % symbol.data
self.handler.send_symbol(self.data)
self.last_symbol = self.data
time.sleep(self.LOOP_INTERVAL_TIME)
self.cam.release()
class ScanWindow(tk.Toplevel):
def __init__(self, parent, gui, *args, **kw):
tk.Toplevel.__init__(self, master=parent, *args, **kw)
self.parent = parent
self.gui = gui
self.scanner = None
self.lock = threading.Lock()
self.stop_event = threading.Event()
self.img_label = tk.Label(self)
self.img_label.pack(side=tk.TOP)
self.close_button = tk.Button(self, text='close', command=self._stop)
self.close_button.pack()
self.bind('<Escape>', self._stop)
parent.bind('<<ScannerFrame>>', self.on_frame)
parent.bind('<<ScannerEnd>>', self.quit)
parent.bind('<<ScannerSymbol>>', self.on_symbol)
def start(self):
self.frames = []
self.symbols = []
class Handler(object):
def need_stop(self_):
return self.stop_event.is_set()
def send_frame(self_, frame):
self.lock.acquire(True)
self.frames.append(frame)
self.lock.release()
self.parent.event_generate('<<ScannerFrame>>', when='tail')
def send_symbol(self_, data):
self.lock.acquire(True)
self.symbols.append(data)
self.lock.release()
self.parent.event_generate('<<ScannerSymbol>>', when='tail')
self.stop_event.clear()
self.scanner = Scanner(Handler())
self.scanner.start()
self.deiconify()
def _stop(self, *args):
self.gui.stop()
def stop(self):
if self.scanner is None:
return
self.stop_event.set()
self.frames = []
self.symbols = []
self.scanner = None
self.iconify()
def quit(self, *args):
self.parent.event_generate('<<ScannerQuit>>', when='tail')
def on_symbol(self, *args):
self.lock.acquire(True)
symbol_data = self.symbols.pop(0)
self.lock.release()
print 'symbol', '"%s"' % symbol_data
self.after(500, self.quit)
def on_frame(self, *args):
self.lock.acquire(True)
frame = self.frames.pop(0)
self.lock.release()
_, img = frame
img = cv2.flip(img, 1)
cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
self.img_label.imgtk = imgtk
self.img_label.configure(image=imgtk)
class GUI(object):
def __init__(self, root):
self.root = root
self.scan_window = ScanWindow(self.root, self)
self.scan_window.iconify()
self.root.title('QR Scan !!')
self.lframe = tk.Frame(self.root)
self.lframe.pack(side=tk.TOP)
self.start_button = tk.Button(self.lframe, text='start', command=self.start)
self.start_button.pack(side=tk.LEFT)
self.stop_button = tk.Button(self.lframe, text='stop', command=self.stop)
self.stop_button.configure(state='disabled')
self.stop_button.pack(side=tk.LEFT)
self.close_button = tk.Button(self.root, text='close', command=self.quit)
self.close_button.pack(side=tk.TOP)
self.root.bind('<<ScannerQuit>>', self.stop)
self.root.bind('<Control-s>', self.start)
self.root.bind('<Control-q>', self.quit)
self.root.protocol('WM_DELETE_WINDOW', self.quit)
def start(self, *args):
self.start_button.configure(state='disabled')
self.scan_window.start()
self.stop_button.configure(state='active')
def stop(self, *args):
self.scan_window.stop()
self.start_button.configure(state='active')
self.stop_button.configure(state='disabled')
def quit(self, *args):
self.scan_window.stop()
self.root.destroy()
def main():
root = tk.Tk()
gui = GUI(root)
root.mainloop()
main()