I tried the code from this answer and it crashes after a while (2-10 s) with Process finished with exit code -1073740771 (0xC000041D), and sometimes with 0xC0000005. It crashes immediately if I try to drag the window.
However, when I put time.sleep(0.1) in run it works fine. If I use sleeps shorter than 0.1 it crashes again.
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, Qt
import cv2
import sys
import time

class CamThread(QThread):
    changemap = pyqtSignal('QImage')

    def run(self):
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            ret, img_rgb = cap.read()
            if ret:
                self.rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
                self.convert = QImage(self.rgb.data, self.rgb.shape[1], self.rgb.shape[0], QImage.Format_RGB888)
                self.p = self.convert.scaled(640, 480, Qt.KeepAspectRatio)
                self.changemap.emit(self.p)
                #time.sleep(0.1)

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'webcam'
        self.initUI()

    @pyqtSlot('QImage')
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(100, 100, 640, 480)
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        thr = CamThread(self)
        thr.changemap.connect(self.setImage)
        thr.start()

app = QApplication(sys.argv)
win = App()
#win.setAttribute(Qt.WA_DeleteOnClose, True)
win.show()
app.exit(app.exec_())
I thought the problem was somewhere in the signals/slots, but I haven't been able to find anything relevant.
Windows 10
Python - 3.7
PyQt - 5.12
OpenCV - 3.4.5.20
Fixed it using QMutex and QWaitCondition to prevent the next update call while the main thread is still updating. Apparently that was the issue. eyllanesc, I'm new here as you can see; should I post an answer in the original thread?
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, Qt, QMutex, QWaitCondition
import cv2
import sys
import time

class CamThread(QThread):
    changemap = pyqtSignal('QImage')

    def __init__(self, mutex, condition):
        super().__init__()
        self.mutex = mutex
        self.condition = condition

    def run(self):
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            try:
                ret, img_rgb = cap.read()
                if ret:
                    rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
                    # any other image processing here
                    convert = QImage(rgb.data, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
                    p = convert.scaled(640, 480, Qt.KeepAspectRatio)
                    self.changemap.emit(p)
                    self.condition.wait(self.mutex)
            except:
                print('error')

class App(QWidget):
    time = 0

    def __init__(self):
        super().__init__()
        self.title = 'webcam'
        self.mutex = QMutex()
        self.condition = QWaitCondition()
        self.initUI()

    @pyqtSlot('QImage')
    def setImage(self, image):
        self.mutex.lock()
        try:
            self.label.setPixmap(QPixmap.fromImage(image))
        finally:
            self.mutex.unlock()
            self.condition.wakeAll()

    def initUI(self):
        self.mutex.lock()
        self.setWindowTitle(self.title)
        self.setGeometry(100, 100, 640, 480)
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        self.thr = CamThread(mutex=self.mutex, condition=self.condition)
        self.thr.changemap.connect(self.setImage)
        self.thr.start()

app = QApplication(sys.argv)
win = App()
win.show()
app.exit(app.exec_())
N.B. You still need to properly stop the thread and close the camera connection in this example.
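For completeness, here is a minimal sketch of what that cleanup could look like, assuming the CamThread/App classes above. The running flag, the stop() method and the closeEvent override are illustrative additions, not part of the original answer.

# Sketch only: a stoppable variant of the CamThread above, reusing the same
# imports, mutex and wait condition from the code in this answer.
class CamThread(QThread):
    changemap = pyqtSignal('QImage')

    def __init__(self, mutex, condition):
        super().__init__()
        self.mutex = mutex
        self.condition = condition
        self.running = True  # hypothetical stop flag

    def run(self):
        cap = cv2.VideoCapture(0)
        while self.running:
            ret, img = cap.read()
            if ret:
                rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                frame = QImage(rgb.data, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
                self.changemap.emit(frame.scaled(640, 480, Qt.KeepAspectRatio))
                self.condition.wait(self.mutex)
        cap.release()  # close the camera connection once the loop ends

    def stop(self):
        self.running = False
        self.condition.wakeAll()  # unblock the thread if it is parked in wait()
        self.wait()               # join the QThread

# In App, the window could then stop the worker when it closes:
#     def closeEvent(self, event):
#         self.thr.stop()
#         event.accept()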
Related
I want to make a webcam GUI so that when I click the "cam on" button a second window pops up and the video starts.
When I click the return button the video should stop and go back to the main window, but when I try it I can't see the video, even though the flag is printed.
import threading
import cv2
import threading
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtTest import *
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore

running = False

class MyApp(QMainWindow, QWidget):
    def __init__(self):
        super().__init__()
        self.second_UI = QDialog()
        self.label = QtWidgets.QLabel()
        self.first_UI()
        self.show()

    def first_UI(self):
        btn_start = QPushButton("cam on", self)
        btn_start.move(50, 50)
        btn_start.clicked.connect(self.cam_start)
        btn_start.clicked.connect(self.second_exc)
        self.sb = self.statusBar()
        self.sb.showMessage('Ready')
        # Window setting
        self.setWindowTitle('Select Servo Motor')
        self.setGeometry(300, 300, 1024, 768)
        self.show()

    def cam_run(self):
        global running
        print("Flag1")
        self.cap = cv2.VideoCapture(0)
        # 'http://192.168.66.1:9527/videostream.cgi?loginuse=admin&loginpas=admin')
        self.wid = 500
        self.hei = 500
        self.label.resize(self.wid, self.hei)
        while running:
            print("Flag2")
            self.ret, self.img = self.cap.read()
            if self.ret:
                self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
                self.h, self.w, self.c = self.img.shape
                self.qImg = QtGui.QImage(self.img.data, self.w, self.h, self.w * self.c,
                                         QtGui.QImage.Format_RGB888)
                self.pixmap = QtGui.QPixmap.fromImage(self.qImg)
                self.label.setPixmap(self.pixmap)
                print("Flag3")
        self.cap.release()
        print("Thread end.")

    def cam_start(self):  # , UI_param):
        # UI_param = self.second_UI
        global running
        running = True
        self.th = threading.Thread(target=self.cam_run)
        self.th.start()
        print("started...")

    def second_exc(self):
        self.close()
        self.cam_run
        return_btn = QPushButton('Return', self.second_UI)
        return_btn.move(50, 700)  # set position
        # return_btn.resize(return_btn.sizeHint())
        return_btn.clicked.connect(self.first_UI)
        self.second_UI.setWindowTitle('Select User Mode')
        self.second_UI.setGeometry(300, 300, 1024, 768)
        self.second_UI.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex_ = MyApp()
    sys.exit(app.exec_())
Look at the following code. It does what you are aiming for. I would recommend creating the UI with Qt Designer rather than fumbling around with hand-coded layouts.
import cv2
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QApplication

cap = cv2.VideoCapture(0)

class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        uic.loadUi(r"main_window.ui", self)
        self.btn_start_cam.clicked.connect(self.start_cam)
        self.btn_return.clicked.connect(self.stop_cam)

    def start_cam(self):
        # Initialize video capture
        # scaling factor
        scaling_factor = 0.5
        ret, frame = cap.read()
        # Resize the frame
        frame = cv2.resize(frame, None, fx=scaling_factor,
                           fy=scaling_factor, interpolation=cv2.INTER_AREA)
        # Display the image
        cv2.imshow('Webcam', frame)

    def stop_cam(self):
        # Release the video capture object
        cap.release()
        # Close all active windows
        cv2.destroyAllWindows()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    myapp = MainWindow()
    myapp.show()
    sys.exit(app.exec_())
I am trying to link PyQt and an OpenCV video feed, but I can't understand how to apply a while loop for continuously streaming video. It just takes a still picture. Please, can anyone help solve the problem?
PyQt = 5
Python = 3.6.1
import cv2
import sys
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
from PyQt5.QtGui import QPixmap

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                         QtGui.QImage.Format_RGB888)
        convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
        pixmap = QPixmap(convertToQtFormat)
        resizeImage = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
        QApplication.processEvents()
        label.setPixmap(resizeImage)
        self.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
The problem is that the function that obtains the image is executed only once, so the label is never updated again.
The correct way is to place it inside a loop, but that would block the main window. The blocking can be solved by using the QThread class and sending a QImage through a signal to update the label. For example:
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()
Updating this for PySide2 and qimage2ndarray
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import cv2              # OpenCV
import qimage2ndarray   # for a memory leak, see gist
import sys              # for exiting

# Minimal implementation...
def displayFrame():
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))

app = QApplication([])
window = QWidget()

# OPENCV
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# timer for getting frames
timer = QTimer()
timer.timeout.connect(displayFrame)
timer.start(60)

label = QLabel('No Camera Feed')
button = QPushButton("Quiter")
button.clicked.connect(sys.exit)  # quiter button
layout = QVBoxLayout()
layout.addWidget(button)
layout.addWidget(label)
window.setLayout(layout)
window.show()
app.exec_()
# See also: https://gist.github.com/bsdnoobz/8464000
Thank you, Taimur Islam, for the question.
Thank you, eyllanesc, for the wonderful answer; I have modified your code a little bit. I used PyQt4 and Python 2.7, and I didn't use OpenCV.
import sys
import numpy as np
import flycapture2 as fc2
from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)
        self.cameraSettings()

    def run(self):
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)
            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)
            self.changePixmap.emit(rawImage)

    def cameraSettings(self):
        print(fc2.get_library_version())
        self.c = fc2.Context()
        numberCam = self.c.get_num_of_cameras()
        print(numberCam)
        self.c.connect(*self.c.get_camera_from_index(0))
        print(self.c.get_camera_info())
        m, f = self.c.get_video_mode_and_frame_rate()
        print(m, f)
        print(self.c.get_property_info(fc2.FRAME_RATE))
        p = self.c.get_property(fc2.FRAME_RATE)
        print(p)
        self.c.set_property(**p)
        self.c.start_capture()

class App(QWidget):
    def __init__(self):
        super(App, self).__init__()
        self.title = 'PyQt4 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.move(0, 0)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(lambda p: self.setPixMap(p))
        th.start()

    def setPixMap(self, p):
        p = QPixmap.fromImage(p)
        p = p.scaled(640, 480, Qt.KeepAspectRatio)
        self.label.setPixmap(p)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
I am working on a GUI in PyQt and I want to display both the original and processed images coming from OpenCV. I have an attempt using two pyqtSlots, but they show the same image. I am not sure how to do this without repeating the same code for each image, which seems inefficient.
from pypylon import pylon
# from pypylon_opencv_viewer import BaslerOpenCVViewer
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
import numpy as np
from scipy.interpolate import interp2d

class Thread(QThread):
    # changePixmap = pyqtSignal(QImage)
    changePixmap_raw = pyqtSignal(QImage)
    changePixmap_th = pyqtSignal(QImage)

    def run(self):
        def process_image(img):
            *SNIP*
            return result

        serial_number = '23437639'
        info = None
        for i in pylon.TlFactory.GetInstance().EnumerateDevices():
            if i.GetSerialNumber() == serial_number:
                info = i
                break
        else:
            print('Camera with {} serial number not found '.format(serial_number))

        if info is not None:
            camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(info))
            camera.Open()
            # Grabbing continuously (video) with minimal delay
            camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
            converter = pylon.ImageFormatConverter()
            # converting to opencv bgr format
            converter.OutputPixelFormat = pylon.PixelType_BGR8packed
            converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
            while camera.IsGrabbing():
                grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
                if grabResult.GrabSucceeded():
                    img = converter.Convert(grabResult)
                    img = img.GetArray()
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    h, w = img.shape
                    result_img = process_image(img)
                    bytesPerLine = w
                    original_QT = QImage(img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    threshold_QT = QImage(result_img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    p1 = original_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    p2 = threshold_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    self.changePixmap_raw.emit(p1)
                    self.changePixmap_th.emit(p2)

class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage_raw(self, image):
        self.label_raw.setPixmap(QPixmap.fromImage(image))

    @pyqtSlot(QImage)
    def setImage_th(self, image):
        self.label_th.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        # self.setWindowTitle(self.title)
        # self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label_raw = QLabel(self)
        self.label_raw.move(280, 120)
        self.label_raw.resize(640, 480)
        self.label_th = QLabel(self)
        self.label_th.move(1000, 120)
        self.label_th.resize(640, 480)
        th = Thread(self)
        th.changePixmap_raw.connect(self.setImage_raw)
        th.changePixmap_th.connect(self.setImage_th)
        th.start()
        self.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
I am hoping to add two more views, and I would hate to add two more signals, slots, and labels just to implement that.
Thanks in advance.
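One way to avoid adding another signal/slot pair for every extra view is to emit a single list of QImages and fan it out over a list of labels. The following is only a sketch, not code from the thread: the changePixmaps signal, the setImages slot, the n_views parameter and the random grayscale frames (standing in for the Basler grab and process_image) are all illustrative assumptions.

import sys
import numpy as np
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QHBoxLayout
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

class Thread(QThread):
    changePixmaps = pyqtSignal(list)  # one signal carrying a list of QImage

    def run(self):
        while True:
            # Stand-in frames: random grayscale data instead of the real camera grab.
            gray = (np.random.rand(480, 640) * 255).astype(np.uint8)
            views = [gray, 255 - gray]  # e.g. raw and processed
            images = [QImage(v.data, v.shape[1], v.shape[0], v.shape[1],
                             QImage.Format_Grayscale8).scaled(640, 480, Qt.KeepAspectRatio)
                      for v in views]
            self.changePixmaps.emit(images)
            self.msleep(30)

class App(QWidget):
    def __init__(self, n_views=2):
        super().__init__()
        layout = QHBoxLayout(self)
        self.labels = [QLabel(self) for _ in range(n_views)]
        for lbl in self.labels:
            layout.addWidget(lbl)
        self.th = Thread(self)
        self.th.changePixmaps.connect(self.setImages)
        self.th.start()

    @pyqtSlot(list)
    def setImages(self, images):
        # one slot updates however many labels exist
        for lbl, img in zip(self.labels, images):
            lbl.setPixmap(QPixmap.fromImage(img))

if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = App()
    w.show()
    sys.exit(app.exec_())

Adding a third or fourth view then only means appending another entry to the views list and passing a larger n_views, rather than duplicating signals and slots.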
I am working on an OpenCV project, and when trying to add a GUI for it I wanted to use PyQt because I am familiar with it.
In this code
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture("../Testing-Video.mp4")
        while True:
            ret, frame = cap.read()
            if ret:
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine,
                                           QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.left = 0
        self.top = 0
        self.width = 600
        self.height = 800
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle("Testing PyQt")
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
which is something I didn't write myself; it's slightly adjusted from here. Using the camera video capture it works perfectly, but when using any other video file the playback is extremely fast.
Should I use a timer to fetch the frames at a constant rate? Would that be better than using signals and slots?
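One possible fix, sketched here rather than taken from the thread, is to keep the thread/signal structure and simply pace the loop with the file's own frame rate (read via cv2.CAP_PROP_FPS); a QTimer in the GUI thread, as in the PySide2 answer above, would work as well. The Thread class below is meant as a drop-in replacement for the one in the question and assumes the same App class; the 25 fps fallback value is an arbitrary assumption.

import cv2
from PyQt5.QtCore import QThread, Qt, pyqtSignal
from PyQt5.QtGui import QImage

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture("../Testing-Video.mp4")
        fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # fall back if the file reports no FPS
        delay_ms = int(1000 / fps)
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # end of file
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgbImage.shape
            image = QImage(rgbImage.data, w, h, ch * w, QImage.Format_RGB888)
            self.changePixmap.emit(image.scaled(640, 480, Qt.KeepAspectRatio))
            self.msleep(delay_ms)  # wait roughly one frame interval before the next emit
        cap.release()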