I'm trying to embed video capture in my dialog box (created in PyQt4). I tried the code below, but it only starts the capture and does not display anything on the dialog. Please help me figure out what's missing in the following code.
Here, self.videoFrame is a QtGui.QLabel.
def onRun(self):
    self.playing = True
    capture = cv2.VideoCapture(0)
    data1 = np.array([])
    while self.playing:
        _, data = capture.read()
        data1 = cv2.cvtColor(data, cv2.cv.CV_BGR2RGB)
        qImage = QtGui.QImage(data1, data1.shape[2], data1.shape[2],
                              QtGui.QImage.Format_RGB888)
        qImage = QtGui.QPixmap.fromImage(qImage)
        self.videoFrame.setPixmap(qImage)
        self.videoFrame.setScaledContents(True)
        QtGui.qApp.processEvents()
        cv2.waitKey(5)
    cv2.destroyAllWindows()
This works:
from PyQt4 import QtCore, QtGui
import sys
import cv2
import numpy as np

class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        self.image = None

    def setImage(self, image):
        self.image = image
        sz = image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.end()

class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.videoFrame = ImageWidget()
        self.setCentralWidget(self.videoFrame)
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.updateImage)
        self.timer.start(30)
        self.capture = cv2.VideoCapture(0)

    def updateImage(self):
        _, img = self.capture.read()
        # img = cv2.cvtColor(img, cv.CV_BGR2RGB)
        height, width, bpc = img.shape
        bpl = bpc * width
        image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
        self.videoFrame.setImage(image)

def main():
    app = QtGui.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    app.exec_()

if __name__ == '__main__':
    main()
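A side note on the two snippets: the loop in the question passes data1.shape[2] (the channel count) twice to the QImage constructor, while the working version above passes the width, the height and the bytes-per-line explicitly. If correct colours are wanted as well, here is a minimal sketch of updateImage with the conversion restored (assuming an OpenCV build that exposes cv2.COLOR_BGR2RGB, as the later snippets in this thread use):

    def updateImage(self):
        ret, img = self.capture.read()
        if not ret:
            return                                       # no frame available yet
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)       # BGR (OpenCV) -> RGB (Format_RGB888)
        height, width, bpc = img.shape                   # rows, columns, channels
        bpl = bpc * width                                # bytes per line (stride)
        image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
        self.videoFrame.setImage(image)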
Here is my code. It flashes when I run it, but both cameras do work. I'm also finding it very difficult to place the camera displays correctly, with one camera on the left and the other on the right, even though I've tried a lot. I'm a beginner right now. It's Python code using PyQt5 with OpenCV, and I'm going to run it on a Raspberry Pi.
import sys
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets

class MainWindow(QWidget):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.VBL = QVBoxLayout()
        self.showMaximized()
        self.FeedLabel = QLabel()
        self.VBL.addWidget(self.FeedLabel)
        self.setGeometry(0, 0, 1920, 1000)

        self.VBL1 = QVBoxLayout()
        # self.showMaximized()
        # self.FeedLabel = QLabel()
        # self.VBL1.addWidget(self.FeedLabel)
        self.VBL1.setGeometry(QtCore.QRect(0, 0, 900, 1000))
        self.setLayout(self.VBL)

        self.VBL2 = QVBoxLayout()
        # self.showMaximized()
        # self.FeedLabel = QLabel()
        # self.VBL2.addWidget(self.FeedLabel)
        self.setGeometry(QtCore.QRect(QPoint(100, 200), QSize(11, 16)))
        # self.setGeometry()
        self.setLayout(self.VBL)

        # self.CancelBTN = QPushButton(Cancel)
        # self.CancelBTN.clicked.connect(self.CancelFeed)
        # self.VBL.addWidget(self.CancelBTN)
        # self.frame = QtWidgets.QFrame(self)
        # self.frame.setGeometry(QtCore.QRect(1, 1, 200, 200))
        # self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        # self.frame.setFrameShadow(QtWidgets.QFrame.Raised)

        self.Worker1 = Worker1()
        self.Worker1.start()
        self.Worker1.ImageUpdate1.connect(self.ImageUpdateSlot1)
        self.setLayout(self.VBL1)

        self.Worker2 = Worker2()
        self.Worker2.start()
        self.Worker2.ImageUpdate.connect(self.ImageUpdateSlot)
        self.setLayout(self.VBL2)

    def ImageUpdateSlot(self, Image):
        self.FeedLabel.setPixmap(QPixmap.fromImage(Image))

    def ImageUpdateSlot1(self, Image):
        self.FeedLabel.setPixmap(QPixmap.fromImage(Image))

    def CancelFeed(self):
        self.Worker1.stop()

class Worker1(QThread):
    ImageUpdate1 = pyqtSignal(QImage)

    def run(self):
        self.ThreadActive = True
        Capture = cv2.VideoCapture(0)
        while self.ThreadActive:
            ret, frame = Capture.read()
            if ret:
                Image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                FlippedImage = cv2.flip(Image, 1)
                ConvertToQtFormat = QImage(FlippedImage.data, FlippedImage.shape[1], FlippedImage.shape[0], QImage.Format_RGB888)
                Pic = ConvertToQtFormat.scaled(960, 1080)
                self.ImageUpdate1.emit(Pic)

    def stop(self):
        self.ThreadActive = False
        self.quit()

class Worker2(QThread):
    ImageUpdate = pyqtSignal(QImage)

    def run(self):
        self.ThreadActive = True
        Capture = cv2.VideoCapture(1)
        while self.ThreadActive:
            ret, frame = Capture.read()
            if ret:
                Image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                FlippedImage = cv2.flip(Image, 1)
                ConvertToQtFormat = QImage(FlippedImage.data, FlippedImage.shape[1], FlippedImage.shape[0], QImage.Format_RGB888)
                Pic = ConvertToQtFormat.scaled(400, 800)
                self.ImageUpdate.emit(Pic)

    def stop(self):
        self.ThreadActive = False
        self.quit()

if __name__ == "__main__":
    App = QApplication(sys.argv)
    Root = MainWindow()
    Root.show()
    sys.exit(App.exec())
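For the left/right placement question above, here is a minimal layout sketch (not from the original post, and with hypothetical class and variable names): each worker gets its own QLabel, and both labels sit side by side in a single QHBoxLayout. Using one generic worker class parameterised by the camera index also avoids duplicating Worker1/Worker2.

import sys
import cv2
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QHBoxLayout
from PyQt5.QtCore import QThread, Qt, pyqtSignal

class CameraWorker(QThread):
    # one generic worker, parameterised by camera index
    ImageUpdate = pyqtSignal(QImage)

    def __init__(self, camera_index, parent=None):
        super(CameraWorker, self).__init__(parent)
        self.camera_index = camera_index
        self.ThreadActive = False

    def run(self):
        self.ThreadActive = True
        capture = cv2.VideoCapture(self.camera_index)
        while self.ThreadActive:
            ret, frame = capture.read()
            if ret:
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                qimg = QImage(rgb.data, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
                # scaled() makes a copy, so the frame buffer can be reused safely
                self.ImageUpdate.emit(qimg.scaled(640, 480, Qt.KeepAspectRatio))
        capture.release()

    def stop(self):
        self.ThreadActive = False
        self.quit()

class DualCameraWindow(QWidget):
    def __init__(self):
        super(DualCameraWindow, self).__init__()
        self.LeftLabel = QLabel("Camera 0")      # left half of the window
        self.RightLabel = QLabel("Camera 1")     # right half of the window
        layout = QHBoxLayout(self)               # one horizontal layout holds both feeds
        layout.addWidget(self.LeftLabel)
        layout.addWidget(self.RightLabel)

        self.Worker1 = CameraWorker(0)
        self.Worker1.ImageUpdate.connect(
            lambda img: self.LeftLabel.setPixmap(QPixmap.fromImage(img)))
        self.Worker2 = CameraWorker(1)
        self.Worker2.ImageUpdate.connect(
            lambda img: self.RightLabel.setPixmap(QPixmap.fromImage(img)))
        self.Worker1.start()
        self.Worker2.start()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = DualCameraWindow()
    window.showMaximized()
    sys.exit(app.exec_())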
I'm trying to link PyQt and an OpenCV video feed, but I can't understand how to apply a while loop to stream the video continuously. It just takes a still picture. Please can anyone help solve the problem?
PyQt = 5
Python = 3.6.1
class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                         QtGui.QImage.Format_RGB888)
        convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
        pixmap = QPixmap(convertToQtFormat)
        resizeImage = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
        QApplication.processEvents()
        label.setPixmap(resizeImage)
        self.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
The problem is that the function that obtains the image is executed only once, so the label is never updated.
The obvious fix is to place it inside a loop, but that would block the main window. This can be solved with the QThread class, sending a QImage through a signal to update the label. For example:
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()
Updating this for PySide2 and qimage2ndarray
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *   # QApplication, QWidget, QLabel, etc. live here in Qt5
import cv2                        # OpenCV
import qimage2ndarray             # for a memory leak, see gist
import sys                        # for exiting

# Minimal implementation...
def displayFrame():
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))

app = QApplication([])
window = QWidget()

# OPENCV
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# timer for getting frames
timer = QTimer()
timer.timeout.connect(displayFrame)
timer.start(60)

label = QLabel('No Camera Feed')
button = QPushButton("Quiter")
button.clicked.connect(sys.exit)  # quit button
layout = QVBoxLayout()
layout.addWidget(button)
layout.addWidget(label)
window.setLayout(layout)
window.show()
app.exec_()
# See also: https://gist.github.com/bsdnoobz/8464000
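A small optional tweak, not in the original snippet: let the button end the Qt event loop instead of calling sys.exit() directly, so the webcam can be released once the loop returns (assuming the same cap, button and app objects as above):

button.clicked.connect(app.quit)  # stop the event loop instead of killing the process
app.exec_()
cap.release()                     # free the webcam once the event loop has finished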
Thank you, Taimur Islam, for the question.
Thank you, eyllanesc, for the wonderful answer; I have modified your code a little bit. I used PyQt4 with Python 2.7, and I didn't use OpenCV.
import sys
import numpy as np
import flycapture2 as fc2
from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)
        self.cameraSettings()

    def run(self):
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)
            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)
            self.changePixmap.emit(rawImage)

    def cameraSettings(self):
        print(fc2.get_library_version())
        self.c = fc2.Context()
        numberCam = self.c.get_num_of_cameras()
        print(numberCam)
        self.c.connect(*self.c.get_camera_from_index(0))
        print(self.c.get_camera_info())
        m, f = self.c.get_video_mode_and_frame_rate()
        print(m, f)
        print(self.c.get_property_info(fc2.FRAME_RATE))
        p = self.c.get_property(fc2.FRAME_RATE)
        print(p)
        self.c.set_property(**p)
        self.c.start_capture()

class App(QWidget):
    def __init__(self):
        super(App, self).__init__()
        self.title = 'PyQt4 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.move(0, 0)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(lambda p: self.setPixMap(p))
        th.start()

    def setPixMap(self, p):
        p = QPixmap.fromImage(p)
        p = p.scaled(640, 480, Qt.KeepAspectRatio)
        self.label.setPixmap(p)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
I am working on a GUI in PyQt and I want to display both the original and the processed image coming from OpenCV. I have an attempt using two pyqtSlots, but they show the same image. I am also not sure how to do this without repeating the same code for each image, which seems inefficient.
from pypylon import pylon
# from pypylon_opencv_viewer import BaslerOpenCVViewer
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
import numpy as np
from scipy.interpolate import interp2d

class Thread(QThread):
    # changePixmap = pyqtSignal(QImage)
    changePixmap_raw = pyqtSignal(QImage)
    changePixmap_th = pyqtSignal(QImage)

    def run(self):
        def process_image(img):
            *SNIP*
            return result

        serial_number = '23437639'
        info = None
        for i in pylon.TlFactory.GetInstance().EnumerateDevices():
            if i.GetSerialNumber() == serial_number:
                info = i
                break
        else:
            print('Camera with {} serial number not found '.format(serial_number))

        if info is not None:
            camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(info))
            camera.Open()
            # Grabbing continuously (video) with minimal delay
            camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
            converter = pylon.ImageFormatConverter()
            # converting to OpenCV BGR format
            converter.OutputPixelFormat = pylon.PixelType_BGR8packed
            converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

            while camera.IsGrabbing():
                grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
                if grabResult.GrabSucceeded():
                    img = converter.Convert(grabResult)
                    img = img.GetArray()
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    h, w = img.shape
                    result_img = process_image(img)
                    bytesPerLine = w
                    original_QT = QImage(img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    threshold_QT = QImage(result_img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    p1 = original_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    p2 = threshold_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    self.changePixmap_raw.emit(p1)
                    self.changePixmap_th.emit(p2)

class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage_raw(self, image):
        self.label_raw.setPixmap(QPixmap.fromImage(image))

    @pyqtSlot(QImage)
    def setImage_th(self, image):
        self.label_th.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        # self.setWindowTitle(self.title)
        # self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label_raw = QLabel(self)
        self.label_raw.move(280, 120)
        self.label_raw.resize(640, 480)
        self.label_th = QLabel(self)
        self.label_th.move(1000, 120)
        self.label_th.resize(640, 480)
        th = Thread(self)
        th.changePixmap_raw.connect(self.setImage_raw)
        th.changePixmap_th.connect(self.setImage_th)
        th.start()
        self.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
I am hoping to add two more views and I would hate to add 2 more emitters etc... to implement that.
Thanks in advance.
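Not part of the original question, but one way to avoid adding a new signal and slot per view is to emit the view name together with the image on a single signal and keep the labels in a dict. Below is a minimal sketch of that idea with hypothetical names; a plain webcam and cv2.threshold stand in for the Basler pipeline and process_image():

import sys
import cv2
from PyQt5.QtWidgets import QWidget, QLabel, QApplication, QHBoxLayout
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

class Thread(QThread):
    # one signal for every view: (view name, image)
    changePixmap = pyqtSignal(str, QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if not ret:
                continue
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # stand-in for process_image(); the real processing goes here
            _, th = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            for name, img in (('raw', gray), ('threshold', th)):
                h, w = img.shape
                qimg = QImage(img.data, w, h, w, QImage.Format_Grayscale8)
                self.changePixmap.emit(name, qimg.scaled(640, 480, Qt.KeepAspectRatio))

class App(QWidget):
    def __init__(self):
        super().__init__()
        layout = QHBoxLayout(self)
        # adding another view is just one more entry here
        self.labels = {name: QLabel(name) for name in ('raw', 'threshold')}
        for label in self.labels.values():
            layout.addWidget(label)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()

    @pyqtSlot(str, QImage)
    def setImage(self, name, image):
        self.labels[name].setPixmap(QPixmap.fromImage(image))

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())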
I'm using Python with PyQt4.
What I want to do:
(1) scale the image from 1280x1024 to 860x480 and show it in the ImageWidget.
(2) draw some lines on the image using the mouse.
# -*- coding: utf-8 -*-
import ConfigParser
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QImage
from PyQt4.QtGui import QApplication, QMainWindow, QPushButton
import sys, time, datetime
import cv2
import os
import numpy as np

video = cv2.VideoCapture(0)
video.set(3, 1280)
video.set(4, 1024)

class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        self.image = None

    def setImage(self, image):
        self.image = image
        sz = image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.end()

class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.videoFrame = ImageWidget()
        self.setCentralWidget(self.videoFrame)
        screen = QtGui.QDesktopWidget().screenGeometry()
        self.showFullScreen()
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(50)

    def update(self):
        ret, frame = video.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QtGui.QImage.Format_RGB888)
        # here I want to scale to 860x480...
        self.videoFrame.setImage(image)
        # draw ... ?

def main():
    app = QtGui.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    app.exec_()

if __name__ == '__main__':
    main()
Does anyone know where I could find an example or documentation for this?
Here is the scaled() documentation.
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QImage
from PyQt4.QtGui import QApplication, QMainWindow, QPushButton
import sys, time, datetime
import cv2
import numpy as np

video = cv2.VideoCapture(0)
video.set(3, 1280)
video.set(4, 1024)

class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        self.image = None

    def setImage(self, image):
        self.image = image
        sz = image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.end()

    # here get the mouse click and move..
    def mousePressEvent(self, event):  # click
        print event.pos()

    def mouseMoveEvent(self, event):  # move
        print event.pos()

class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.fAction = QtGui.QAction(u"click me", self)
        menubar = self.menuBar()
        self.fileMenu = menubar.addAction(u"click me")
        font = self.fileMenu.font()
        font.setPointSize(30)
        menubar.setFont(font)
        self.videoFrame = ImageWidget()
        self.setCentralWidget(self.videoFrame)
        screen = QtGui.QDesktopWidget().screenGeometry()
        self.showFullScreen()
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(50)

    def update(self):
        ret, frame = video.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QtGui.QImage.Format_RGB888)
        image.scaledToWidth(640, QtCore.Qt.SmoothTransformation)
        image.scaledToHeight(480, QtCore.Qt.SmoothTransformation)
        # here I scale my image...
        result = image.scaled(640, 480, QtCore.Qt.KeepAspectRatioByExpanding, QtCore.Qt.SmoothTransformation)
        self.videoFrame.setImage(result)

def main():
    app = QtGui.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    app.exec_()

if __name__ == '__main__':
    main()
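The answer above covers the scaling; for the drawing part of the question, here is a minimal sketch (my own addition, with a hypothetical class name) that collects the mouse positions in the widget and paints them as connected line segments on top of the video frame:

class DrawingImageWidget(ImageWidget):
    """ImageWidget that also records mouse drags and paints them as lines."""

    def __init__(self, parent=None):
        super(DrawingImageWidget, self).__init__(parent)
        self.points = []                  # points of the line currently being drawn

    def mousePressEvent(self, event):     # click: start a new line
        self.points = [event.pos()]
        self.update()

    def mouseMoveEvent(self, event):      # drag: extend the line
        self.points.append(event.pos())
        self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        # draw the collected points as line segments on top of the frame
        qp.setPen(QtGui.QPen(QtCore.Qt.red, 3))
        for p1, p2 in zip(self.points, self.points[1:]):
            qp.drawLine(p1, p2)
        qp.end()

With this, self.videoFrame = ImageWidget() in MainWindow would become self.videoFrame = DrawingImageWidget(). Note that mouseMoveEvent only fires while a mouse button is held down unless setMouseTracking(True) is enabled on the widget.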