I am working on a GUI in PyQt and I want to display both the original and the processed images coming from OpenCV. I have an attempt using two pyqtSlots, but they show the same image. I am not sure how to do this without duplicating the same code for each image, which seems inefficient.
from pypylon import pylon
#from pypylon_opencv_viewer import BaslerOpenCVViewer
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
import numpy as np
from scipy.interpolate import interp2d
class Thread(QThread):
    #changePixmap = pyqtSignal(QImage)
    changePixmap_raw = pyqtSignal(QImage)
    changePixmap_th = pyqtSignal(QImage)

    def run(self):
        def process_image(img):
            *SNIP*
            return result

        serial_number = '23437639'
        info = None
        for i in pylon.TlFactory.GetInstance().EnumerateDevices():
            if i.GetSerialNumber() == serial_number:
                info = i
                break
        else:
            print('Camera with {} serial number not found'.format(serial_number))

        if info is not None:
            camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(info))
            camera.Open()
            # Grabbing continuously (video) with minimal delay
            camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
            converter = pylon.ImageFormatConverter()
            # converting to OpenCV BGR format
            converter.OutputPixelFormat = pylon.PixelType_BGR8packed
            converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
            while camera.IsGrabbing():
                grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
                if grabResult.GrabSucceeded():
                    img = converter.Convert(grabResult)
                    img = img.GetArray()
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    h, w = img.shape
                    result_img = process_image(img)
                    bytesPerLine = w
                    original_QT = QImage(img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    threshold_QT = QImage(result_img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    p1 = original_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    p2 = threshold_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    self.changePixmap_raw.emit(p1)
                    self.changePixmap_th.emit(p2)
class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage_raw(self, image):
        self.label_raw.setPixmap(QPixmap.fromImage(image))

    @pyqtSlot(QImage)
    def setImage_th(self, image):
        self.label_th.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        #self.setWindowTitle(self.title)
        #self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label for each view
        self.label_raw = QLabel(self)
        self.label_raw.move(280, 120)
        self.label_raw.resize(640, 480)
        self.label_th = QLabel(self)
        self.label_th.move(1000, 120)
        self.label_th.resize(640, 480)
        th = Thread(self)
        th.changePixmap_raw.connect(self.setImage_raw)
        th.changePixmap_th.connect(self.setImage_th)
        th.start()
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
I am hoping to add two more views, and I would hate to add two more signals, slots, etc. to implement that.
Thanks in advance.
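One way to avoid a separate signal/slot/label trio per view is to emit all the views together and route them by name in a single slot. Below is a minimal sketch of that idea; the webcam capture and threshold step merely stand in for the Basler grab loop and the snipped process_image(), and names such as framesReady and VIEW_NAMES are invented for illustration:

import sys
import cv2
from PyQt5.QtWidgets import QWidget, QLabel, QApplication, QHBoxLayout
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

def to_qimage(gray):
    """Wrap a contiguous 8-bit grayscale array in a QImage (copy so Qt owns the buffer)."""
    h, w = gray.shape
    return QImage(gray.data, w, h, w, QImage.Format_Grayscale8).copy()

class Thread(QThread):
    # one signal carrying a dict of named views, e.g. {'raw': QImage, 'threshold': QImage}
    framesReady = pyqtSignal(dict)

    def run(self):
        cap = cv2.VideoCapture(0)   # stand-in for the Basler grab loop
        while True:
            ret, frame = cap.read()
            if not ret:
                continue
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            _, th = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)  # stand-in for process_image()
            self.framesReady.emit({'raw': to_qimage(gray), 'threshold': to_qimage(th)})

class App(QWidget):
    VIEW_NAMES = ['raw', 'threshold']   # add more names here to add more views

    def __init__(self):
        super().__init__()
        layout = QHBoxLayout(self)
        self.labels = {}
        for name in self.VIEW_NAMES:
            label = QLabel(name, self)
            label.setFixedSize(640, 480)
            layout.addWidget(label)
            self.labels[name] = label
        self.th = Thread(self)
        self.th.framesReady.connect(self.setImages)
        self.th.start()

    @pyqtSlot(dict)
    def setImages(self, views):
        # one slot updates every view; unknown names are simply ignored
        for name, image in views.items():
            if name in self.labels:
                scaled = image.scaled(640, 480, Qt.KeepAspectRatio)
                self.labels[name].setPixmap(QPixmap.fromImage(scaled))

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())

With this layout, adding another view is one more entry in VIEW_NAMES and one more key in the emitted dict, with no extra signals or slots.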
Related
I am trying to collect video from a Basler GigE camera using PyPylon and convert it to an array. I want to take this constantly updating image array and show it on a PyQt5 GUI. I am unsure what I am doing wrong. Below is the code; any help would be greatly appreciated!
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np
from pypylon import pylon
class BaslerVideo():
    def _videoGrab(self):
        camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
        camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        converter = pylon.ImageFormatConverter()
        converter.OutputPixelFormat = pylon.PixelType_BGR8packed
        converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
        while camera.IsGrabbing():
            grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
            if grabResult.GrabSucceeded():
                image = converter.Convert(grabResult)
                img = image.GetArray()
                return img


class VideoThread(QThread, BaslerVideo):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def run(self):
        # capture from the Basler camera
        cap = BaslerVideo()
        while True:
            cv_img = cap._videoGrab()
            self.change_pixmap_signal.emit(cv_img)


class App(QWidget):
    def __inti__(self):
        super().__init__()
        self.setWindowTitle("GigE Cam Test")
        self.display_width = 640
        self.display_height = 480
        # Create the label that holds the image
        self.image_label = QLabel(self)
        self.image_label.resize(self.display_width, self.display_height)
        # Create a text label
        self.textLabel = QLabel("GigE Cam")
        # Create the box that will hold the image
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        # Set the layout
        self.setLayout(vbox)
        # Create the video capture thread
        self.thread = VideoThread()
        # Connect its signal to the update_image slot
        self.thread.change_pixmap_signal.connect(self.update_image)
        # Start the thread
        self.thread.start()

    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)

    def convert_cv_qt(self, cv_img):
        """Convert from a numpy array image to a QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())
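For reference, the grab loop is usually kept inside the thread itself so the camera is opened once rather than on every _videoGrab() call. The sketch below applies the QThread pattern used elsewhere in this thread to pypylon; it is untested against real hardware, and the class name BaslerThread is invented for illustration:

from pypylon import pylon
import numpy as np
from PyQt5.QtCore import QThread, pyqtSignal

class BaslerThread(QThread):
    """Hypothetical worker: owns the camera and emits BGR frames as numpy arrays."""
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def run(self):
        camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
        camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        converter = pylon.ImageFormatConverter()
        converter.OutputPixelFormat = pylon.PixelType_BGR8packed
        converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
        while camera.IsGrabbing() and not self.isInterruptionRequested():
            grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
            if grabResult.GrabSucceeded():
                img = converter.Convert(grabResult).GetArray()
                self.change_pixmap_signal.emit(img)
            grabResult.Release()
        camera.StopGrabbing()
        camera.Close()

The GUI side stays the same: connect change_pixmap_signal to the update_image slot and convert the array to a QPixmap there.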
I am trying to link PyQt and an OpenCV video feed, but I can't understand how to apply a while loop for continuously streaming video; it just takes a still picture. Please, can anyone help solve the problem?
PyQt = 5
Python = 3.6.1
class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                         QtGui.QImage.Format_RGB888)
        convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
        pixmap = QPixmap(convertToQtFormat)
        resizeImage = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
        QApplication.processEvents()
        label.setPixmap(resizeImage)
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
The problem is that the function that obtains the image is executed only once, so the label is never updated.
The correct approach is to place it inside a loop, but that would block the main window. The blocking can be avoided by using the QThread class and sending a QImage through a signal to update the label. For example:
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)


class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()
Updating this for PySide2 and qimage2ndarray
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *  # QApplication, QWidget, QLabel, QPushButton, QVBoxLayout live here in Qt5
import cv2              # OpenCV
import qimage2ndarray   # used to avoid a QImage memory leak, see gist
import sys              # for exiting

# Minimal implementation...
def displayFrame():
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))

app = QApplication([])
window = QWidget()

# OpenCV capture
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# timer for getting frames
timer = QTimer()
timer.timeout.connect(displayFrame)
timer.start(60)

label = QLabel('No Camera Feed')
button = QPushButton("Quit")
button.clicked.connect(sys.exit)  # quit button

layout = QVBoxLayout()
layout.addWidget(button)
layout.addWidget(label)
window.setLayout(layout)
window.show()
app.exec_()

# See also: https://gist.github.com/bsdnoobz/8464000
Thank you Taimur Islam for the question.
Thank you eyllanesc for the wonderful answer; I have modified your code a little bit. I used PyQt4 and Python 2.7, and I didn't use OpenCV.
import sys
import numpy as np
import flycapture2 as fc2
from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)
class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)
        self.cameraSettings()

    def run(self):
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)
            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)
            self.changePixmap.emit(rawImage)

    def cameraSettings(self):
        print(fc2.get_library_version())
        self.c = fc2.Context()
        numberCam = self.c.get_num_of_cameras()
        print(numberCam)
        self.c.connect(*self.c.get_camera_from_index(0))
        print(self.c.get_camera_info())
        m, f = self.c.get_video_mode_and_frame_rate()
        print(m, f)
        print(self.c.get_property_info(fc2.FRAME_RATE))
        p = self.c.get_property(fc2.FRAME_RATE)
        print(p)
        self.c.set_property(**p)
        self.c.start_capture()


class App(QWidget):
    def __init__(self):
        super(App, self).__init__()
        self.title = 'PyQt4 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.move(0, 0)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(lambda p: self.setPixMap(p))
        th.start()

    def setPixMap(self, p):
        p = QPixmap.fromImage(p)
        p = p.scaled(640, 480, Qt.KeepAspectRatio)
        self.label.setPixmap(p)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
I am working on an OpenCV project, and when trying to add a GUI for it I wanted to use PyQt because I am familiar with it.
In this code
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture("../Testing-Video.mp4")
        while True:
            ret, frame = cap.read()
            if ret:
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine,
                                           QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.left = 0
        self.top = 0
        self.width = 600
        self.height = 800
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle("Testing PyQt")
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
which is something I didn't write myself; it's slightly adjusted from here. Using the camera video capture it works perfectly, but when using any other video file the playback is extremely fast.
Should I use a timer to fetch the frames at a constant rate, rather than using signals and slots?
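A file delivers frames as fast as cap.read() can return them, so the loop has to be paced to the file's frame rate. One option is to keep the signal/slot approach and simply sleep in the worker thread; a minimal sketch follows (the 25 fps fallback is an assumption for containers that report 0):

import cv2
from PyQt5.QtCore import QThread, Qt, pyqtSignal
from PyQt5.QtGui import QImage

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture("../Testing-Video.mp4")
        fps = cap.get(cv2.CAP_PROP_FPS) or 25.0   # some files report 0; assume 25 then
        delay_ms = int(1000.0 / fps)
        while True:
            ret, frame = cap.read()
            if not ret:
                break                              # end of file
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgbImage.shape
            convertToQtFormat = QImage(rgbImage.data, w, h, ch * w, QImage.Format_RGB888)
            self.changePixmap.emit(convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio))
            self.msleep(delay_ms)                  # throttle to roughly the source frame rate

A QTimer in the GUI thread, as in the qimage2ndarray example above, is the other common way to pace playback.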
I tried the code from this answer and it crashes with the error Process finished with exit code -1073740771 (0xC000041D) after some time (2-10 sec.), and sometimes with 0xC0000005. It crashes immediately if I try to drag the window.
However, when I put time.sleep(0.1) in run it works fine. If I use sleeps shorter than 0.1 it crashes again.
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel,QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, Qt
import cv2
import sys
import time
class CamThread(QThread):
    changemap = pyqtSignal('QImage')

    def run(self):
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            ret, img_rgb = cap.read()
            if ret:
                self.rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
                self.convert = QImage(self.rgb.data, self.rgb.shape[1], self.rgb.shape[0], QImage.Format_RGB888)
                self.p = self.convert.scaled(640, 480, Qt.KeepAspectRatio)
                self.changemap.emit(self.p)
                #time.sleep(0.1)


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'webcam'
        self.initUI()

    @pyqtSlot('QImage')
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(100, 100, 640, 480)
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        thr = CamThread(self)
        thr.changemap.connect(self.setImage)
        thr.start()


app = QApplication(sys.argv)
win = App()
#win.setAttribute(Qt.WA_DeleteOnClose, True)
win.show()
app.exit(app.exec_())
I thought that the problem was somewhere in the signals/slots, but I haven't been able to find anything relevant.
Windows 10
Python - 3.7
PyQt - 5.12
OpenCV - 3.4.5.20
Fixed it using QMutex and QWaitCondition to prevent the worker from emitting a new frame while the main thread is still updating the label. Apparently that was the issue. eyllanesc, I'm new here as you can see; should I post this as an answer in the original thread?
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, Qt, QMutex, QWaitCondition
import cv2
import sys
import time
class CamThread(QThread):
    changemap = pyqtSignal('QImage')

    def __init__(self, mutex, condition):
        super().__init__()
        self.mutex = mutex
        self.condition = condition

    def run(self):
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            try:
                ret, img_rgb = cap.read()
                if ret:
                    rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
                    # any other image processing here
                    convert = QImage(rgb.data, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
                    p = convert.scaled(640, 480, Qt.KeepAspectRatio)
                    self.changemap.emit(p)
                    # block until the GUI thread has consumed the frame
                    self.condition.wait(self.mutex)
            except:
                print('error')


class App(QWidget):
    time = 0

    def __init__(self):
        super().__init__()
        self.title = 'webcam'
        self.mutex = QMutex()
        self.condition = QWaitCondition()
        self.initUI()

    @pyqtSlot('QImage')
    def setImage(self, image):
        self.mutex.lock()
        try:
            self.label.setPixmap(QPixmap.fromImage(image))
        finally:
            self.mutex.unlock()
            self.condition.wakeAll()

    def initUI(self):
        self.mutex.lock()
        self.setWindowTitle(self.title)
        self.setGeometry(100, 100, 640, 480)
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        self.thr = CamThread(mutex=self.mutex, condition=self.condition)
        self.thr.changemap.connect(self.setImage)
        self.thr.start()


app = QApplication(sys.argv)
win = App()
win.show()
app.exit(app.exec_())
N.B. You still need to properly stop the thread and close the camera connection in this example.
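For completeness, here is a minimal sketch of such a shutdown, assuming a hypothetical _running flag and a closeEvent override; the mutex/condition pacing from the answer above is left out to keep the example short:

import sys
import cv2
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, Qt

class CamThread(QThread):
    changemap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        super().__init__(parent)
        self._running = True            # hypothetical stop flag

    def stop(self):
        """Ask run() to finish, then block until it has returned."""
        self._running = False
        self.wait()

    def run(self):
        cap = cv2.VideoCapture(0)
        while self._running:
            ret, frame = cap.read()
            if ret:
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img = QImage(rgb.data, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
                self.changemap.emit(img.scaled(640, 480, Qt.KeepAspectRatio))
        cap.release()                   # close the camera connection

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        self.thr = CamThread(self)
        self.thr.changemap.connect(self.setImage)
        self.thr.start()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def closeEvent(self, event):
        self.thr.stop()                 # stop grabbing before the window goes away
        event.accept()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = App()
    win.show()
    sys.exit(app.exec_())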