How to update QLabel image correctly? [duplicate] - python

This question already has an answer here:
What is the difference between an opencv BGR image and its reverse version RGB image[:,:,::-1]?
(1 answer)
Closed 2 years ago.
I have a custom QLabel subclass called ImageLabel that shows images and a video stream. I want to update the image with the setPixmap() method, but it doesn't update the image correctly.
Note: the Stream_Thread class handles the video stream; I'm not using it to update the image for now.
Here is the distorted image (screenshot omitted).
Main class:
import sys
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QVBoxLayout, QWidget, QPushButton, QHBoxLayout
from ImageLabel import ImageLabel
from Stream_Thread import Stream_Thread
import cv2


class MainWindow(QWidget):
    def __init__(self):
        super().__init__()
        self.init_UI()

    def init_UI(self):
        self.setFixedSize(690, 530)
        self.image_lbl = ImageLabel()
        # start the video stream of the selected camera
        th = Stream_Thread()
        th.set_index(1)
        th.changePixmap.connect(self.setImage, Qt.QueuedConnection)
        th.start()
        btn_cnt = QPushButton("Continue")
        btn_pa = QPushButton("Pause")
        hbox = QHBoxLayout()
        hbox.addWidget(btn_cnt)
        hbox.addWidget(btn_pa)
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_lbl)
        vbox.addLayout(hbox)
        self.setLayout(vbox)
        btn_pa.clicked.connect(lambda: self.btn_pa_clicked(th))
        self.show()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.image_lbl.pixmap = QPixmap.fromImage(image).scaled(720, 540)
        self.image_lbl.setPixmap(QPixmap.fromImage(image).scaled(720, 540))

    def btn_pa_clicked(self, th):
        th.terminate()  # terminates the video stream
        image = cv2.imread("img/49.jpg", cv2.COLOR_BGR2RGB)  # self.image_lbl.pixmap
        # image = image.toImage()
        if image is not None:
            (h, w, c) = image.shape
            qimage = QImage(image.data, h, w, 3 * h, QImage.Format_RGB888)
            self.image_lbl.pixmap = QPixmap.fromImage(qimage)
            # self.image_lbl.setPixmap(QPixmap.fromImage(qimage))
            self.image_lbl.repaint()


def main():
    app = QApplication(sys.argv)
    main_form = MainWindow()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
StreamThread class:
import cv2
from PyQt5.QtCore import QThread, pyqtSignal, Qt
from PyQt5.QtGui import QImage

# https://stackoverflow.com/a/44404713/13080899


class Stream_Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def __init__(self):
        super(Stream_Thread, self).__init__()
        self.ref = False  # refresh flag

    def set_index(self, index=0):
        self.index = int(index)

    def refresh(self):
        self.ref = True
        self.capt.open(self.index)

    def run(self):
        self.capt = cv2.VideoCapture(self.index, cv2.CAP_DSHOW)
        while True:
            ret, frame = self.capt.read()
            if ret:
                rbgImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rbgImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rbgImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)
ImageLabel class:
from PyQt5 import QtGui, QtCore
from PyQt5.QtCore import Qt, QSize, pyqtSignal, QObject, QPoint, pyqtSlot
from PyQt5.QtWidgets import QLabel


class Communicate(QObject):
    cor_update = pyqtSignal()
    cor_curr = pyqtSignal()


class ImageLabel(QLabel):
    def __init__(self, width=720, height=540):
        super(ImageLabel, self).__init__()
        self.setMouseTracking(True)
        self.pixmap = QtGui.QPixmap("img/im.jpg")
        #### initialize coordinates ####
        self.x1 = 0
        self.y1 = 0
        self.x_curr = 0
        self.y_curr = 0
        self.x2 = 0
        self.y2 = 0
        # enable state to allow the user to draw
        self.enable_labelling = False
        # enable state to track coordinates for drawing
        self.enable_cor = False
        ################################
        self.source = Communicate()

    def paintEvent(self, event):
        qp = QtGui.QPainter(self)
        qp.drawPixmap(self.rect(), self.pixmap)
        if self.enable_labelling == True:
            self.paintRect(event, qp)
        qp.end()

    def paintRect(self, event, qp):
        br = QtGui.QBrush(QtGui.QColor(50, 255, 255, 40))
        qp.setBrush(br)
        if self.enable_cor == True:
            qp.drawRect(QtCore.QRect(QPoint(self.x1, self.y1), QSize(self.x_curr - self.x1, self.y_curr - self.y1)))
        else:
            qp.drawRect(QtCore.QRect(QPoint(self.x1, self.y1), QSize(self.x2 - self.x1, self.y2 - self.y1)))

    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton and self.enable_labelling == True:
            self.x1 = event.x()
            self.y1 = event.y()
            self.enable_cor = True
            self.source.cor_update.emit()

    def mouseMoveEvent(self, event):
        self.x_curr = event.x()
        self.y_curr = event.y()
        self.source.cor_curr.emit()

    def mouseReleaseEvent(self, event):
        if event.button() == Qt.LeftButton and self.enable_labelling == True:
            self.x2 = event.x()
            self.y2 = event.y()
            self.source.cor_update.emit()
            self.enable_cor = False

Instead of opening the image with imread and converting it to a QImage, open it with QImage directly and convert the format; that should solve your problem. Use:
image = QImage("img/49.jpg")
image = image.convertToFormat(QImage.Format_RGB888)
This is pseudo-code and not tested; some modifications may be needed.
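For reference, here is a minimal, untested sketch of how btn_pa_clicked from the question could look with that approach (it reuses the imports and the file path already in the question). It also shows, commented out, the equivalent fix if you keep cv2.imread: the distortion most likely comes from passing the height where QImage expects the width and from the wrong bytes-per-line value, so building the QImage as QImage(data, w, h, ch * w, QImage.Format_RGB888) should also work:

def btn_pa_clicked(self, th):
    th.terminate()  # terminates the video stream

    # Option 1: let Qt load and convert the file itself
    qimage = QImage("img/49.jpg").convertToFormat(QImage.Format_RGB888)

    # Option 2: keep cv2.imread, but pass width, height and
    # bytesPerLine to QImage in the right order
    # bgr = cv2.imread("img/49.jpg")
    # rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    # h, w, ch = rgb.shape
    # qimage = QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888)

    if not qimage.isNull():
        self.image_lbl.pixmap = QPixmap.fromImage(qimage).scaled(720, 540)
        self.image_lbl.repaint()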

Related

Exact Position of video in QVideoWidget

I have a custom media player that can display images and videos with the help of PyQt. The media player is implemented by the following Python code:
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout, \
    QLabel, QSlider, QStyle, QSizePolicy, QFileDialog
import sys
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtGui import QIcon, QPalette
from PyQt5.QtCore import Qt, QUrl


class Window(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("PyQt5 Media Player")
        self.setGeometry(350, 100, 700, 500)
        self.setWindowIcon(QIcon('player.png'))
        p = self.palette()
        p.setColor(QPalette.Window, Qt.black)
        self.setPalette(p)
        self.init_ui()
        self.show()

    def init_ui(self):
        # create media player object
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        # create videowidget object
        videowidget = QVideoWidget()
        # create open button
        openBtn = QPushButton('Open Video')
        openBtn.clicked.connect(self.open_file)
        # create button for playing
        self.playBtn = QPushButton()
        self.playBtn.setEnabled(False)
        self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playBtn.clicked.connect(self.play_video)
        # create slider
        self.slider = QSlider(Qt.Horizontal)
        self.slider.setRange(0, 0)
        self.slider.sliderMoved.connect(self.set_position)
        # create label
        self.label = QLabel()
        self.label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
        # create hbox layout
        hboxLayout = QHBoxLayout()
        hboxLayout.setContentsMargins(0, 0, 0, 0)
        # set widgets to the hbox layout
        hboxLayout.addWidget(openBtn)
        hboxLayout.addWidget(self.playBtn)
        hboxLayout.addWidget(self.slider)
        # create vbox layout
        vboxLayout = QVBoxLayout()
        vboxLayout.addWidget(videowidget)
        vboxLayout.addLayout(hboxLayout)
        vboxLayout.addWidget(self.label)
        self.setLayout(vboxLayout)
        self.mediaPlayer.setVideoOutput(videowidget)
        # media player signals
        self.mediaPlayer.stateChanged.connect(self.mediastate_changed)
        self.mediaPlayer.positionChanged.connect(self.position_changed)
        self.mediaPlayer.durationChanged.connect(self.duration_changed)

    def open_file(self):
        filename, _ = QFileDialog.getOpenFileName(self, "Open Video")
        if filename != '':
            self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(filename)))
            self.playBtn.setEnabled(True)

    def play_video(self):
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.mediaPlayer.pause()
        else:
            self.mediaPlayer.play()

    def mediastate_changed(self, state):
        if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
            self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        else:
            self.playBtn.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))

    def position_changed(self, position):
        self.slider.setValue(position)

    def duration_changed(self, duration):
        self.slider.setRange(0, duration)

    def set_position(self, position):
        self.mediaPlayer.setPosition(position)

    def handle_errors(self):
        self.playBtn.setEnabled(False)
        self.label.setText("Error: " + self.mediaPlayer.errorString())


app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())
What I am trying to do is get the x and y coordinates of the edges of the video/image each time one is played, and while it feels like it should be easy, I really can't figure out how to do it. As displayed in the images, every video/image may have different corner positions. The only thing I could think of was getting the video widget's dimensions, but it wasn't right.
print(videowidget.height())
print(videowidget.width())
print(videowidget.x())
print(videowidget.y())
I'm not sure if this exactly answers your question but I found a sort of solution by comparing aspect ratios of the video and the widget:
import numpy as np  # needed for np.round / np.clip below (not imported in the question)


class VideoClickWidget(QVideoWidget):
    def __init__(self):
        QVideoWidget.__init__(self)

    def mouseReleaseEvent(self, event):
        widget_width = self.frameGeometry().width()
        widget_height = self.frameGeometry().height()
        widget_ratio = widget_width / widget_height

        video_width = self.mediaObject().metaData("Resolution").width()
        video_height = self.mediaObject().metaData("Resolution").height()
        video_ratio = video_width / video_height

        x, y = event.pos().x(), event.pos().y()

        # It's wider
        if widget_ratio > video_ratio:
            percentage = video_ratio / widget_ratio
            # we know that the video occupies $percentage$ of the widget
            dead_zone = int(np.round(widget_width * ((1 - percentage) / 2)))
            new_x = np.clip(x - dead_zone, 0, widget_width - 2 * dead_zone)
            print(new_x, y)
        else:
            percentage = widget_ratio / video_ratio
            dead_zone = int(np.round(widget_height * ((1 - percentage) / 2)))
            new_y = np.clip(y - dead_zone, 0, widget_height - 2 * dead_zone)
            print(x, new_y)

        super(QVideoWidget, self).mouseReleaseEvent(event)

How to display live camera video from opencv in pyqt5? [duplicate]

I am trying to link PyQt and an OpenCV video feed, but I can't understand how to apply a while loop to stream the video continuously; it just takes a still picture. Can anyone please help solve the problem?
PyQt = 5
Python = 3.6.1
class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                         QtGui.QImage.Format_RGB888)
        convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
        pixmap = QPixmap(convertToQtFormat)
        resizeImage = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
        QApplication.processEvents()
        label.setPixmap(resizeImage)
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
The problem is that the function that obtains the image is executed only once, so the label is never updated.
The capture has to run in a loop, but a plain loop would block the main window. This can be solved by using the QThread class and sending a QImage through a signal to update the label. For example:
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap


class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)


class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()
Updating this for PySide2 and qimage2ndarray
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *  # QApplication, QWidget, QLabel, QPushButton, QVBoxLayout live here
import cv2             # OpenCV
import qimage2ndarray  # for a memory leak, see gist
import sys             # for exiting


# Minimal implementation...
def displayFrame():
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))


app = QApplication([])
window = QWidget()

# OPENCV
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# timer for getting frames
timer = QTimer()
timer.timeout.connect(displayFrame)
timer.start(60)

label = QLabel('No Camera Feed')
button = QPushButton("Quiter")
button.clicked.connect(sys.exit)  # quiter button

layout = QVBoxLayout()
layout.addWidget(button)
layout.addWidget(label)
window.setLayout(layout)
window.show()
app.exec_()
# See also: https://gist.github.com/bsdnoobz/8464000
Thank you Taimur Islam for the question, and thank you eyllanesc for the wonderful answer; I have modified your code a little. I used PyQt4 with Python 2.7 and did not use OpenCV.
import sys
import numpy as np
import flycapture2 as fc2
from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)


class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)
        self.cameraSettings()

    def run(self):
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)
            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)
            self.changePixmap.emit(rawImage)

    def cameraSettings(self):
        print(fc2.get_library_version())
        self.c = fc2.Context()
        numberCam = self.c.get_num_of_cameras()
        print(numberCam)
        self.c.connect(*self.c.get_camera_from_index(0))
        print(self.c.get_camera_info())
        m, f = self.c.get_video_mode_and_frame_rate()
        print(m, f)
        print(self.c.get_property_info(fc2.FRAME_RATE))
        p = self.c.get_property(fc2.FRAME_RATE)
        print(p)
        self.c.set_property(**p)
        self.c.start_capture()


class App(QWidget):
    def __init__(self):
        super(App, self).__init__()
        self.title = 'PyQt4 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.move(0, 0)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(lambda p: self.setPixMap(p))
        th.start()

    def setPixMap(self, p):
        p = QPixmap.fromImage(p)
        p = p.scaled(640, 480, Qt.KeepAspectRatio)
        self.label.setPixmap(p)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())

simpledialog.askstring window from tkinter freezes

I have a problem using simpledialog.askstring from tkinter: it opens two pop-up windows. One of them is the regular one for entering a value, but the other is a blank window that freezes completely, and you have to force it to close, as shown in the image in the link. I'm using Spyder.
What could be happening?
This is what I have so far:
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtWidgets import QApplication, QMainWindow, QComboBox, QLabel, QPushButton, QGroupBox
import tkinter as tk
from tkinter import simpledialog
from PIL import ImageGrab
import sys
import cv2
import numpy as np
# import imageToString
import pyperclip
import webbrowser
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r"C:\Users\amc\AppData\Local\Programs\Tesseract-OCR\tesseract.exe"
class Communicate(QObject):
snip_saved = pyqtSignal()
class MyWindow(QMainWindow):
def __init__(self, parent=None):
super(MyWindow, self).__init__()
self.win_width = 340
self.win_height = 200
self.setGeometry(50, 50, self.win_width, self.win_height)
self.setWindowTitle("PSD Generator")
self.initUI()
def initUI(self):
#Define buttons
self.searchOpen = QPushButton(self)
self.searchOpen.setText("Digitilize Graph")
self.searchOpen.move(10,75)
self.searchOpen.setFixedSize(150,40)
self.searchOpen.clicked.connect(self.snip_graph_clicked)
self.copyPartNum = QPushButton(self)
self.copyPartNum.setText("Snip Table")
self.copyPartNum.move((self.win_width/2)+10, 75)
self.copyPartNum.setFixedSize(150,40)
self.copyPartNum.clicked.connect(self.snip_table_clicked)
self.notificationBox = QGroupBox("Notification Box", self)
self.notificationBox.move(10,135)
self.notificationBox.setFixedSize(self.win_width-20,55)
self.notificationText = QLabel(self)
self.notificationText.move(20, 145)
self.reset_notif_text()
def snip_graph_clicked(self):
self.snipWin = SnipWidget(False, True, self)
self.snipWin.notification_signal.connect(self.reset_notif_text)
self.snipWin.show()
self.notificationText.setText("Snipping... Press ESC to quit snipping")
self.update_notif()
def snip_table_clicked(self):
self.snipWin = SnipWidget(True, False, self)
self.snipWin.notification_signal.connect(self.reset_notif_text)
self.snipWin.show()
self.notificationText.setText("Snipping... Press ESC to quit snipping")
self.update_notif()
def reset_notif_text(self):
self.notificationText.setText("Idle...")
self.update_notif()
def define_notif_text(self, msg):
print('notification was sent')
self.notificationText.setText('notification was sent')
self.update_notif()
def update_notif(self):
self.notificationText.move(20, 155)
self.notificationText.adjustSize()
class SnipWidget(QMainWindow):
notification_signal = pyqtSignal()
def __init__(self, copy_table, scan_graph, parent):
super(SnipWidget, self).__init__()
self.copy_table = copy_table
self.scan_graph = scan_graph
root = tk.Tk()# instantiates window
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
self.setGeometry(0, 0, screen_width, screen_height)
self.setWindowTitle(' ')
self.begin = QtCore.QPoint()
self.end = QtCore.QPoint()
self.setWindowOpacity(0.3)
self.is_snipping = False
QtWidgets.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.CrossCursor)
)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.c = Communicate()
self.show()
if self.copy_table == True:
# the input dialog
BH_Ref = simpledialog.askstring(title="New PSD",prompt="Borehole Number Ref:")
Depth = simpledialog.askstring(title="New PSD",prompt="Sample depth (m):")
self.c.snip_saved.connect(self.searchAndOpen)
if self.scan_graph == True:
# the input dialog
BH_Ref = simpledialog.askstring(title="New PSD",prompt="Borehole Number Ref:")
Depth = simpledialog.askstring(title="New PSD",prompt="Sample depth (m):")
self.c.snip_saved.connect(self.IdAndCopy)
def paintEvent(self, event):
if self.is_snipping:
brush_color = (0, 0, 0, 0)
lw = 0
opacity = 0
else:
brush_color = (128, 128, 255, 128)
lw = 3
opacity = 0.3
self.setWindowOpacity(opacity)
qp = QtGui.QPainter(self)
qp.setPen(QtGui.QPen(QtGui.QColor('black'), lw))
qp.setBrush(QtGui.QColor(*brush_color))
qp.drawRect(QtCore.QRect(self.begin, self.end))
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
print('Quit')
QtWidgets.QApplication.restoreOverrideCursor();
self.notification_signal.emit()
self.close()
event.accept()
def mousePressEvent(self, event):
self.begin = event.pos()
self.end = self.begin
self.update()
def mouseMoveEvent(self, event):
self.end = event.pos()
self.update()
def mouseReleaseEvent(self, event):
x1 = min(self.begin.x(), self.end.x())
y1 = min(self.begin.y(), self.end.y())
x2 = max(self.begin.x(), self.end.x())
y2 = max(self.begin.y(), self.end.y())
self.is_snipping = True
self.repaint()
QtWidgets.QApplication.processEvents()
img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
self.is_snipping = False
self.repaint()
QtWidgets.QApplication.processEvents()
img = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2GRAY)
self.snipped_image = img
QtWidgets.QApplication.restoreOverrideCursor();
self.c.snip_saved.emit()
self.close()
self.msg = 'snip complete'
self.notification_signal.emit()
def searchAndOpen(self):
img_str = self.imgToStr(self.snipped_image)
msg_str = f'Text in snip is {img_str}'
def IdAndCopy(self):
img_str = self.imgToStr(self.snipped_image)
pyperclip.copy(img_str)
def find_str(self, image_data):
img = image_data
h,w = np.shape(img)
asp_ratio = float(w/h)
img_width = 500
img_height = int(round(img_width/asp_ratio))
desired_image_size = (img_width,img_height)
img_resized = cv2.resize(img, desired_image_size)
imgstr = str(pytesseract.image_to_string(img_resized))
print(imgstr)
return imgstr
def imgToStr(self, image):
# img_str = imageToString.main(image)
img_str = self.find_str(image)
return img_str
def window():
app = QApplication(sys.argv)
win = MyWindow()
win.show()
sys.exit(app.exec_())
window()
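A likely culprit, for what it's worth: the blank, frozen window is probably the hidden tk.Tk() root that SnipWidget.__init__ creates only to read the screen size; its mainloop never runs, so it just sits there unresponsive. A minimal, untested sketch of how to stay entirely in Qt, using QApplication.primaryScreen() for the screen size and QInputDialog.getText (Qt's own modal text-input dialog) instead of simpledialog.askstring:

from PyQt5.QtWidgets import QApplication, QInputDialog

def ask_string_qt(parent, title, prompt):
    # drop-in replacement for simpledialog.askstring in this context;
    # returns None if the user cancels, like askstring does
    text, ok = QInputDialog.getText(parent, title, prompt)
    return text if ok else None

# inside SnipWidget.__init__, instead of root = tk.Tk():
#     screen = QApplication.primaryScreen().size()
#     screen_width, screen_height = screen.width(), screen.height()
#     BH_Ref = ask_string_qt(self, "New PSD", "Borehole Number Ref:")
#     Depth = ask_string_qt(self, "New PSD", "Sample depth (m):")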

Stream multiple images from OpenCV at the same time

I am working on a GUI in PyQt and I want to display both the original and the processed image coming from OpenCV. I have an attempt using two pyqtSlots, but they show the same image. I am not sure how to do this without repeating the same code for each image, which seems inefficient.
from pypylon import pylon
# from pypylon_opencv_viewer import BaslerOpenCVViewer
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
import numpy as np
from scipy.interpolate import interp2d


class Thread(QThread):
    # changePixmap = pyqtSignal(QImage)
    changePixmap_raw = pyqtSignal(QImage)
    changePixmap_th = pyqtSignal(QImage)

    def run(self):
        def process_image(img):
            *SNIP*
            return result

        serial_number = '23437639'
        info = None
        for i in pylon.TlFactory.GetInstance().EnumerateDevices():
            if i.GetSerialNumber() == serial_number:
                info = i
                break
        else:
            print('Camera with {} serial number not found '.format(serial_number))
        if info is not None:
            camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(info))
            camera.Open()
            # Grabbing continuously (video) with minimal delay
            camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
            converter = pylon.ImageFormatConverter()
            # converting to opencv bgr format
            converter.OutputPixelFormat = pylon.PixelType_BGR8packed
            converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
            while camera.IsGrabbing():
                grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
                if grabResult.GrabSucceeded():
                    img = converter.Convert(grabResult)
                    img = img.GetArray()
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    h, w = img.shape
                    result_img = process_image(img)
                    bytesPerLine = w
                    original_QT = QImage(img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    threshold_QT = QImage(result_img.data, w, h, bytesPerLine, QImage.Format_Grayscale8)
                    p1 = original_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    p2 = threshold_QT.scaled(640, 480, Qt.KeepAspectRatio)
                    self.changePixmap_raw.emit(p1)
                    self.changePixmap_th.emit(p2)


class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage_raw(self, image):
        self.label_raw.setPixmap(QPixmap.fromImage(image))

    @pyqtSlot(QImage)
    def setImage_th(self, image):
        self.label_th.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        # self.setWindowTitle(self.title)
        # self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label_raw = QLabel(self)
        self.label_raw.move(280, 120)
        self.label_raw.resize(640, 480)
        self.label_th = QLabel(self)
        self.label_th.move(1000, 120)
        self.label_th.resize(640, 480)
        th = Thread(self)
        th.changePixmap_raw.connect(self.setImage_raw)
        th.changePixmap_th.connect(self.setImage_th)
        th.start()
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
I am hoping to add two more views and I would hate to add 2 more emitters etc... to implement that.
Thanks in advance.
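One way to avoid a separate signal and slot for every view (a minimal, untested sketch rather than the question's actual pylon code, whose grabbing loop is elided here): emit a single signal that carries a view name together with the QImage, and keep the labels in a dict keyed by those names, so adding a view only means adding one dict entry.

import sys
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QHBoxLayout


class Thread(QThread):
    # one signal for all views: it carries the view name and the image
    changePixmap = pyqtSignal(str, QImage)

    def run(self):
        # wherever the frames are produced (the pylon loop in the question),
        # emit each image under its view name, e.g.:
        # self.changePixmap.emit("raw", p1)
        # self.changePixmap.emit("threshold", p2)
        pass


class App(QWidget):
    def __init__(self):
        super().__init__()
        layout = QHBoxLayout(self)
        # adding another view is just another entry in this dict
        self.labels = {name: QLabel(self) for name in ("raw", "threshold")}
        for lbl in self.labels.values():
            layout.addWidget(lbl)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()

    @pyqtSlot(str, QImage)
    def setImage(self, name, image):
        self.labels[name].setPixmap(QPixmap.fromImage(image))


if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = App()
    w.show()
    sys.exit(app.exec_())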

