PyQt label RTSP video streaming delay - Python

My minimal working code snippet is below. When I read from an RTSP IP camera that is on the same network as my computer, I get a delay of around one second.
Is this because I am using Python, or because of this camera? Or do you have a suggestion in case I am doing something wrong in the code?
from PyQt5 import QtCore
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QSizePolicy, QLabel
from CalibrationGUI.qtgui.CameraThread import CaptureIpCameraFramesWorker
class VideoLabel(QLabel):
    """QLabel that displays frames streamed from an IP-camera worker thread."""

    def __init__(self, camera_unit, ui_state_obj, parentGiven=None):
        """
        :param camera_unit: backend object that fetches frames from the camera
        :param ui_state_obj: shared UI-state object handed to the worker
        :param parentGiven: optional parent widget
        """
        super(VideoLabel, self).__init__(parent=parentGiven)
        self.ui_state_obj = ui_state_obj
        self.camera_unit = camera_unit
        # Let layouts resize the label freely; the pixmap is stretched to fit.
        self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        self.setScaledContents(True)
        self.installEventFilter(self)
        self.setMaximumSize(1265536, 1265536)
        self.setupUI()

    def setupUI(self):
        """Create the capture worker and wire its frame signal to this label."""
        self.Camworker = CaptureIpCameraFramesWorker(self.camera_unit, self.ui_state_obj)
        # Connect the slot directly: the original lambda wrapper added
        # per-frame overhead and made the connection impossible to disconnect.
        self.Camworker.ImageUpdated.connect(self.ShowCamera)

    @QtCore.pyqtSlot(QImage)
    def ShowCamera(self, frame: QImage) -> None:
        """Display the most recent frame emitted by the worker."""
        self.frame = frame
        self.setPixmap(QPixmap.fromImage(frame))

    def startStream(self):
        """Start the background capture thread."""
        self.Camworker.start()

    def stopStream(self):
        """Ask the worker thread to stop.

        NOTE(review): QThread.quit() only exits a thread's event loop; the
        worker's run() is a plain while-loop, so quit() alone does not end
        it.  A worker-side stop flag is needed to actually terminate the
        thread — confirm against CaptureIpCameraFramesWorker.
        """
        if self.Camworker.isRunning():
            self.Camworker.quit()

    def get_frame(self):
        """Return the latest raw frame captured by the worker (or None)."""
        return self.Camworker.get_frame()
import cv2
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QThread, Qt
class CaptureIpCameraFramesWorker(QThread):
    """Background thread that pulls frames from a camera unit and emits
    them as scaled RGB QImages via the ImageUpdated signal."""

    # Signal emitted when a new image or a new frame is ready.
    ImageUpdated = pyqtSignal(QImage)

    def __init__(self, camera_unit, UI_state_obj) -> None:
        super(CaptureIpCameraFramesWorker, self).__init__()
        # Declare and initialize instance variables.
        self.camera_unit = camera_unit
        self.name = camera_unit.get_name()
        self.__thread_active = True    # run() loops while this is True
        self.fps = 0
        self.__thread_pause = False    # when True, run() skips frame grabbing
        self.readframe = None          # latest raw frame (numpy array) or None

    def get_frame(self):
        """Return the most recently captured raw frame, or None."""
        return self.readframe

    def stop(self) -> None:
        """Request the capture loop to exit.

        Fix: the original flag could never be cleared from outside the
        thread, so the while-loop in run() was unstoppable (QThread.quit()
        does not break a busy loop).
        """
        self.__thread_active = False

    def run(self) -> None:
        """Grab frames until stop() is called, emitting each as a 720p QImage."""
        while self.__thread_active:
            if not self.__thread_pause:
                # camera_unit.get_current_image() returns the latest frame as
                # a numpy array; the camera unit fetches from the stream
                # actively in the background.
                frame = self.camera_unit.get_current_image()
                if frame is not None:
                    self.readframe = frame
                    height, width, channels = frame.shape
                    bytes_per_line = width * channels
                    # cv2 delivers BGR; Qt expects RGB.
                    cv_rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    qt_rgb_image = QImage(cv_rgb_image.data, width, height,
                                          bytes_per_line, QImage.Format_RGB888)
                    # .scaled() copies pixel data, so the emitted image does
                    # not alias the short-lived numpy buffer above.
                    qt_rgb_image_scaled = qt_rgb_image.scaled(1280, 720, Qt.KeepAspectRatio)  # 720p
                    self.ImageUpdated.emit(qt_rgb_image_scaled)
            else:
                # Avoid burning a CPU core while paused.
                self.msleep(10)
        # Tells the thread's event loop to exit with return code 0 (success).
        self.quit()
I have modified the code in https://github.com/god233012yamil/Streaming-IP-Cameras-Using-PyQt-and-OpenCV/blob/main/Streaming_IP_Camera_Using_PyQt_OpenCV.py#L150

Related

Display webcam feed in Qt window

I'm a bit stuck here. I would like to display a webcam live feed in a PyQt5 window.
When I push the button the feed has to start, the button turns green and its text changes to "Stop camera"; on the next click the feed has to stop, the button reverts to its original state, and the feed is replaced with an image.
At the moment i only get a still image.
As soon as i get this working i would like to add some threading
Here is the code (updated):
import os
import threading
import timeit
import cv2
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtGui import *
from PyQt5.uic import loadUi
class Worker1(QThread):
    """Thread that reads webcam frames with OpenCV and emits them as QImages."""

    ImageUpdate = pyqtSignal(QImage)

    def run(self):
        """Capture frames from camera 0 until stop() clears the flag."""
        self.ThreadActive = True
        self.Capture = cv2.VideoCapture(0)
        while self.ThreadActive:
            ret, frame = self.Capture.read()
            if ret:
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                flipped = cv2.flip(rgb, 1)
                h, w, ch = flipped.shape
                # Pass bytesPerLine explicitly: without it QImage assumes
                # 32-bit-aligned rows and renders skewed images for widths
                # whose row size is not a multiple of 4.
                image = QImage(flipped.data, w, h, ch * w, QImage.Format_RGB888)
                pic = image.scaled(640, 480, Qt.KeepAspectRatio)
                self.ImageUpdate.emit(pic)
        self.Capture.release()
        cv2.destroyAllWindows()

    def stop(self):
        """Signal the loop to end and wait for run() to finish.

        Fix: the original called terminate(), which kills the thread at an
        arbitrary point and can skip Capture.release(); wait() lets the
        loop exit and clean up normally.
        """
        self.ThreadActive = False
        self.wait()
class FaceIdWindow(QtWidgets.QMainWindow):
    """Main window for the face-recognition UI; toggles the webcam worker thread."""

    def __init__(self):
        super(FaceIdWindow, self).__init__()
        self.ui = loadUi("uidesign/facereco/FaceId.ui", self)
        self.ui.cmdChoosePicture.clicked.connect(self.ChoosePicture)
        self.ui.cmdStartCamera.clicked.connect(self.StartCamera)
        self.ui.cmdTrainFace.clicked.connect(self.TrainFace)
        self.ui.cmdProcess.clicked.connect(self.Process)
        self.status_camera = "STOPPED"

    def StartCamera(self):
        """Toggle the camera feed and update the button's colour/label."""
        start = timeit.default_timer()  # (original duplicated this line)
        print("Start StartCamera\n")
        print(self.status_camera)
        if self.status_camera == "STOPPED":
            self.status_camera = "STARTED"
            self.ui.cmdStartCamera.setStyleSheet("background-color: green")
            self.ui.cmdStartCamera.setText("Stop camera")
            self.Worker1 = Worker1()
            # Connect the signal before starting the thread so the very
            # first emitted frames are not lost.
            self.Worker1.ImageUpdate.connect(self.ImageUpdateSlot)
            self.Worker1.start()
        else:
            self.status_camera = "STOPPED"
            self.Worker1.stop()
            # Replace the feed with the placeholder clip-art image.
            image_path = str(os.getcwd())
            image_path = image_path + "/assets/clipart/clipartfaceid2.png"
            self.lblPicture.setPixmap(QtGui.QPixmap(image_path))
            self.ui.cmdStartCamera.setStyleSheet("background-color: ")
            self.ui.cmdStartCamera.setText("Start camera")
        print("Stop StartCamera\n")
        end = timeit.default_timer()
        print("Process Time: ", (end - start))

    def ImageUpdateSlot(self, Image):
        """Show the latest frame emitted by the worker."""
        self.lblPicture.setPixmap(QPixmap.fromImage(Image))
Any suggestions ?
Cheers , John
It seems I wasn't starting and stopping my thread in the right place. The code is updated, but if anyone has any improvements, don't hesitate! Cheers, John

PyQt widget keeps increasing in size and goes out the window

I have written an application in PyQt5. I am basically displaying a camera feed (in this case my web cam), but the problem is that the frame size keeps on increasing at run time and ultimately goes out of my laptop screen. I'm unable to figure out what the problem is.
Can anyone please explain what I'm doing wrong here?
Below is the code snippet.
from PyQt5 import QtCore, QtGui, QtWidgets
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils
class CameraWidget(QtWidgets.QWidget):
    """Independent camera feed
    Uses threading to grab IP camera frames in the background
    #param width - Width of the video frame
    #param height - Height of the video frame
    #param stream_link - IP/RTSP/Webcam link
    #param aspect_ratio - Whether to maintain frame aspect ratio or force into frame
    """
    def __init__(self, width=0, height=0, aspect_ratio=False, parent=None, deque_size=1):
        super(CameraWidget, self).__init__(parent)
        # Initialize deque used to store frames read from the stream;
        # maxlen=1 keeps only the newest frame, minimizing display latency.
        self.deque = deque(maxlen=deque_size)
        self.maintain_aspect_ratio = aspect_ratio
        self.camera_stream_link = 0
        # Flag to check if camera is valid/working
        self.online = False
        self.capture = None
        self.video_frame = QtWidgets.QLabel()
        self.load_network_stream()
        # Start background frame grabbing
        self.get_frame_thread = Thread(target=self.get_frame, args=())
        self.get_frame_thread.daemon = True
        self.get_frame_thread.start()
        # Periodically set video frame to display
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.set_frame)
        # NOTE(review): QTimer.start() takes an interval in whole milliseconds;
        # passing .5 is either truncated to 0 (fire as fast as the event loop
        # allows) or rejected, depending on the PyQt version -- confirm intent.
        self.timer.start(.5)
        print('Started camera: {}'.format(self.camera_stream_link))

    def load_network_stream(self):
        """Verifies stream link and open new stream if valid"""
        def load_network_stream_thread():
            # Runs in a daemon thread so a slow/unreachable camera does not
            # block the GUI while the link is verified and opened.
            if self.verify_network_stream(self.camera_stream_link):
                self.capture = cv2.VideoCapture(self.camera_stream_link)
                self.online = True
        self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
        self.load_stream_thread.daemon = True
        self.load_stream_thread.start()

    def verify_network_stream(self, link):
        """Attempts to receive a frame from given link"""
        # Open a throwaway capture just to test connectivity, then release it.
        cap = cv2.VideoCapture(link)
        if not cap.isOpened():
            return False
        cap.release()
        return True

    def get_frame(self):
        # time.sleep(5)
        """Reads frame, resizes, and converts image to pixmap"""
        # Runs forever in a daemon thread; exits only with the process.
        while True:
            try:
                if self.capture.isOpened() and self.online:
                    # Read next frame from stream and insert into deque
                    status, frame = self.capture.read()
                    if status:
                        self.deque.append(frame)
                    else:
                        # Read failed: mark the stream offline so the else
                        # branch below attempts a reconnect on the next pass.
                        self.capture.release()
                        self.online = False
                else:
                    # Attempt to reconnect
                    print('attempting to reconnect', self.camera_stream_link)
                    self.load_network_stream()
                    self.spin(2)
                self.spin(.001)
            except AttributeError:
                # self.capture may still be None while the loader thread is
                # connecting; ignore and retry.
                pass

    def spin(self, seconds):
        """Pause for set amount of seconds, replaces time.sleep so program doesnt stall"""
        # Busy-waits while pumping the Qt event loop so the GUI stays responsive.
        time_end = time.time() + seconds
        while time.time() < time_end:
            QtWidgets.QApplication.processEvents()

    def set_frame(self):
        """Sets pixmap image to video frame"""
        if not self.online:
            self.spin(1)
            return
        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]
            # Keep frame aspect ratio
            # NOTE(review): screen_width/screen_height are only defined after
            # set_frame_params() has been called -- confirm call order.
            if self.maintain_aspect_ratio:
                self.frame = imutils.resize(frame, width=self.screen_width)
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))
            # cv2 delivers BGR; Qt expects RGB.
            self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
            h, w, ch = self.frame.shape
            bytesPerLine = ch * w
            # Convert to pixmap and set to video frame
            self.img = QtGui.QImage(self.frame, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
            self.pix = QtGui.QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)

    def set_frame_params(self, width, height):
        # Target display size, set by the owning window on show/resize.
        self.screen_width = width
        self.screen_height = height

    def get_video_frame(self):
        # Hand the internal QLabel to the caller for embedding in a layout.
        self.video_frame.setScaledContents(True)
        return self.video_frame
class MainWindow(QtWidgets.QMainWindow):
    """Window that hosts a single CameraWidget inside a colored middle frame."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        # Middle frame
        self.mid_frame = QtWidgets.QFrame()
        self.mid_frame.setStyleSheet("background-color: rgb(153, 187, 255)")
        self.camera = CameraWidget()
        # Create camera widgets
        print('Creating Camera Widgets...')
        self.video_frame = self.camera.get_video_frame()
        # Lay the camera label out inside the middle frame.
        self.mid_layout = QtWidgets.QHBoxLayout()
        self.mid_layout.addWidget(self.video_frame)
        self.mid_frame.setLayout(self.mid_layout)
        # Wrap everything in a margin-less central widget.
        self.widget = QtWidgets.QWidget()
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(self.mid_frame)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.widget.setLayout(self.layout)
        self.setCentralWidget(self.widget)

    def event(self, e):
        """Propagate show/resize geometry to the camera widget."""
        if e.type() in (QtCore.QEvent.Show, QtCore.QEvent.Resize):
            frame_w = self.mid_frame.width()
            frame_h = self.mid_frame.height()
            print("resize ", frame_w, frame_h)
            self.camera.set_frame_params(frame_w - 10, frame_h - 10)
        return super().event(e)
if __name__ == '__main__':
    # Create main application window
    application = QtWidgets.QApplication([])
    application.setStyle(QtWidgets.QStyleFactory.create("Cleanlooks"))
    main_window = MainWindow()
    main_window.showMaximized()
    sys.exit(application.exec_())

QSplitter, QWidget resizing, setSizes(), setStretchFactor(), and sizeHint() - how to make it all work together?

I'm struggling with working out how to make all the stuff in the title work together in a certain situation. I'm using PyQt5 here, but feel free to respond with regular C++ Qt as I can translate pretty easily.
I'm attempting to make a UI with the following:
A main form (inherits from QWidget, could just as well use QMainWindow)
The main form should contain a QSplitter oriented vertically containing a QTextEdit at the top and containing a custom class (inheriting from QLabel) to show an image taking up the rest of the space.
The QTextEdit at the top should default to about 3 lines of text high, but this should be resizable to any reasonable extreme via the QSplitter.
The custom class should resize the image to be as big as possible given the available space while maintaining the aspect ratio.
Of course the tricky part is getting everything to resize correctly depending on how big a monitor the user has and how the move the form around. I need this to run on screens as small as about 1,000 px width and perhaps as big as 3,000+ px width.
Here is what I have so far:
# QSplitter3.py
import cv2
import numpy as np
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QLabel, QGridLayout, QSizePolicy, \
QFrame, QTabWidget, QTextEdit, QSplitter
from PyQt5.QtGui import QImage, QPixmap, QPainter
from PyQt5.Qt import Qt
from PyQt5.Qt import QPoint
def main():
    """Build the Qt application, show the main form, and enter the event loop."""
    app = QApplication([])
    screen_size = app.primaryScreen().size()
    print(f'screenSize = {screen_size.width()}, {screen_size.height()}')
    main_form = MainForm(screen_size)
    main_form.show()
    app.exec()
# end function
class MainForm(QWidget):
    """Main form: a vertical splitter with a message QTextEdit above an ImageWidget."""

    def __init__(self, screenSize):
        """
        :param screenSize: QSize of the primary screen, used to size the window
        """
        super().__init__()
        # set the title and size of the Qt QWidget window
        self.setWindowTitle('Qt Window')
        # Fix: setGeometry() requires integer pixel values; the original
        # passed floats (width * 0.2 etc.), which PyQt5 rejects.
        self.setGeometry(int(screenSize.width() * 0.2), int(screenSize.height() * 0.2),
                         int(screenSize.width() * 0.5), int(screenSize.height() * 0.7))
        # declare a QTextEdit to show user messages at the top, set the font size and read only property
        self.txtUserMessages = QTextEdit()
        self.setFontSize(self.txtUserMessages, 14)
        self.txtUserMessages.setReadOnly(True)
        # make the min height of the text box about 2 lines of text high
        self.txtUserMessages.setMinimumHeight(self.getTextEditHeightForNLines(self.txtUserMessages, 2))
        # populate the user messages text box with some example text
        self.txtUserMessages.append('message 1')
        self.txtUserMessages.append('message 2')
        self.txtUserMessages.append('message 3')
        self.txtUserMessages.append('stuff here')
        self.txtUserMessages.append('bla bla bla')
        self.txtUserMessages.append('asdasdsadds')
        # instantiate the custom ImageWidget class below to show the image
        self.imageWidget = ImageWidget()
        self.imageWidget.setMargin(0)
        self.imageWidget.setContentsMargins(0, 0, 0, 0)
        self.imageWidget.setScaledContents(True)
        self.imageWidget.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        self.imageWidget.setAlignment(Qt.AlignCenter)
        # declare the splitter, then add the user message text box and image widget
        self.splitter = QSplitter(Qt.Vertical)
        self.splitter.addWidget(self.txtUserMessages)
        self.splitter.addWidget(self.imageWidget)
        defaultTextEditHeight = self.getTextEditHeightForNLines(self.txtUserMessages, 3)
        print('defaultTextEditHeight = ' + str(defaultTextEditHeight))
        # TODO: the hard-coded 1000 is screen-dependent; prefer
        # splitter.setStretchFactor(1, 1) plus a sizeHint() override on the
        # text edit for a size-independent initial split.
        self.splitter.setSizes([defaultTextEditHeight, 1000])
        # set the main form's layout to the QGridLayout
        self.gridLayout = QGridLayout()
        self.gridLayout.addWidget(self.splitter)
        self.setLayout(self.gridLayout)
        # open the image in OpenCV format
        self.openCvImage = cv2.imread('image.jpg')
        if self.openCvImage is None:
            print('error opening image')
            return
        # end if
        # convert the OpenCV image to QImage
        self.qtImage = openCvImageToQImage(self.openCvImage)
        # show the QImage on the ImageWidget
        self.imageWidget.setPixmap(QPixmap.fromImage(self.qtImage))
    # end function

    def setFontSize(self, widget, fontSize):
        """Set *widget*'s font to *fontSize* points."""
        font = widget.font()
        font.setPointSize(fontSize)
        widget.setFont(font)
    # end function

    def getTextEditHeightForNLines(self, textEdit, numLines):
        """Approximate pixel height of *numLines* lines of text in *textEdit*."""
        fontMetrics = textEdit.fontMetrics()
        rowHeight = fontMetrics.lineSpacing()
        # 1.21 is an empirical fudge factor (presumably covers frame and
        # document margins) -- confirm against the target style/font.
        rowHeight = rowHeight * 1.21
        textEditHeight = int(numLines * rowHeight)
        return textEditHeight
    # end function
# end class
def openCvImageToQImage(openCvImage):
    """Convert a BGR OpenCV image (numpy array) to a QImage."""
    rows, cols, channels = openCvImage.shape
    stride = channels * cols
    # Build an RGB888 image from the raw buffer, then swap R/B channels
    # since OpenCV stores pixels as BGR.
    return QImage(openCvImage.data, cols, rows, stride, QImage.Format_RGB888).rgbSwapped()
# end function
class ImageWidget(QLabel):
    """QLabel that paints its pixmap as large as possible while keeping aspect ratio."""

    def __init__(self):
        # Fix: the original called super(QLabel, self).__init__(), which
        # starts the MRO *after* QLabel and skips QLabel's own initializer.
        super().__init__()
    # end function

    def setPixmap(self, pixmap):
        # Store the pixmap on the instance (intentionally shadowing
        # QLabel.pixmap()); paintEvent() below draws it manually.
        self.pixmap = pixmap
    # end function

    def paintEvent(self, event):
        """Draw the stored pixmap scaled to fit and centered in the widget."""
        size = self.size()
        painter = QPainter(self)
        scaledPixmap = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode=Qt.SmoothTransformation)
        # Fix: QPoint coordinates must be ints; the original passed the
        # float result of "/ 2" to setX/setY, which PyQt5 rejects.
        point = QPoint((size.width() - scaledPixmap.width()) // 2,
                       (size.height() - scaledPixmap.height()) // 2)
        painter.drawPixmap(point, scaledPixmap)
    # end function
# end class
# Script entry point.
if __name__ == '__main__':
    main()
Currently I'm testing on a 2560x1440 screen and with the magic 1000 entered it works on this screen size, but I really don't like the hard-coded 1000. I suspect the area of the code where I'm missing something is this part:
# declare the splitter, then add the user message text box and tab widget
self.splitter = QSplitter(Qt.Vertical)
self.splitter.addWidget(self.txtUserMessages)
self.splitter.addWidget(self.imageWidget)
defaultTextEditHeight = self.getTextEditHeightForNLines(self.txtUserMessages, 3)
print('defaultTextEditHeight = ' + str(defaultTextEditHeight))
# How can I use defaultTextEditHeight height here, but still allow resizing ??
# I really don't like this line, the 1000 is a guess and check that may only work with one screen size !!!
self.splitter.setSizes([defaultTextEditHeight, 1000])
# Should setStretchFactor be used here ?? This does not seem to work
# self.splitter.setStretchFactor(0, 0)
# self.splitter.setStretchFactor(1, 1)
# What about sizeHint() ?? Should that be used here, and if so, how ??
# set the main form's layout to the QGridLayout
self.gridLayout = QGridLayout()
self.gridLayout.addWidget(self.splitter)
With the hard coded 1000 and on this particular screen it works pretty well:
To reiterate (hopefully more clearly) I'm attempting to be able to remove the hard-coded 1000 and command Qt as follows:
Initially make the form take up about 2/3 of the screen
Initially make the text box about 3 lines of text high (min of 2 lines of text high)
Allow the user to use the QSplitter to resize the text box and image at any time and without limit
When the form is resized (or minimized or maximized), resize the text box and image proportionally per how the user had them at the time of the resize
I've tried about every combination of the stuff mentioned in the title and so far in this post but I've not been able to get this functionality, except with the hard-coded 1000 that probably won't work with a different screen size.
How can I remove the hard-coded 1000 and modify the above to achieve the intended functionality?
In my solution I will not take into account the part of opencv since it adds unnecessary complexity.
The solution is to use the setStretchFactor() method, in this case override the sizeHint() method of the QTextEdit to set the initial size and setMinimumHeight() for the minimum height. To show the image I use a QGraphicsView instead of the QLabel since the logic is easier.
from PyQt5 import QtCore, QtGui, QtWidgets
class TextEdit(QtWidgets.QTextEdit):
    """Read-only 14pt QTextEdit whose preferred height is three lines of text
    (minimum two), for use as the top pane of a QSplitter."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setReadOnly(True)
        font = self.font()
        font.setPointSize(14)
        self.setFont(font)
        # Minimum height: two lines of text.
        self.setMinimumHeight(self.heightForLines(2))

    def heightForLines(self, n):
        """Return the pixel height of *n* text lines plus document margins.

        Fix: cast to int -- documentMargin() returns a float, and Qt
        geometry setters (setMinimumHeight, QSize.setHeight) require
        integer pixel values.
        """
        return int(
            n * self.fontMetrics().lineSpacing() + 2 * self.document().documentMargin()
        )

    def showEvent(self, event):
        # Scroll to the top when the widget is first shown.
        self.verticalScrollBar().setValue(self.verticalScrollBar().minimum())

    def sizeHint(self):
        # Preferred (initial) height: three lines of text.
        s = super().sizeHint()
        s.setHeight(self.heightForLines(3))
        return s
class GraphicsView(QtWidgets.QGraphicsView):
    """Frameless view showing a single pixmap scaled to fit, keeping aspect ratio."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setFrameShape(QtWidgets.QFrame.NoFrame)
        # Blend the view's background into the window colour.
        self.setBackgroundBrush(self.palette().brush(QtGui.QPalette.Window))
        self._pixmap_item = QtWidgets.QGraphicsPixmapItem()
        scene = QtWidgets.QGraphicsScene(self)
        scene.addItem(self._pixmap_item)
        self.setScene(scene)

    def setPixmap(self, pixmap):
        """Replace the displayed pixmap."""
        self._pixmap_item.setPixmap(pixmap)

    def resizeEvent(self, event):
        # Re-fit and re-center the pixmap whenever the viewport changes size.
        self.fitInView(self._pixmap_item, QtCore.Qt.KeepAspectRatio)
        self.centerOn(self._pixmap_item)
        super().resizeEvent(event)
class Widget(QtWidgets.QWidget):
    """Demo form: TextEdit over GraphicsView in a vertical splitter."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.textedit = TextEdit()
        for i in range(10):
            self.textedit.append("Message {}".format(i))
        self.graphicsview = GraphicsView()
        self.graphicsview.setPixmap(QtGui.QPixmap("image.jpg"))
        splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
        splitter.addWidget(self.textedit)
        splitter.addWidget(self.graphicsview)
        # Give all extra vertical space to the image pane (index 1); the
        # text edit keeps its sizeHint() height.
        splitter.setStretchFactor(1, 1)
        lay = QtWidgets.QGridLayout(self)
        lay.addWidget(splitter)
        screenSize = QtWidgets.QApplication.primaryScreen().size()
        # Fix: setGeometry() requires ints; the original passed the float
        # products directly, which PyQt5 rejects.
        self.setGeometry(
            int(screenSize.width() * 0.2),
            int(screenSize.height() * 0.2),
            int(screenSize.width() * 0.5),
            int(screenSize.height() * 0.7),
        )
def main():
    """Run the demo application until the window is closed."""
    application = QtWidgets.QApplication([])
    widget = Widget()
    widget.resize(640, 480)
    widget.show()
    application.exec_()


if __name__ == "__main__":
    main()

PyQt 5: QPainter returns false while rendering QGraphicsScene to a QImage

Currently I am working on a program, to display SIP-Trace log files. It is written in Python 3.7 using the PyQt 5(.11.3) module to load and operate a GUI made in QDesigner. As a main feature it parses the SIP-Trace file and displays it as a sequence diagram to a QGraphicsScene with QGraphicsObjects.
My problem lies in the following: For later reference, the content of the QGraphicsScene should be saved as an image file, like .jpg or .png. In the Qt/PyQt documentation I found the useful sounding command QGraphicsScene.render() which renders the content of the GraphicsScene to a saveable file like QImage using QPainter. In the last days, I tried a couple of ways/sample codes found here and elsewhere, but cannot render the GraphicsScene to the QImage much less to an image file. Since I am rather new to Python and Qt, I think I am missing some basic setting somewhere. Following is a minimal version of my code.
# -*- coding: utf8 -*-
"""Class for getting a sequence diagram of a sip traffic"""
from PyQt5.QtWidgets import *
from PyQt5 import uic
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
class VoipGui(QMainWindow):
    """ Class that handles the interaction with the UI """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.ui = uic.loadUi("main_window.ui", self)
        self.showMaximized()
        self.sequence_scene = QGraphicsScene()
        self.ui.graphicsView.setScene(self.sequence_scene)
        # sets the spacing between nodes
        # For more than three nodes columns should be generated in a more automatic way
        self.left_column = 51
        self.middle_column = 381
        self.right_column = 711
        self.flow_height = 60  # Sets the spacing between the arrows in the flowchart
        # connect menu actions to their slots
        self.actionOpenFile.triggered.connect(self.on_open_file)
        self.actionCloseFile.triggered.connect(self.on_close_file)
        self.actionCloseProgram.triggered.connect(self.close)
        self.actionSaveFile.triggered.connect(self.save_seq_image)

    def on_open_file(self):
        """Dummy version of the open file dialog"""
        self.draw_node(self.left_column, 5, "192.168.2.1", 10)
        self.draw_node(self.middle_column, 5, "192.168.2.22", 10)

    def on_close_file(self):
        """Clear the text browser and the sequence diagram."""
        self.ui.textBrowser.clear()
        self.sequence_scene.clear()

    def save_seq_image(self):
        """Save the rendered sequence scene to an image file for later use.

        Fix: the original painted into a default-constructed QImage(),
        whose size is QSize(0, 0), so QPainter.begin() failed and nothing
        could be saved.  The image must be created with an explicit size
        and format, and filled before painting.
        """
        rect_f = self.sequence_scene.sceneRect()
        img = QImage(QSize(640, 480), QImage.Format_RGB888)
        img.fill(Qt.white)
        p = QPainter(img)
        self.sequence_scene.render(p, target=QRectF(img.rect()), source=rect_f)
        p.end()
        saving = img.save("save.jpg")
        print("Saving Pass" if saving else "Saving Not Pass")

    def draw_node(self, x_pos, y_pos, ip_address, y_stops):
        """Participating devices are displayed as these nodes"""
        width = 100.0
        height = 40.0
        pc_box = QGraphicsRectItem(x_pos - 50, y_pos, width, height)
        self.sequence_scene.addItem(pc_box)
        pc_ip = QGraphicsTextItem("%s" % ip_address)
        pc_ip.setPos(x_pos - 50, y_pos)
        self.sequence_scene.addItem(pc_ip)
        node_line = QGraphicsLineItem(x_pos, y_pos + 40, x_pos, y_pos + (y_stops * self.flow_height))
        self.sequence_scene.addItem(node_line)
def show_window():
    """Launch the application and block until the window closes."""
    app = QApplication(sys.argv)
    main_win = VoipGui()
    main_win.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    show_window()
The problem is simple: in render() you are indicating that the size of the target is equal to that of the QImage — and what size is that QImage? Since you construct it as QImage(), its size is QSize(0, 0), so the image cannot be generated. The solution is to create a QImage with an explicit size:
def save_seq_image(self):
    """ Here lies the problem: Save the rendered sequence scene to file for later use"""
    # Scene bounds used as the render source rectangle.
    rect_f = self.sequence_scene.sceneRect()
    # Create the target image with an explicit size and format (a
    # default-constructed QImage() is 0x0 and cannot be painted on), and
    # fill it so the background is white rather than uninitialized memory.
    img = QImage(QSize(640, 480), QImage.Format_RGB888)
    img.fill(Qt.white)
    p = QPainter(img)
    # Paint the whole scene into the image, then finish the painter before saving.
    self.sequence_scene.render(p, target=QRectF(img.rect()), source=rect_f)
    p.end()
    saving = img.save("save.jpg")
    print("Saving Pass" if saving else "Saving Not Pass")
Output:

Repeatedly displaying random-noise images using NumPy and PyQt4

I'm having trouble using a QTimer to repeatedly
generate a height-by-width-by-3 numpy array
convert the numpy array to a Qt-friendly image, and
display the image in the main Qt window
(Eventually the images won't be random.)
Here is the relevant code.
import numpy as np
from scipy.misc.pilutil import toimage
from PIL.ImageQt import ImageQt
def nparrayToQPixmap(arrayImage):
    """Convert a height-by-width-by-3 numpy array to a QPixmap.

    Fix: explicitly convert the QImage to ARGB32.  Leaving the
    ImageQt-derived QImage in its original format triggers a faulty
    QImage-to-QPixmap conversion that corrupts the pixmap and leads to a
    segmentation fault on the second use.
    """
    pilImage = toimage(arrayImage)
    qtImage = ImageQt(pilImage)
    qImage = QtGui.QImage(qtImage).convertToFormat(QtGui.QImage.Format_ARGB32)
    qPixmap = QtGui.QPixmap(qImage)
    return qPixmap
class DetectionWidget(QtGui.QWidget):
    """Widget that repeatedly displays a random-noise image driven by a QTimer."""

    # NOTE(review): relies on WIDTH and HEIGHT attributes that are not defined
    # in this snippet -- presumably class-level constants; confirm.
    def __init__(self):
        super(DetectionWidget, self).__init__()
        self.timer = QtCore.QTimer()
        self.init_UI()

    def init_UI(self):
        # Fix the widget and label to the configured frame size.
        self.setFixedSize(self.WIDTH, self.HEIGHT)
        self.label = QtGui.QLabel(self)
        self.label.resize(self.WIDTH, self.HEIGHT)
        # Regenerate and display a new image once per second.
        self.timer.timeout.connect(self.onTimeout)
        self.timer.start(1000)

    def onTimeout(self):
        # New random height-by-width-by-3 float image each tick.
        npImage = np.random.rand(self.HEIGHT, self.WIDTH, 3)
        qPixmap = nparrayToQPixmap(npImage)
        self.label.setPixmap(qPixmap)
This displays the FIRST image, but Python segmentation faults on the second iteration at self.label.setPixmap(qPixmap). Also, it segmentation faults even if I DON'T update the label but instead save the image using qPixmap.save(...), which makes me think that the resulting qPixmap is somehow corrupt after the first iteration.
I will appreciate any help!
This seems to be because of a bug in the QImage to QPixmap conversion. The code works as long as the QImage is in the right format..
qImage = QtGui.QImage(qtImage)
becomes
qImage = QtGui.QImage(qtImage).convertToFormat(QtGui.QImage.Format_ARGB32)

Categories

Resources