I'm working with tif stacks and QImage appears to be skewing some images to a 45 degree angle. Matplotlib is able to display the images without a problem in both test cases (links to two tif stacks are provided below) so I don't think I've screwed up my array somewhere.
Here's a working example: (NOTE: this example only shows the first image in the tif stack for simplicity)
import matplotlib.pyplot as plt
import sys
from PIL import Image
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import (QMainWindow, QApplication, QVBoxLayout,
QWidget, QFileDialog, QGraphicsPixmapItem, QGraphicsView,
QGraphicsScene)
import numpy as np
class Example(QMainWindow):

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        # set up a widget to hold a pixmap
        wid = QWidget(self)
        self.setCentralWidget(wid)
        self.local_grview = QGraphicsView()
        self.local_scene = QGraphicsScene()
        vbox = QVBoxLayout()
        self.local_grview.setScene( self.local_scene )
        vbox.addWidget(self.local_grview)
        wid.setLayout(vbox)

        # load and display the image
        self.loadImage()

        # display the widget
        self.show()

        # also use matplotlib to display the data as it should appear
        plt.imshow(self.dataUint8[0], cmap='gray')
        plt.show()

    def loadImage(self):
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')[0]

        # use the tif reader to read in the tif stack
        self.data = self.readTif(fname)

        # convert to uint8 for display
        self.dataUint8 = self.uint8Convert(self.data)

        ###############################################################################################
        # I suspect this is where something goes wrong
        ###############################################################################################
        # create a QImage object
        self.im = QImage(self.dataUint8[0], self.dataUint8[0].shape[1], self.dataUint8[0].shape[0], QImage.Format_Grayscale8)
        # if we save using self.im.save() we also have a skewed image
        ###############################################################################################

        # send the QImage object to the pixmap generator
        self.pixmap = QPixmap(self.im)

        self.pixMapItem = QGraphicsPixmapItem(self.pixmap, None)
        self.local_scene.addItem(self.pixMapItem)

    def readTif(self, filename):  # use this function to read in a tif stack and return a 3D numpy array
        # read in the file
        stack = Image.open(filename)

        # extract each frame from the file and store in the frames variable
        frames = []
        i = 0
        while True:
            try:
                stack.seek(i)  # move to the ith position in the stack
                frames.append(np.array(stack))
                i += 1
            except EOFError:
                # end of stack
                break

        del stack  # probably unnecessary but this presumably saves a bit of memory
        return frames

    def uint8Convert(self, frames):  # use this function to scale a 3D numpy array of floats to 0-255 so it plays well with Qt methods
        # convert float array to uint8 array
        if np.min(frames) < 0:
            frames_uint8 = [np.uint8((np.array(frames[i]) - np.min(frames[i]))/np.max(frames[i])*255) for i in range(np.shape(frames)[0])]
        else:
            frames_uint8 = [np.uint8(np.array(frames[i])/np.max(frames[i])*255) for i in range(np.shape(frames)[0])]

        return frames_uint8

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
Here's a screenshot of the output (QImage vs matplotlib):
Here's a link to a tif stack that displays properly:
https://drive.google.com/uc?export=download&id=0B9EG5AHWC9qzX3NrNTJRb2toV2c
And here's a link to a tif stack that becomes skewed when displayed:
https://drive.google.com/uc?export=download&id=0B9EG5AHWC9qzbFB4TDU4c2x1OE0
Any help understanding why QImage is skewing this image would be much appreciated. The only major difference between the two tif stacks is that the one that displays skewed has a padded black area (zeros) around the image which makes the array larger.
UPDATE: I've now discovered that if I crop the offending image to 1024x1024, 512x512, or 1023x1024, QImage displays it properly, but cropping to 1024x1023 displays skewed. So it appears that the x (horizontal) length must be a power of 2 in order for QImage to handle it as expected. That's a ridiculous limitation! There must be something I'm not understanding. Surely there's a way for it to handle arbitrarily shaped arrays.
...I suppose, in principle, one could first apply a skew to the image and just let QImage deskew it back... (<== not a fan of this solution)
Many thanks to bnaecker for the 32-bit alignment hint and for providing the link to the source. Here is the solution.
QImage needs to know how many bytes per line (the stride) the array uses; otherwise it just guesses, and it guesses wrong in some cases. Thus, using the following in the loadImage() function produces the correct output.
# get the shape of the array
nframes, height, width = np.shape(self.dataUint8)
# calculate the total number of bytes in the frame
totalBytes = self.dataUint8[0].nbytes
# divide by the number of rows
bytesPerLine = int(totalBytes/height)
# create a QImage object
self.im = QImage(self.dataUint8[0], width, height, bytesPerLine, QImage.Format_Grayscale8)
The rest of the code is the same.
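As a side note, when the frame is C-contiguous the stride can also be read straight from numpy instead of being computed by hand. This is only a sketch, assuming self.dataUint8[0] is a 2D uint8 array as above:
# strides[0] is the number of bytes between consecutive rows, i.e. bytesPerLine
frame = np.ascontiguousarray(self.dataUint8[0])
self.im = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_Grayscale8)
self._frame = frame  # keep a reference alive; QImage does not copy the buffer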
The image is not being skewed, the underlying data is being interpreted incorrectly.
In the constructor you're using, the data buffer is flat, and you must also specify a row and column size in pixels. You've somehow specified the rows as being too long, so that the beginning of the next row is wrapped onto the end of the current one. This is why you get "striping" of the image, and why there's a progressively larger amount wrapping as you get to later rows. This also explains why it works when you use the QImage(fname) version of the constructor. That constructor uses the Qt library code to read the image data, which doesn't have the problem your own code does.
There are several places the data might be read incorrectly. I don't know details of the PIL package, but the np.array(stack) line looks like a plausible candidate. I don't know how the stack object exposes a buffer interface, but it may be doing it differently than you think, e.g., the data is column- rather than row-major. Also note that the QImage constructor you use expects data to be 32-bit aligned, even for 8-bit data. That might be a problem.
Another plausible candidate is the uint8Convert method, which might be inadvertently transposing the data or otherwise rolling it forwards/backwards. This might be why the square sizes work, but rectangular don't.
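If you would rather satisfy that 32-bit alignment expectation than pass the stride explicitly, another workaround is to copy each frame into a row-padded buffer before handing it to QImage. A minimal sketch, assuming self.dataUint8[0] is a 2D uint8 numpy array as in the question:
frame = np.ascontiguousarray(self.dataUint8[0])   # force row-major, contiguous memory
height, width = frame.shape
paddedWidth = (width + 3) // 4 * 4                # round each scanline up to a multiple of 4 bytes
padded = np.zeros((height, paddedWidth), dtype=np.uint8)
padded[:, :width] = frame                         # the extra columns stay black and are never shown
# with 32-bit aligned scanlines the short constructor computes the stride correctly
self.im = QImage(padded, width, height, QImage.Format_Grayscale8)
self._buffer = padded                             # keep the buffer alive; QImage does not copy it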
The answer in the post How to show tooltip image when hover on button pyqt5 shows that one can display a saved image in a QToolTip. Is there a way to achieve an equivalent result for an image represented by a numpy ndarray, without saving it to disk?
More precisely, if panda.jpg is an image saved at the root of the C drive, then the following code, modified from the reference link above, runs:
import sys
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *

class MainWindow(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        pybutton = QPushButton('Display Image via Tool Tip', self)
        pybutton.setToolTip(r'<img src="C:\panda.jpg">')

if __name__ == "__main__":
    app = QApplication(sys.argv)
    mainWin = MainWindow()
    mainWin.show()
    sys.exit( app.exec_() )
The code shows the saved image in the button's tooltip, as expected.
Consider now instead:
import sys
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import numpy as np

class MainWindow(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        pybutton = QPushButton('Display Image via Tool Tip', self)
        imarray = np.random.rand(1000,1000,3) * 255
        #pybutton.setToolTip(imarray) #This line is not working

if __name__ == "__main__":
    app = QApplication(sys.argv)
    mainWin = MainWindow()
    mainWin.show()
    sys.exit( app.exec_() )
Is there a way to attain an equivalent result without any saving? Converting to QImage does not seem to help.
This question is motivated because:
I have lots of arrays to display via a lot of tooltips, and none of them will be used at all after they are displayed in the tooltip.
I have one tooltip where I want to display a video, which I will be able to do as soon as I know how to display one image from an array without any saving, because all I will need then is a QTimer and an update of the array.
While Qt support for HTML is limited to a smaller subset of HTML4, that support is quite compliant and consistent, including the Base64 encoding for embedded image data.
The solution is then to save the image data as an image file in a memory buffer, convert its contents to the base 64 encoding and use that for the img tag.
Be aware: base64 is a 6-bit encoding, so every group of 3 input bytes becomes 4 output characters, and input whose size is not a multiple of 3 is padded. This obviously means that the memory footprint of the stored data will always be equal to (rarely) or bigger than (most likely) the original.
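For a rough idea of the overhead, the encoded text ends up about a third larger than the encoded file. A quick illustration using only the standard library:
import base64, math

data = bytes(1000)                                    # any raw payload, e.g. the PNG contents
encoded = base64.b64encode(data)
assert len(encoded) == 4 * math.ceil(len(data) / 3)   # 1336 characters for 1000 bytes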
In the following example I'm showing the random image as a QPixmap set for a QLabel, and the tooltip of that image can be visible by hovering it.
I also added a basic text viewer to show the actual contents of the "raw data" in order to realize the possible size (and memory requirement) for each possible image tooltip. Note that Qt will obviously use memory for both the base64 data and the cached image.
If you are not interested in high quality of the tooltip image, you can obviously use the 'JPG' format: quality results will vary, but you will certainly get a smaller memory requirement.
Remember that the above is quite important: the contents of a QToolTip are evaluated dynamically at runtime, so every time a different tooltip is about to be shown its whole contents are evaluated again, and this adds considerable overhead: Qt will check whether the tooltip text possibly contains rich text, create a new QTextDocument, parse the HTML and convert it to its own layout, compute all required sizes, and finally update the new QToolTip with the laid-out contents. Use this with extreme awareness, especially for high resolution images, for which you should really consider resizing before setting the tooltip contents.
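As a concrete example of that last point, the image can be shrunk (and optionally re-encoded as JPEG) before being converted to base64. This is only a sketch, not part of the example below; it assumes image is the QImage created there, and the smallImage/jpgData names are just illustrative:
smallImage = image.scaled(128, 128, Qt.KeepAspectRatio, Qt.SmoothTransformation)
jpgData = QByteArray()
jpgBuffer = QBuffer(jpgData)
smallImage.save(jpgBuffer, 'JPG', 80)             # the third argument is the JPEG quality (0-100)
base64data = bytes(jpgData.toBase64()).decode()   # smaller payload, lower quality tooltip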
class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        # note: QImage expects raw 8-bit channel data, so the float array is cast to uint8 first
        imarray = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
        image = QImage(imarray, imarray.shape[1], imarray.shape[0],
            QImage.Format_RGB888)

        central = QWidget()
        layout = QVBoxLayout(central)
        self.setCentralWidget(central)

        label = QLabel()
        layout.addWidget(label, alignment=Qt.AlignCenter)
        label.setPixmap(QPixmap.fromImage(image))

        monospace = QFont()
        monospace.setFamily('monospace')
        dataView = QPlainTextEdit(readOnly=True, font=monospace)
        dataView.setWordWrapMode(QTextOption.WrapAnywhere)
        layout.addWidget(dataView)

        bytearray = QByteArray()
        buffer = QBuffer(bytearray)
        image.save(buffer, 'PNG')
        base64data = bytes(bytearray.toBase64()).decode()

        dataView.appendHtml('''
            Raw image data size: {}<br/>
            Saved image data size: {}<br/>
            Base64 data size: {}<br/>
            Ratio: {}<br/><br/>
            Base64 contents:<br/><br/>
            {}
        '''.format(
            imarray.size,
            len(bytearray),
            len(base64data),
            len(base64data) / imarray.size,
            base64data
        ))
        dataView.moveCursor(QTextCursor.Start)

        imageData = '''
            <img src="data:image/png;base64,{}" width=128 height=128>
        '''.format(base64data)
        label.setToolTip('This is a tooltip.<br/>' + imageData)

    def sizeHint(self):
        return QApplication.primaryScreen().size()
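To actually run the example, reuse the wildcard PyQt5 imports and the numpy import from the question above and add the usual entry point; a minimal sketch:
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    mainWin = MainWindow()
    mainWin.show()
    sys.exit(app.exec_())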
I want to show a list of image thumbnails. When QListWidget loads a large number of images it becomes very slow: more than 200 pictures take about 5 s to load.
Loading everything at once seems to be a stupid way to do it:
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import Qt
import os

class MyQListWidgetItem(QtWidgets.QListWidgetItem):
    '''icon item'''
    def __init__(self, path, parent=None):
        self.icon = QtGui.QIcon(path)
        super(MyQListWidgetItem, self).__init__(self.icon, '', parent)

class MyQListWidget(QtWidgets.QListWidget):
    def __init__(self):
        super(MyQListWidget, self).__init__()
        path = './imgpath'
        self.setFlow(QtWidgets.QListView.LeftToRight)
        self.setIconSize(QtCore.QSize(180, 160))
        self.setResizeMode(Qt.QListWidget.Adjust)
        # add icon
        for fp in os.listdir(path):
            self.addItem(MyQListWidgetItem(os.path.join(path, fp), self))

if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    w = MyQListWidget()
    w.show()
    sys.exit(app.exec_())
There are various possible approaches for this.
The important aspect is to delay the loading of images until it's actually required.
In the following example I used two custom roles to simplify the process: PathRole contains the full path to the image, ImageRequestedRole is a "flag" that tells if the image has been already loaded (or queued for loading).
The priority obviously goes to the images that are currently visible in the viewport, and we need to ensure that whenever the visible area changes the images are loaded as soon as possible.
To achieve that, I connected the scroll bar valueChanged and rangeChanged signals (the latter is mostly required on startup) to a function that checks the range of visible indexes and verifies whether they contain a path and if they have not been loaded nor queued yet. This will also queue loading of images whenever the window is enlarged to a bigger size which would show items previously hidden.
Once the function above finds that some images require loading, they are queued, and a timer is started (if not already active): using a timer ensures that loading is progressive and doesn't block the whole UI until all requested images are processed.
Some important aspects:
images are not stored at their source size (otherwise you'd easily run out of resources), but scaled down.
a "lazy loader" ensures that images that are not currently shown are lazily loaded as soon as the current queue is completed; note that if you plan to browse through a huge amount of images that are also very big, this is not recommended.
since the images are not loaded instantly, the items don't have a correct size by default: setting the icon size is not sufficient, as that size is not considered until the item actually has a "decoration"; to work around that, a delegate is used, which implements the sizeHint method and sets a decoration size even if the image is not yet loaded: this ensures that the view already reserves enough space for each item without continuously computing positions relative to every other item.
setting the "loaded" flag requires writing data on the model, which by default causes the view to compute the sizes again; to avoid that, a temporary signal blocker is used, so that the model is updated without notifying the view.
for performance reasons, you cannot have different widths for each image depending on the image aspect ratio.
PathRole = QtCore.Qt.UserRole + 1
ImageRequestedRole = PathRole + 1

class ImageDelegate(QtWidgets.QStyledItemDelegate):
    def initStyleOption(self, opt, index):
        super().initStyleOption(opt, index)
        if index.data(PathRole):
            opt.features |= opt.HasDecoration
            opt.decorationSize = QtCore.QSize(180, 160)

class MyQListWidget(QtWidgets.QListWidget):
    def __init__(self):
        super(MyQListWidget, self).__init__()
        path = './imgpath'
        self.setFlow(QtWidgets.QListView.LeftToRight)
        self.setIconSize(QtCore.QSize(180, 160))
        self.setResizeMode(Qt.QListWidget.Adjust)

        for fp in os.listdir(path):
            imagePath = os.path.join(path, fp)
            item = QtWidgets.QListWidgetItem()
            if os.path.isfile(imagePath):
                item.setData(PathRole, imagePath)
            self.addItem(item)

        self.imageDelegate = ImageDelegate(self)
        self.setItemDelegate(self.imageDelegate)

        self.imageQueue = []
        self.loadTimer = QtCore.QTimer(
            interval=25, timeout=self.loadImage, singleShot=True)
        self.lazyTimer = QtCore.QTimer(
            interval=100, timeout=self.lazyLoadImage, singleShot=True)
        self.lazyIndex = 0

        self.horizontalScrollBar().valueChanged.connect(self.checkVisible)
        self.horizontalScrollBar().rangeChanged.connect(self.checkVisible)

    def checkVisible(self):
        start = self.indexAt(QtCore.QPoint()).row()
        end = self.indexAt(self.viewport().rect().bottomRight()).row()
        if end < 0:
            end = start
        model = self.model()
        for row in range(start, end + 1):
            index = model.index(row, 0)
            if not index.data(ImageRequestedRole) and index.data(PathRole):
                with QtCore.QSignalBlocker(model):
                    model.setData(index, True, ImageRequestedRole)
                self.imageQueue.append(index)
        if self.imageQueue and not self.loadTimer.isActive():
            self.loadTimer.start()

    def requestImage(self, index):
        with QtCore.QSignalBlocker(self.model()):
            self.model().setData(index, True, ImageRequestedRole)
        self.imageQueue.append(index)
        if not self.loadTimer.isActive():
            self.loadTimer.start()

    def loadImage(self):
        if not self.imageQueue:
            return
        index = self.imageQueue.pop()
        image = QtGui.QPixmap(index.data(PathRole))
        if not image.isNull():
            self.model().setData(
                index,
                image.scaled(self.iconSize(), QtCore.Qt.KeepAspectRatio),
                QtCore.Qt.DecorationRole
            )
        if self.imageQueue:
            self.loadTimer.start()
        else:
            self.lazyTimer.start()

    def lazyLoadImage(self):
        self.lazyIndex += 1
        if self.lazyIndex >= self.count():
            return
        index = self.model().index(self.lazyIndex, 0)
        if not index.data(ImageRequestedRole) and index.data(PathRole):
            with QtCore.QSignalBlocker(self.model()):
                self.model().setData(index, True, ImageRequestedRole)
            image = QtGui.QPixmap(index.data(PathRole))
            if not image.isNull():
                self.model().setData(
                    index,
                    image.scaled(self.iconSize(), QtCore.Qt.KeepAspectRatio),
                    QtCore.Qt.DecorationRole
                )
        else:
            self.lazyLoadImage()
            return
        if not self.imageQueue:
            self.lazyTimer.start()
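The code above assumes the same imports and image directory as the question (from PyQt5 import QtWidgets, QtGui, QtCore, Qt and import os); a minimal way to run it, not part of the original answer:
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    w = MyQListWidget()
    w.show()
    sys.exit(app.exec_())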
Finally, consider that this is a very basic and simple implementation for learning purposes:
an image viewer should not store all images in memory (not even as thumbnails, as in my example): consider that loaded images are stored as raster data ("bitmaps"), so even a thumbnail can occupy much more memory than the original compressed file;
a cache in a temporary path could be used once a maximum number of in-memory thumbnails is reached;
image loading should happen in a separate thread, possibly displaying a placeholder until the process is complete (a minimal sketch follows this list);
appropriate checks should be done to ensure that a file is actually an image, and/or that the image is not corrupted;
unless you plan to show something else besides the thumbnail (file name, stats, etc.), you should probably consider implementing the paint function of the delegate, otherwise some margin will always be shown on the right of the image;
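As an illustration of the threading point above, one possible shape for a pool-based loader is sketched below. The ThumbnailLoader and LoaderSignals classes and the onThumbnailLoaded slot are hypothetical names, not part of the answer's code; the idea is only to keep decoding and scaling off the GUI thread and deliver the result through a queued signal:
class LoaderSignals(QtCore.QObject):
    loaded = QtCore.pyqtSignal(int, QtGui.QImage)     # row number and scaled image

class ThumbnailLoader(QtCore.QRunnable):
    def __init__(self, row, path, size):
        super().__init__()
        self.signals = LoaderSignals()
        self.row = row
        self.path = path
        self.size = size

    def run(self):
        # decoding and scaling happen in a worker thread; QImage (unlike QPixmap) is safe there
        image = QtGui.QImage(self.path)
        if not image.isNull():
            image = image.scaled(self.size, QtCore.Qt.KeepAspectRatio,
                QtCore.Qt.SmoothTransformation)
        self.signals.loaded.emit(self.row, image)

# inside the view, instead of decoding directly in loadImage():
#     loader = ThumbnailLoader(index.row(), index.data(PathRole), self.iconSize())
#     loader.signals.loaded.connect(self.onThumbnailLoaded)   # hypothetical slot that sets DecorationRole
#     QtCore.QThreadPool.globalInstance().start(loader)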
In a pyQt application, I'm copying matplotlib figures (self.canvas) to a QClipboard instance using either
cb = QClipboard(self)
img = QImage(self.canvas.grab())
cb.setImage(img)
or
img = QPixmap(self.canvas.grab())
self.cb.setPixmap(img)
Both work well; however, I haven't managed to control the resolution of the exported image. That would be possible by creating and exporting a temporary file, but this is slow and has potential problems due to file system restrictions:
self.canvas.figure.savefig(self.temp_file, dpi = 300, type = 'png')
temp_img = QImage(self.temp_file)
cb.setImage(temp_img)
So, is there a way to set the resolution of the copied image without taking a detour through the file system?
-------------------------------------
Edit: I just found out that the above doesn't work under pyqt4. Instead, you can use
img = QPixmap.grabWidget(self.canvas)
self.cb.setPixmap(img)
-------------------------------------
Edit: Another solution that nearly works is the following piece of code; unfortunately it changes the colors (back to matplotlib defaults?):
# construct image from raw rgba data, this changes the colormap:
size = self.canvas.size()
width, height = size.width(), size.height()
im = QImage(self.canvas.buffer_rgba(), width, height, QImage.Format_ARGB32)
self.cb.setImage(im)
The following is an incomplete answer; further answers that complete it, or alternative solutions, are still welcome.
You may, instead of saving the image to disk, save it to a filebuffer and read it from there. However, reading a buffer directly into QImage may be hard. This was asked here, but the answer uses a workaround to save the file to disk first. This is undesired here.
Instead, you may read the figure in terms of a numpy array into the QImage.
This may involve the steps of
Saving the figure with the desired resolution to a buffer, then reading the buffer and creating a numpy array out of it. This is tackled in this question:
How can I render a Matplotlib Axes object to an image (as a Numpy array)?
Creating a QImage from a numpy array. This is tackled in this question: How to set an numpy array image on a QWidget using PyQt5 and also Convert 16-bit grayscale to QImage
Unfortunately I am currently unable to get the RGB array to show correctly, but the following would be the solution for showing the figure as a greyscale image:
import io
import sys
from PyQt4 import QtGui
import matplotlib.pyplot as plt
import numpy as np

def get_image():
    plt.plot([1,3,2])
    buff = io.BytesIO()
    plt.savefig(buff, format="png", dpi=100)
    buff.seek(0)
    img = plt.imread(buff)
    return img

class App(QtGui.QWidget):
    def __init__(self):
        super(App, self).__init__()
        self.setGeometry(300, 300, 250, 150)
        self.setLayout(QtGui.QVBoxLayout())
        label = QtGui.QLabel()
        label2 = QtGui.QLabel("Some other text label")

        img = get_image()
        im = img.mean(axis=2)
        im = ((im - im.min()) / (im.ptp() / 255.0)).astype(np.uint8)
        print im.shape, im.max(), im.min()
        temp_img = QtGui.QImage(im, im.shape[1], im.shape[0], im.shape[1], QtGui.QImage.Format_Indexed8)
        pixmap = QtGui.QPixmap(temp_img)
        label.setPixmap(pixmap)

        self.layout().addWidget(label)
        self.layout().addWidget(label2)
        self.show()

if __name__ == '__main__':
    app = QtGui.QApplication([])
    ex = App()
    sys.exit(app.exec_())
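An alternative buffer-only route, if the numpy detour isn't needed at all, is to let Qt decode the PNG bytes held in memory. This is only a sketch using the names from the question (self.canvas and the clipboard cb) and the PyQt4 QtGui module imported above; QImage.fromData builds the image from the in-memory file:
import io

buff = io.BytesIO()
self.canvas.figure.savefig(buff, format='png', dpi=300)   # render at the desired resolution
img = QtGui.QImage.fromData(buff.getvalue(), 'PNG')       # decode the PNG bytes without touching the disk
cb.setImage(img)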
I have implemented pyqtgraph inside QGraphicsView in PyQt5. When I display the image the following way, it is stretched out and expands in the same aspect ratio as the screen. How do I fix this?
image = pg.ImageItem(asarray(Image.open('pic.png')) )
self.graphicsView.addItem(image)
image.rotate(270)
EDIT: I found out how to rotate the image, so I updated the question with that solution. Now I am just trying to scale it properly.
You probably want something like:
import pyqtgraph as pg
from PIL import Image
from numpy import asarray
app = pg.mkQApp()
# Set up a window with ViewBox inside
gv = pg.GraphicsView()
vb = pg.ViewBox()
gv.setCentralItem(vb)
gv.show()
# configure view for images
vb.setAspectLocked()
vb.invertY()
# display image
img_data = asarray(Image.open('/home/luke/tmp/graph.png'))
image = pg.ImageItem(img_data, axisOrder='row-major')
vb.addItem(image)
The important pieces here that set the image scaling/orientation are:
using ImageItem(axisOrder='row-major') because image files are stored in row-major order
vb.invertY() because image files have the +y axis pointing downward
and vb.setAspectLocked() to keep the pixels square
I used np.rot90() instead; it's much faster and cythonable:
image = pg.ImageItem(np.rot90(np.asarray(Image.open('pic.png'))))
I am programming a GUI application for Data visualization using Python and Qt via PySide.
I experience occasional crashes ('python.exe has stopped working') which I think I narrowed down to the following problem:
When creating a pixmap from a numpy array, somehow the memory is freed by python (?) even when the pixmap already exists. This does not happen if the image format used is QImage.Format_ARGB32. (Why not?). Check out the code example below, I hope you can reproduce the problem.
EDIT: To clarify - If the numpy array is not deleted by python, everything works just as expected. However, in my application, new data is generated constantly and I would have to find a good way to track which dataset is currently displayed as a pixmap, and delete it as soon as it is not displayed anymore. I would like to find the correct way for Qt to take care of the (image-) data and store it in memory until not required anymore.
As far as I understood the documentation of Qt and PySide, the pixmap should hold all the data of the image, thus Qt should be responsible for the memory management.
Is this a bug in Qt, Pyside, or did I not understand something? I could not find any details on the memory management in the regular documentation.
Background: I need to regularly update the data to display, thus it may happen that between creating the pixmap and displaying it, the numpy data array is already overwritten by python (as there are some CPU intensive threads involved that sometimes slow the GUI). Thus, storing the numpy array forever is not an option.
Here is a code example, the interesting bits happen in the display_image method:
import numpy as np
from PySide import QtCore, QtGui
import sys
class displaywidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(displaywidget, self).__init__(parent)
        ## set up the GUI elements
        self.setLayout(QtGui.QGridLayout())
        self.view = QtGui.QGraphicsView()
        self.layout().addWidget(self.view)
        self.scene = QtGui.QGraphicsScene()
        self.view.setScene(self.scene)

        # create a pixmap and display it on the graphicsview
        self.display_image()

    def display_image(self):
        # create image data in numpy array
        size = 1024
        r = np.linspace(0, 255, num=size**2, dtype=np.uint32)
        argb = (r<<16) + (255<<24)
        # image should display a black to red shading
        image = QtGui.QImage(argb, size, size, size*4, QtGui.QImage.Format_RGB32)
        ### using the ARGB format option does not cause the problem
        # image = QtGui.QImage(argb, size, size, size*4, QtGui.QImage.Format_ARGB32)
        pixmap = QtGui.QPixmap.fromImage(image)
        self.scene.addPixmap(pixmap)
        ### when the image data is stored, everything works fine, too
        # self.cache = argb
        ### if only the pixmap and image is stored, the problem still exists
        # self.cache = [pixmap, image]

def main(argv):
    ## create application and main window
    try:
        app = QtGui.QApplication(argv)
        new_qtapp = True
    except:
        new_qtapp = False
    mainwindow = QtGui.QMainWindow()
    mainwindow.setCentralWidget(displaywidget())
    mainwindow.show()
    if new_qtapp:
        sys.exit(app.exec_())
    return mainwindow

if __name__ == "__main__":
    w = main(sys.argv)
I am using 32 bit Python 2.7.6 and PySide 1.2.2 on a generic Windows7 Office PC.
Thanks for your help!
This simple change keeps the image data from being garbage collected when the function is done, which seems to be what caused the problem:
self.argb = (r<<16) +(255<<24)
# image should display a black to red shading
image = QtGui.QImage(self.argb, size,size, size*4, QtGui.QImage.Format_RGB32)
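Alternatively, if keeping a reference on self is inconvenient, you can ask Qt for a deep copy of the pixel data so the numpy array can be freed safely afterwards. A sketch of that variant, reusing the names from display_image above:
argb = (r<<16) + (255<<24)
# QImage.copy() allocates and owns its own buffer, so the numpy array is no longer needed afterwards
image = QtGui.QImage(argb, size, size, size*4, QtGui.QImage.Format_RGB32).copy()
pixmap = QtGui.QPixmap.fromImage(image)
self.scene.addPixmap(pixmap)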