How to copy sections of pyQT QPixmaps without copying the whole pixmap? - python

I have an image that contains tiles, and I want to make a QPixmap of each tile within the larger QPixmap. I thought QPixmap's copy(x, y, width, height) method would do this for me, but it seems to copy the entire image rather than just the rectangle defined by the arguments, and thus consumes far too much memory.
An example below illustrates the problem. My PNG happens to be 3400x3078 pixels; dividing by 57 rows and 50 columns, the mini-pixmaps should each be 68x54 pixels, but they are obviously using much more memory than that.
What am I missing?
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication
import sys
app = QApplication(sys.argv)
file = r"/path/to/image/grid-of-tiles.png"
# image I'm using has this many rows and columns of tiles
r = 57
c = 50
pixmap = QPixmap(file)
# the tiles are known to divide evenly (integer division keeps copy() happy)
width = pixmap.width() // c
height = pixmap.height() // r
# after executing the below, memory use climbs north of 9GB!
# I was expecting on the order of twice the original image's size
minipixmaps = [pixmap.copy(col*width, row*height, width, height)
               for row in range(r) for col in range(c)]

Thanks to eyllanesc's comment, QImage does seem better behaved:
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QApplication
import sys
app = QApplication(sys.argv)
file = r"/path/to/image/grid-of-tiles.png"
# image I'm using has this many rows and columns of tiles
r = 57
c = 50
mosaic = QImage(file)  # QImage instead of QPixmap
# the tiles are known to divide evenly
width = mosaic.width() // c
height = mosaic.height() // r
# much less memory!
minipixmaps = [QPixmap.fromImage(mosaic.copy(col*width, row*height, width, height))
               for row in range(r) for col in range(c)]
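If the tiles are only needed for display later, keeping them as QImage and converting to QPixmap on demand may save even more memory. A minimal sketch, assuming a hypothetical tile_images() helper and the same row/column layout as above:
def tile_images(path, rows, cols):
    mosaic = QImage(path)
    w = mosaic.width() // cols
    h = mosaic.height() // rows
    # keep the tiles as QImage, which proved far lighter on memory above
    return [mosaic.copy(col * w, row * h, w, h)
            for row in range(rows) for col in range(cols)]
tiles = tile_images(file, r, c)
# convert a single tile only when it is about to be shown, e.g.:
# someLabel.setPixmap(QPixmap.fromImage(tiles[index]))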

Related

pyqtgraph image point selection

I'm trying to make a tool for my lab for manual image registration--where the user can select some points on two different images to align them. I made this in matplotlib, but zooming in/out was way too slow (I think because the images we're aligning are pretty high res). Is there a good way to do that in pyqtgraph? I just need to be able to select points on two image plots side by side and display where the point selections were.
Currently I have the images in ImageViews and I tried doing it with imv.scene.sigMouseClicked.connect(mouse_click), but in mouse_click(evt), evt.pos(), evt.scenePos(), and evt.screenPos() all gave coordinates that weren't in the image's coordinate system. I also played around with doing the point selection with ROI free handles (since I could get the correct coordinates from those), but it doesn't seem like you can color the handles. That isn't a total deal-breaker, but I was wondering if there was a better option. Is there a better way to do this?
Edit:
The answer was great, I used it to make this pile of spaghetti:
https://github.com/xkstein/ManualAlign
Figured I'd link it in case someone is looking for something similar and doesn't want the hassle of coding a new one from scratch.
Your question is unclear about how you want the program to match the points, so here is a simple solution that lets you (1) show an image and (2) add points to it.
The basic idea is to use a pg.GraphicsLayoutWidget, add a pg.ImageItem and a pg.ScatterPlotItem to it, and have each mouse click add a point to the ScatterPlotItem. Code:
import sys
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout
import pyqtgraph as pg
import cv2
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
class ImagePlot(pg.GraphicsLayoutWidget):
    def __init__(self):
        super(ImagePlot, self).__init__()
        self.p1 = pg.PlotItem()
        self.addItem(self.p1)
        self.p1.vb.invertY(True)  # Images need inverted Y axis
        # Use ScatterPlotItem to draw points
        self.scatterItem = pg.ScatterPlotItem(
            size=10,
            pen=pg.mkPen(None),
            brush=pg.mkBrush(255, 0, 0),
            hoverable=True,
            hoverBrush=pg.mkBrush(0, 255, 255)
        )
        self.scatterItem.setZValue(2)  # Ensure the scatter item is always on top
        self.points = []  # Record points
        self.p1.addItem(self.scatterItem)

    def setImage(self, image_path, size):
        self.p1.clear()
        self.p1.addItem(self.scatterItem)
        # pg.ImageItem takes an image array as input;
        # OpenCV is used to load the image here, but any package that
        # produces a numpy array will do
        image = cv2.imread(image_path, 1)
        # resize image to some fixed size
        image = cv2.resize(image, size)
        self.image_item = pg.ImageItem(image)
        self.image_item.setOpts(axisOrder='row-major')
        self.p1.addItem(self.image_item)

    def mousePressEvent(self, event):
        # map the click position into view (image) coordinates
        point = self.p1.vb.mapSceneToView(event.pos())
        x, y = int(point.x()), int(point.y())
        self.points.append([x, y])
        self.scatterItem.setPoints(pos=self.points)
        super().mousePressEvent(event)

if __name__ == "__main__":
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
    app = QApplication([])
    win = QMainWindow()
    central_win = QWidget()
    layout = QHBoxLayout()
    central_win.setLayout(layout)
    win.setCentralWidget(central_win)
    image_plot1 = ImagePlot()
    image_plot2 = ImagePlot()
    layout.addWidget(image_plot1)
    layout.addWidget(image_plot2)
    image_plot1.setImage('/home/think/image1.png', (310, 200))
    image_plot2.setImage('/home/think/image2.jpeg', (310, 200))
    # You can access the selected points via image_plot1.points
    win.show()
    if (sys.flags.interactive != 1) or not hasattr(QtCore, "PYQT_VERSION"):
        QApplication.instance().exec_()
The result: two image panels side by side, where each mouse click adds a red point to the clicked panel.
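Since setImage() resizes the image before display, the recorded points are in the resized coordinate system. A minimal sketch of mapping them back to the original image's pixel coordinates (the helper name and the sizes are hypothetical, assuming the original dimensions are known):
def to_original_coords(point, shown_size, original_size):
    # point is an [x, y] pair recorded by ImagePlot; sizes are (width, height)
    sx = original_size[0] / shown_size[0]
    sy = original_size[1] / shown_size[1]
    return [int(point[0] * sx), int(point[1] * sy)]
# e.g. for image_plot1, which was shown at (310, 200):
# originals = [to_original_coords(p, (310, 200), original_size) for p in image_plot1.points]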

How to display a byte array image on a QLabel?

I read a byte array of size: height*width*3 (3=RGB) that represents an image. This is raw data that I receive from a USB camera.
I was able to display and save it using PIL, following this thread. Now I'm trying to display it in a PyQt5 window.
I have tried using QLabel.setPixmap(), but it seems I cannot create a valid pixmap.
Failed attempt reading the byte array:
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QByteArray
from PyQt5.QtWidgets import QLabel
self.imgLabel = QLabel()
pixmap = QPixmap()
loaded = pixmap.loadFromData(QByteArray(img)) # img is a byte array of size: h*w*3
self.imgLabel.setPixmap(pixmap)
In this example loaded returns False, so I know imgLabel.setPixmap will not show anything, but I don't know how to debug further to find out why the loading failed.
A second failed attempt, trying to use the PIL library:
import PIL.Image
import PIL.ImageQt
pImage = PIL.Image.fromarray(RGB) # RGB is a numpy array of the data in img
qtImage = PIL.ImageQt.ImageQt(pImage)
pixmap = QPixmap.fromImage(qtImage)
self.imgLabel.setPixmap(pixmap)
In this example the application crashes when running self.imgLabel.setPixmap(pixmap), so again I'm not sure how to debug further.
Any help will be appreciated!
To get a QPixmap from the numpy array, you could create a QImage first and use that to create the QPixmap. For example:
from PyQt5 import QtCore, QtWidgets, QtGui
import numpy as np
# generate np array of (r, g, b) triplets with dtype uint8
height = width = 255
RGBarray = np.array([[r % 256, c % 256, -c % 256] for r in range(height) for c in range(width)], dtype=np.uint8)
app = QtWidgets.QApplication([])
label = QtWidgets.QLabel()
# create QImage from the raw bytes of the numpy array
# (keep a reference to the bytes so they outlive the QImage)
data = bytes(RGBarray)
image = QtGui.QImage(data, width, height, 3*width, QtGui.QImage.Format_RGB888)
pixmap = QtGui.QPixmap.fromImage(image)
label.setPixmap(pixmap)
label.show()
app.exec()
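For reference, QPixmap.loadFromData() expects data in an encoded image format such as PNG or JPEG, which is why it returns False for a raw RGB buffer. The same QImage constructor also works directly on the raw camera bytes from the question; a minimal sketch, assuming img holds height*width*3 bytes of RGB data and that width and height are known:
from PyQt5.QtGui import QImage, QPixmap
# img, width and height are assumed to come from the camera code in the question
image = QImage(img, width, height, 3 * width, QImage.Format_RGB888)
self.imgLabel.setPixmap(QPixmap.fromImage(image))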

How do I adjust the image size in the QTableWidget?

I want to adjust the image size in the QTableWidget, but the image comes out small (the part of the screenshot labeled "이미지", i.e. "image").
Below is the original image.
Please help me by referring to the code below.
Thanks.
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
imgPath = r"D:\Image\Image1.png"
self.tableWidget1 = QTableWidget(self.tab1)
self.tableWidget1.setRowCount(30)
self.tableWidget1.setColumnCount(3)
self.tableWidget1.setHorizontalHeaderLabels(setNameList)
self.tableWidget1.resize(1824, 760)
self.tableWidget1.move(0, 0)
self.tableWidget1.setItem(0, 0, QTableWidgetItem(QIcon(QPixmap(imgPath).scaled(QSize(1280, 640), Qt.KeepAspectRatio)), ''))
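The small rendering is typically because QTableWidget draws item icons at its default icon size, no matter how large the source pixmap is. A hedged sketch of one usual remedy, with arbitrary sizes that would need tuning to the actual layout:
# enlarge the icon area the table uses for items, and give the cell room to show it
self.tableWidget1.setIconSize(QSize(608, 304))
self.tableWidget1.setColumnWidth(0, 620)
self.tableWidget1.setRowHeight(0, 310)
self.tableWidget1.setItem(0, 0, QTableWidgetItem(QIcon(QPixmap(imgPath)), ''))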

How to display clickable RGB image similar to pyqtgraph ImageView?

Despite not being a proficient GUI programmer, I figured out how to use the pyqtgraph module's ImageView function to display an image that I can pan/zoom and click on to get precise pixel coordinates. The complete code is given below. The only problem is that ImageView can apparently only display a single-channel (monochrome) image.
My question: How do I do EXACTLY the same thing as this program (ignoring histogram, norm, and ROI features, which I don't really need), but with the option to display a true color image (e.g., the original JPEG photo)?
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import matplotlib.image as mpimg
# Load image from disk and reorient it for viewing
fname = 'R0000187.JPG' # This can be any photo image file
photo = np.array(mpimg.imread(fname))
photo = photo.transpose()
# select for red color and extract as monochrome image
img = photo[0,:,:] # WHAT IF I WANT TO DISPLAY THE ORIGINAL RGB IMAGE?
# Create app
app = QtGui.QApplication([])
## Create window with ImageView widget
win = QtGui.QMainWindow()
win.resize(1200,800)
imv = pg.ImageView()
win.setCentralWidget(imv)
win.show()
win.setWindowTitle(fname)
## Display the data
imv.setImage(img)
def click(event):
    event.accept()
    pos = event.pos()
    print(int(pos.x()), int(pos.y()))
imv.getImageItem().mouseClickEvent = click
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
pyqtgraph.ImageView does support RGB/RGBA images. For example:
import numpy as np
import pyqtgraph as pg
data = np.random.randint(255, size=(100, 100, 3))
pg.image(data)
..and if you want to display the exact image data without automatic level adjustment:
pg.image(data, levels=(0, 255))
As pointed out by Luke, ImageView() does display RGB, provided the correct array shape is passed. In my sample program, I should have used photo.transpose([1,0,2]) to keep the RGB in the last dimension rather than just photo.transpose(). When ImageView is confronted with an array of dimension (3, W, H), it treats the array as a video consisting of 3 monochrome images, with a slider at the bottom to select the frame.
(Corrected to incorporate a follow-up comment by Luke.)
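Putting the correction into code, a minimal sketch of the changed lines from the sample program (the rest of the program is assumed to stay the same):
# keep RGB in the last dimension: (H, W, 3) -> (W, H, 3)
photo = np.array(mpimg.imread(fname))
imv.setImage(photo.transpose([1, 0, 2]))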

Convert numpy array to PySide QPixmap

I want to convert an image held in a NumPy array to a PySide QPixmap so I can display it (EDIT: in my PySide UI). I already found this tool: qimage2ndarray, but it only works for PyQt4. I tried to change it to get it working with PySide, but I would have to change the C part of the tool and I have no experience with C. How can I do this, or are there any alternatives?
One alternative is to just use the PIL library.
>>> import numpy as np
>>> from PIL import Image
>>> im = Image.fromarray(np.random.randint(0,256,size=(100,100,3)).astype(np.uint8))
>>> im.show()
You can look at the QImage constructor at http://www.pyside.org/docs/pyside/PySide/QtGui/QImage.html.
It looks like you should be able to use a numpy array directly in the constructor:
class PySide.QtGui.QImage(data, width, height, format)
where the format argument is one of these: http://www.pyside.org/docs/pyside/PySide/QtGui/QImage.html#PySide.QtGui.PySide.QtGui.QImage.Format.
So, for example, you could do something like:
>>> import numpy as np
>>> import PySide.QtGui
>>> a = np.random.randint(0,256,size=(100,100,3)).astype(np.uint32)
>>> b = (255 << 24 | a[:,:,0] << 16 | a[:,:,1] << 8 | a[:,:,2]).flatten() # pack RGB values
>>> im = PySide.QtGui.QImage(b, 100, 100, PySide.QtGui.QImage.Format_RGB32)
I don't have PySide installed so I haven't tested this. Chances are it won't work as is, but it might guide you in the right direction.
If you create the data yourself, using numpy for example, I think the fastest method is to directly access a QImage. You can create an ndarray from the buffer object QImage.bits(), do some work using the numpy methods, and create a QPixmap from the QImage when you are done. You can also read or modify existing QImages that way.
import numpy as np
from PySide.QtGui import QImage
img = QImage(30, 30, QImage.Format_RGB32)
# wrap the QImage's buffer in a numpy array (no copy is made)
imgarr = np.ndarray(shape=(30, 30), dtype=np.uint32, buffer=img.bits())
# qt write, numpy read
img.setPixel(0, 0, 5)
print("%x" % imgarr[0, 0])
# numpy write, qt read
imgarr[0, 1] = 0xff000006
print("%x" % img.pixel(1, 0))
Be sure that the array does not outlive the image object. If you want, you can use a more sophisticated dtype, like a record array, for individual access to the alpha, red, green and blue bits (beware of endianness though).
In case there is no efficient way to calculate the pixel values using numpy, you can also use scipy.weave to inline some C/C++ code that operates on the array img.bits() points to.
If you already have an image in ARGB format, creating the QImage from data as suggested before is probably easier.
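To illustrate the record-array access mentioned above, a hedged sketch that assumes little-endian BGRA byte order, which is the usual in-memory layout of Format_RGB32 on x86:
import numpy as np
from PySide.QtGui import QImage
img = QImage(30, 30, QImage.Format_RGB32)
bgra = np.dtype({'names': ['b', 'g', 'r', 'a'], 'formats': [np.uint8] * 4})
view = np.ndarray(shape=(30, 30), dtype=bgra, buffer=img.bits())
# fill the whole image with opaque red, channel by channel
view['b'] = 0
view['g'] = 0
view['r'] = 255
view['a'] = 255
print("%x" % img.pixel(0, 0))  # ffff0000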
In addition to @user545424's answer about using PIL, if you didn't want to depend on PIL, you could manually construct your QImage directly from your np array:
import numpy as np
from PySide import QtGui
width = 100
height = 100
data = np.random.randint(0, 256, size=(width, height, 3)).astype(np.uint8)
img = QtGui.QImage(width, height, QtGui.QImage.Format_RGB32)
# set each pixel individually (slow, but needs no extra dependencies)
for x in range(width):
    for y in range(height):
        img.setPixel(x, y, QtGui.QColor(*data[x][y]).rgb())
pix = QtGui.QPixmap.fromImage(img)
I'm sure, using PIL, there is a way to read the actual image data into a QImage, but I will let @user545424 address that part since it's from his answer. PIL comes with the ImageQt module, which is convenient for directly converting an Image -> QPixmap, but unfortunately that's a PyQt4 QPixmap, which doesn't help you.
If the answer of user545424 does not work as expected (you see artifacts in the image), then I would suggest you change the format parameter to
PySide.QtGui.QImage.Format_ARGB32
a = np.random.randint(0,256,size=(100,100,3)).astype(np.uint32)
b = (255 << 24 | a[:,:,0] << 16 | a[:,:,1] << 8 | a[:,:,2]).flatten() # pack RGB values
im = PySide.QtGui.QImage(b, 100, 100, PySide.QtGui.QImage.Format_ARGB32)
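As a further alternative, a minimal sketch that skips the bit packing altogether by handing a contiguous uint8 H x W x 3 array to QImage with Format_RGB888 (the array contents and sizes here are arbitrary examples):
import numpy as np
from PySide import QtGui
app = QtGui.QApplication([])  # a QApplication must exist before creating a QPixmap
a = np.random.randint(0, 256, size=(100, 100, 3)).astype(np.uint8)
data = a.tobytes()  # keep a reference so the buffer outlives the QImage
# Format_RGB888 reads the bytes as packed R, G, B triplets, row by row
img = QtGui.QImage(data, a.shape[1], a.shape[0], 3 * a.shape[1], QtGui.QImage.Format_RGB888)
pix = QtGui.QPixmap.fromImage(img)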
