I would like to know how I can select a pixel with a mouse click in an image (QImage) and get the pixel position and value.
Thanks
self.image = QLabel()
self.image.setPixmap(QPixmap("C:\\myImg.jpg"))
self.image.setObjectName("image")
self.image.mousePressEvent = self.getPos  # monkey-patch the label's mouse handler

def getPos(self, event):
    x = event.pos().x()
    y = event.pos().y()
This question is old, but for everybody getting here like me, this is my solution based on Jared's answer:
self.img = QImage('fname.png')
pixmap = QPixmap.fromImage(self.img)
img_label = QLabel()
img_label.setPixmap(pixmap)
img_label.mousePressEvent = self.getPixel

def getPixel(self, event):
    x = event.pos().x()
    y = event.pos().y()
    c = self.img.pixel(x, y)  # color code (integer): 3235912
    # depending on what kind of value you like (arbitrary examples)
    c_qobj = QColor(c)  # color object
    c_rgb = QColor(c).getRgb()  # 8bit RGBA: (255, 23, 0, 255)
    c_rgbf = QColor(c).getRgbF()  # RGBA float: (1.0, 0.3123, 0.0, 1.0)
    return x, y, c_rgb
Make sure the size of the label matches the size of the image; otherwise the x and y mouse coordinates need to be transformed to image coordinates. And I guess it's also possible to use the .pixel() method directly on a pixmap as well, but the QImage object seems to perform better in my case.
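If the label and image sizes do differ, a rough sketch of that transform (assuming the pixmap is scaled to fill the whole label, with no aspect-ratio padding; the helper name is just illustrative):

def label_to_image_coords(label, image, event):
    # Scale widget coordinates to image coordinates
    x = int(event.pos().x() * image.width() / label.width())
    y = int(event.pos().y() * image.height() / label.height())
    return x, y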
First you have to draw the image. You can do this by making a QLabel widget and calling setPixmap. You need to convert your QImage to a QPixmap before doing this (you can use QPixmap.fromImage(img)).
You can get mouse clicks by subclassing QLabel and intercepting mousePressEvent. Look up the pixel value with QImage.pixel().
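A minimal sketch of that subclassing approach (PyQt5 assumed; the class name and image path are just illustrative):

from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtGui import QImage, QPixmap, QColor
import sys

class ClickableImageLabel(QLabel):
    def __init__(self, image_path):
        super().__init__()
        self.img = QImage(image_path)
        self.setPixmap(QPixmap.fromImage(self.img))

    def mousePressEvent(self, event):
        x, y = event.pos().x(), event.pos().y()
        if self.img.valid(x, y):  # ignore clicks outside the image
            color = QColor(self.img.pixel(x, y))
            print(x, y, color.getRgb())
        super().mousePressEvent(event)

app = QApplication(sys.argv)
label = ClickableImageLabel("myImg.jpg")  # hypothetical path
label.show()
sys.exit(app.exec_())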
I want to centre an image inside a rect. Note that I am not using the image's own rect but a separate rect to mount the image on.
rect_1 = pygame.Rect(1, 1, 178, 178)
...
cross = pygame.image.load("cross.png").convert_alpha()
cross_minified = pygame.transform.rotozoom(cross, 0, 0.25)
...
screen.blit(cross_minified, rect_1)
After this code I cannot find a way to center my image in the middle of rect_1.
It looks somewhat like this:
Use the functionality of pygame.Rect. Get a rectangle the size of the image with pygame.Surface.get_rect and set its center via the keyword argument center, using the center of rect_1:
cross_rect = cross_minified.get_rect(center = rect_1.center)
screen.blit(cross_minified, cross_rect)
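For context, a minimal runnable sketch of those two lines in place (the window size and colours are arbitrary; cross.png is the file from the question):

import pygame

pygame.init()
screen = pygame.display.set_mode((400, 400))
rect_1 = pygame.Rect(1, 1, 178, 178)

cross = pygame.image.load("cross.png").convert_alpha()
cross_minified = pygame.transform.rotozoom(cross, 0, 0.25)
cross_rect = cross_minified.get_rect(center=rect_1.center)

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    screen.fill((30, 30, 30))
    pygame.draw.rect(screen, (200, 200, 200), rect_1, 1)  # outline of the mounting rect
    screen.blit(cross_minified, cross_rect)               # image centred in rect_1
    pygame.display.flip()

pygame.quit()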
I'm trying to make a tool for my lab for manual image registration--where the user can select some points on two different images to align them. I made this in matplotlib, but zooming in/out was way too slow (I think because the images we're aligning are pretty high res). Is there a good way to do that in pyqtgraph? I just need to be able to select points on two image plots side by side and display where the point selections were.
Currently I have the images in ImageViews and I tried doing it with imv.scene.sigMouseClicked.connect(mouse_click), but in mouse_click(evt) the evt.pos(), evt.scenePos(), and evt.screenPos() all gave coordinates that weren't in the image's coordinate system. I also played around with doing the point selection with ROI free handles (since I could get the correct coordinates from those), but it doesn't seem like you can color the handles. That isn't a total deal-breaker, but I was wondering if there was a better option. Is there a better way to do this?
Edit:
The answer was great, I used it to make this pile of spaghetti:
https://github.com/xkstein/ManualAlign
Figured I'd link it in case someone is looking for something similar and doesn't want the hassle of coding a new one from scratch.
Your question is unclear about how you want the program to match the points, so here I provide a simple solution that allows you to (1) show an image and (2) add points to the image.
The basic idea is to use a pg.GraphicsLayoutWidget, then add a pg.ImageItem and a pg.ScatterPlotItem, and each mouse click adds a point to the ScatterPlotItem. Code:
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QHBoxLayout
import pyqtgraph as pg
import cv2
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
class ImagePlot(pg.GraphicsLayoutWidget):
    def __init__(self):
        super(ImagePlot, self).__init__()
        self.p1 = pg.PlotItem()
        self.addItem(self.p1)
        self.p1.vb.invertY(True)  # Images need an inverted Y axis

        # Use a ScatterPlotItem to draw the points
        self.scatterItem = pg.ScatterPlotItem(
            size=10,
            pen=pg.mkPen(None),
            brush=pg.mkBrush(255, 0, 0),
            hoverable=True,
            hoverBrush=pg.mkBrush(0, 255, 255)
        )
        self.scatterItem.setZValue(2)  # Ensure the scatter item is always on top
        self.points = []  # Record the clicked points
        self.p1.addItem(self.scatterItem)

    def setImage(self, image_path, size):
        self.p1.clear()
        self.p1.addItem(self.scatterItem)
        # pg.ImageItem takes an image array as input.
        # I use OpenCV to load the image; you can replace it with another package.
        image = cv2.imread(image_path, 1)
        # Resize the image to a fixed size
        image = cv2.resize(image, size)
        self.image_item = pg.ImageItem(image)
        self.image_item.setOpts(axisOrder='row-major')
        self.p1.addItem(self.image_item)

    def mousePressEvent(self, event):
        # Map the click from scene coordinates to view (image) coordinates
        point = self.p1.vb.mapSceneToView(event.pos())
        x, y = int(point.x()), int(point.y())
        self.points.append([x, y])
        self.scatterItem.setPoints(pos=self.points)
        super().mousePressEvent(event)
if __name__ == "__main__":
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
    app = QApplication([])
    win = QMainWindow()
    central_win = QWidget()
    layout = QHBoxLayout()
    central_win.setLayout(layout)
    win.setCentralWidget(central_win)
    image_plot1 = ImagePlot()
    image_plot2 = ImagePlot()
    layout.addWidget(image_plot1)
    layout.addWidget(image_plot2)
    image_plot1.setImage('/home/think/image1.png', (310, 200))
    image_plot2.setImage('/home/think/image2.jpeg', (310, 200))
    # You can access the clicked points via image_plot1.points
    win.show()
    if (sys.flags.interactive != 1) or not hasattr(pg.QtCore, "PYQT_VERSION"):
        QApplication.instance().exec_()
The result looks like:
I am creating a program which must change the color of individual pixels in a pyglet window. I am unable to find any way to do this in the docs. Is there a way to do this?
For funsies, I'll add another answer that is more along the lines of what you might need. The window itself will be whatever "clear" color you set for the buffer via:
window = pyglet.window.Window(width=width, height=height)
pyglet.gl.glClearColor(0.5,0,0,1) # Note that these are values 0.0 - 1.0 and not (0-255).
So changing the background is virtually impossible because it's "nothing".
You can however draw pixels on the background via the .draw() function.
import pyglet
from random import randint

width, height = 500, 500
window = pyglet.window.Window(width=width, height=height)

@window.event
def on_draw():
    window.clear()
    for i in range(10):
        x = randint(0, width)
        y = randint(0, height)
        pyglet.graphics.draw(1, pyglet.gl.GL_POINTS,
            ('v2i', (x, y)),
            ('c3B', (255, 255, 255))
        )

pyglet.app.run()
This will create 10 randomly placed white dots on the background.
To add anything above that simply place your .blit() or .draw() features after the pyglet.graphics.draw() line.
You could use the magic function SolidColorImagePattern and modify the data you need.
R,G,B,A = 255,255,255,255
pyglet.image.SolidColorImagePattern((R, G, B, A)).create_image(width, height)
This is a .blit():able image. It's white, and probably not what you want.
So we'll do some more wizardry and swap out all the pixels for random ones (War of the ants):
import pyglet
from random import randint

width, height = 500, 500
window = pyglet.window.Window(width=width, height=height)

image = pyglet.image.SolidColorImagePattern((255, 255, 255, 255)).create_image(width, height)
data = image.get_image_data().get_data('RGB', width*3)

new_image = b''
for i in range(0, len(data), 3):
    pixel = bytes([randint(0, 255)]) + bytes([randint(0, 255)]) + bytes([randint(0, 255)])
    new_image += pixel

image.set_data('RGB', width*3, new_image)

@window.event
def on_draw():
    window.clear()
    image.blit(0, 0)

pyglet.app.run()
For educational purposes, I'll break it down into easier chunks.
image = pyglet.image.SolidColorImagePattern((255,255,255,255)).create_image(width, height)
Creates a solid white image, as mentioned. Its width and height match the window size.
We then grab the image data:
data = image.get_image_data().get_data('RGB', width*3)
This bytes string will contain width*height*<bytes per pixel> bytes, meaning a 20x20 image will be 1200 bytes big because RGB takes up 3 bytes per pixel.
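As a quick sanity check of that size claim (a small addition, assuming the 'RGB' format and pitch used above):

assert len(data) == width * height * 3  # 500 * 500 * 3 bytes for this window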
new_image = b''
for i in range(0, len(data), 3):
    pixel = bytes([randint(0, 255)]) + bytes([randint(0, 255)]) + bytes([randint(0, 255)])
    new_image += pixel
This whole block loops over all the pixels (len(data) is just a convenience thing; you could do range(0, width*height*3, 3) as well, but meh).
The pixel consists of 3 randint(0, 255) bytes objects combined into one string, like so:
pixel = b'\xff\xff\xff'
That's also the reason for why we step 3 in our range(0, len(data), 3). Because one pixel is 3 bytes "wide".
Once we've generated all the pixels, the new data is complete. (The bytes object holding the image data can't be modified in place, because bytes objects are immutable; a bytearray would allow that.)
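If you do want to modify pixels in place rather than rebuilding the whole string, a bytearray works; a small sketch (not part of the original answer, reusing data and randint from above):

mutable = bytearray(data)
for i in range(0, len(mutable), 3):
    mutable[i] = randint(0, 255)      # R
    mutable[i + 1] = randint(0, 255)  # G
    mutable[i + 2] = randint(0, 255)  # B
image.set_data('RGB', width*3, bytes(mutable))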
Anyhow, once all that sweet image building is done, we give the image object its new data by doing:
image.set_data('RGB', width*3, new_image)
And that's it. Easy as butter in sunshine on a -45 degree winter day.
Docs:
https://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/quickstart.html
https://github.com/Torxed/PygletGui/blob/master/gui_classes_generic.py
https://pythonhosted.org/pyglet/api/pyglet.image.ImageData-class.html#get_image_data
https://pythonhosted.org/pyglet/api/pyglet.image.ImageData-class.html#set_data
You can also opt to get a region and just modify that region.. But I'll leave the tinkering up to you :)
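For example, a rough sketch of the region idea (the coordinates are arbitrary; get_region takes x, y, width, height):

region = image.get_region(10, 10, 50, 50)
region_data = region.get_image_data().get_data('RGB', 50 * 3)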
You can blit pixels into background 'image'. You can look at this Stack Overflow question.
If you mean background color, I can help. There is one option that I know of, the pyglet.gl.glClearColor function.
For example:
import pyglet
from pyglet.gl import glClearColor
win = pyglet.window.Window(600, 600, caption="test")
glClearColor(1.0, 1.0, 1.0, 1.0)  # red, green, blue, and alpha (floats from 0.0 to 1.0)

@win.event
def on_draw():
    win.clear()

pyglet.app.run()
That will create a window with a white background (as opposed to the default, black).
I have two pieces of code, both of them should create test.png containing a black square. The first one does it, but the second returns a transparent square. The difference between them is that the first one has a clear stripe at the left and the second does not.
The first example:
root = Tk()
image = PhotoImage(width=50, height=50)
for x in range(1, 50):
    for y in range(50):
        pixel(image, (x, y), (0, 0, 0))
image.write('test.png', format='png')
The second example:
root = Tk()
image = PhotoImage(width=50, height=50)
for x in range(50):
    for y in range(50):
        pixel(image, (x, y), (0, 0, 0))
image.write('test.png', format='png')
I also import tkinter and use the function pixel(), which has this code:
def pixel(image, pos, color):
    """Place pixel at pos=(x,y) on image, with color=(r,g,b)."""
    r, g, b = color
    x, y = pos
    image.put("#%02x%02x%02x" % (r, g, b), (x, y))
To make it short: Tkinter's PhotoImage class can't really save PNGs; it only supports GIF, PGM and PPM. You may have noticed that the preview image is correctly colored, but when you open the file, it's blank.
To save PNG images, you have to use the Python Imaging Library or, for Python 3, Pillow.
With this, the image creation is even easier:
from PIL import Image
image = Image.new("RGB", (50, 50), (0,0,0))
image.save('test.png', format='PNG')
If you need to, you can convert it to PIL's ImageTk.PhotoImage object, which can be used in Tkinter.
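A short sketch of that conversion (needs Pillow's ImageTk module; the widget setup is just illustrative):

from tkinter import Tk, Label
from PIL import Image, ImageTk

root = Tk()
pil_image = Image.new("RGB", (50, 50), (0, 0, 0))
tk_image = ImageTk.PhotoImage(pil_image)  # keep a reference so it isn't garbage collected
Label(root, image=tk_image).pack()
root.mainloop()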
I have used the Python Imaging Library to load a .ttf font. Here is the code:
self.rect = Image.new("RGBA", (600,100), (255,255,255))
self.draw = ImageDraw.Draw(self.rect)
self.font = ImageFont.truetype("font.ttf", 96)
self.draw.text((5,0), "activatedgeek", (0,0,0), font=self.font)
self.texture = self.loadFont(self.rect)
Here is the loadFont() function of the respective class:
def loadFont(self, im):
    try:
        ix, iy, image = im.size[0], im.size[1], im.tostring("raw", "RGBA", 0, -1)
    except SystemError:
        ix, iy, image = im.size[0], im.size[1], im.tostring("raw", "RGBX", 0, -1)
    retid = gl.glGenTextures(1)
    gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
    gl.glBindTexture(gl.GL_TEXTURE_2D, retid)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR_MIPMAP_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR_MIPMAP_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_GENERATE_MIPMAP, gl.GL_TRUE)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, 3, ix, iy, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, image)
    return retid
Here is a snapshot I took using glReadPixels(); unfortunately it is the same as the one rendered in the window created using PyQt.
It shows an unwanted border, some artefact. Please help me rectify this.
Have you considered using a more reasonable wrap state, such as GL_CLAMP_TO_EDGE? I have a strong feeling that this is related to border color beyond the edges of your texture image.
There are a number of approaches you could take to solve an issue like this, ranging from pre-multiplied alpha to an extra texel border around the entire image, but the simplest thing to try would be GL_CLAMP_TO_EDGE.
GL_CLAMP is something of a joke as far as wrap modes go; it does not clamp the range of texture coordinates to texel centers, and calamity ensues when the nearest texel becomes the border color. Needless to say, this behavior is usually undesirable.
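Applied to the texture setup from the question, the suggested change would look something like this (mirroring the gl alias used there):

gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)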