PyQt5 drawArc with wide "pen" - python

I am trying to use PyQt5 to draw a round gauge (MacOS 11.0.1, Python 3.9).
I used the drawArc call to create the gauge background, and I set the pen width to a large value (70). The resulting arc looks like a horseshoe, presumably because the "pen" is a 70-pixel square, not a line perpendicular to the direction of travel.
Is there a way of creating an arc - in PyQt5 - like the one on the right side of the picture?
I am open to suggestions: the application has already been written with Python+Tkinter, but due to the lack of anti-aliasing in Tkinter on the Raspberry Pi, I need to rewrite it.
(Plan B is to continue with PyQt, create a pie slice (drawPie) and cover the centre area with a circle of background colour - but this is not ideal, as it imposes some limitations on my design.)
# importing libraries
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys

arcreading = 0
adder = .1

# creating a Gauge class
class Gauge(QMainWindow):

    # constructor
    def __init__(self):
        super().__init__()
        timer = QTimer(self)                        # create a timer object
        timer.timeout.connect(self.update)          # add action to the timer: repaint the widget
        timer.start(0)                              # update cycle in milliseconds
        self.setGeometry(200, 200, 600, 600)        # window location and size
        self.setStyleSheet("background : black;")   # background color

    # -----------------------
    # method for paint event
    # -----------------------
    def paintEvent(self, event):
        global arcreading
        global adder
        kanvasx = 50          # bounding box origin: x
        kanvasy = 50          # bounding box origin: y
        kanvasheight = 150    # bounding box height
        kanvaswidth = 150     # bounding box width
        arcsize = 270         # arc angle between start and end
        arcwidth = 70         # arc (pen) width
        painter = QPainter(self)                        # create a painter object
        painter.setRenderHint(QPainter.Antialiasing)    # tune up painter
        painter.setPen(QPen(Qt.green, arcwidth))        # set color and width
        # ---------- the following lines simulate the sensor reading -----------
        # arcreading corresponds to the value to be indicated by the arc
        if arcreading > arcsize or arcreading < 0:
            adder = -adder
        arcreading = arcreading + adder
        # --------------------- end simulation ------------------------------
        # drawArc syntax:
        # drawArc(x, y, width, height, startAngle, spanAngle) - angles in 1/16 of a degree
        painter.drawArc(kanvasx, kanvasy,                           # bounding box: x0, y0, pixels
                        kanvaswidth + arcwidth,                     # bounding box: width
                        kanvasheight + arcwidth,                    # bounding box: height
                        int((arcsize + (180 - arcsize) / 2) * 16),  # arc start angle
                        int(-arcreading * 16))                      # arc span
        painter.end()                                               # end painter

# Driver code
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # creating a Gauge object
    win = Gauge()
    # show
    win.show()
    exit(app.exec_())

You need to set the capStyle of the pen to the appropriate Qt.PenCapStyle. In your case you should use FlatCap, which ends the stroke exactly at the end of the line, while the default, SquareCap, covers the end point and extends beyond it by half the line width:
painter.setPen(QPen(Qt.green, arcwidth, cap=Qt.FlatCap))
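For reference, the same fix can also be written by setting the cap style explicitly on a pen object; a minimal sketch of how the pen setup in the question's paintEvent might look (Qt.RoundCap is the other alternative, giving rounded arc ends):
pen = QPen(Qt.green, arcwidth)
pen.setCapStyle(Qt.FlatCap)   # the stroke stops exactly at the arc's start and end angles
painter.setPen(pen)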

Related

Changing a rectangle's X value does not actually move it in the GUI

I'm working on a GUI in Python with PySide2. I have a GraphicsView, where I'll put an image, and I'd like to draw and move a polygon around on that image. I've found many examples of simply drawing polygons, circles, etc. in PySide, PySide2, or PyQt 4/5 in Python. However, I haven't been able to figure out why my graphics items do not move on an event without deleting and redrawing.
I'm using the keyboard to change the X value on a PySide2 QRectF. The X value is clearly changing, but the rectangle does not actually move.
Here is a minimal example:
from PySide2 import QtCore, QtGui, QtWidgets
from functools import partial

class DebuggingDrawing(QtWidgets.QGraphicsView):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # initialize the scene and set the size
        self._scene = QtWidgets.QGraphicsScene(self)
        self._scene.setSceneRect(0, 0, 500, 500)
        self.setScene(self._scene)
        # make a green pen and draw a 10 wide, 20 high rectangle at x=20, y=30
        self.pen = QtGui.QPen(QtCore.Qt.green, 0)
        self.draw_rect = QtCore.QRectF(20, 30, 10, 20)
        # add the rectangle to our scene
        self._scene.addRect(self.draw_rect, self.pen)

    def move_rect(self, dx: int):
        # method for moving the existing rectangle
        # get the x value
        x = self.draw_rect.x()
        print('x: {} dx: {}'.format(x, dx))
        # use the moveLeft method of QRectF to change the rectangle's left side x value
        self.draw_rect.moveLeft(x + dx)
        self.update()

class MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.labelImg = DebuggingDrawing()
        # get the right key shortcut and hook it up to the move_rect method
        next_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence('Right'), self)
        next_shortcut.activated.connect(partial(self.labelImg.move_rect, 1))
        # get the left key shortcut, move_rect one pixel left
        back_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence('Left'), self)
        back_shortcut.activated.connect(partial(self.labelImg.move_rect, -1))
        self.setCentralWidget(self.labelImg)
        self.setMaximumHeight(480)
        self.update()

if __name__ == '__main__':
    app = QtWidgets.QApplication([])
    testing = MainWindow()
    testing.show()
    app.exec_()
Here's what the output looks like:
You can't really tell from the image, but even though the rectangle's x value is changing according to the print calls, nothing actually moves in the view. I've confirmed it's not just my eyes, because if I draw new rectangles in move_rect, they clearly show up.
draw_rect is a QRectF that, like pen, is only an input used to create the item (a QGraphicsRectItem) returned by the addRect() method: addRect() takes that information but does not keep using it afterwards. The idea is to move the returned item using setPos():
class DebuggingDrawing(QtWidgets.QGraphicsView):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # initialize the scene and set the size
        self._scene = QtWidgets.QGraphicsScene(self)
        self._scene.setSceneRect(0, 0, 500, 500)
        self.setScene(self._scene)
        # make a green pen and draw a 10 wide, 20 high rectangle at x=20, y=30
        pen = QtGui.QPen(QtCore.Qt.green, 0)
        draw_rect = QtCore.QRectF(20, 30, 10, 20)
        # add the rectangle to our scene
        self.item_rect = self._scene.addRect(draw_rect, pen)

    def move_rect(self, dx: int):
        p = self.item_rect.pos()
        p += QtCore.QPointF(dx, 0)
        self.item_rect.setPos(p)
If you still want to use draw_rect then you have to set it again in the item:
        self.pen = QtGui.QPen(QtCore.Qt.green, 0)
        self.draw_rect = QtCore.QRectF(20, 30, 10, 20)
        # add the rectangle to our scene
        self.item_rect = self._scene.addRect(self.draw_rect, self.pen)

    def move_rect(self, dx: int):
        # method for moving the existing rectangle
        # get the x value
        x = self.draw_rect.x()
        print('x: {} dx: {}'.format(x, dx))
        # use the moveLeft method of QRectF to change the rectangle's left side x value
        self.draw_rect.moveLeft(x + dx)
        self.item_rect.setRect(self.draw_rect)
It is recommended to read the "Graphics View Framework" documentation to understand how QGraphicsItem, QGraphicsView and QGraphicsScene work together.

Capturing hover events with sceneEventFilter in pyqt

I have user-adjustable annotations in a graphics scene. The size/rotation of annotations is handled by dragging corners of a rectangle about the annotation. I'm using a custom rect (instead of the boundingRect) so it follows the rotation of the parent annotation. The control corners are marked by two ellipses whose parent is the rect so transformations of rect/ellipse/annotation are seamless.
I want to detect when the cursor is over one of the corners, which corner it is, and the exact coordinates. For this task it seems that I should filter the hoverevents with the parent rect using a sceneEventFilter.
I've tried umpteen ways of implementing the sceneEventFilter to no avail. All events go directly to the hoverEnterEvent function. I've only found a few bits of example code that do something like this, and I'm just plain stuck. By the way, I'm totally self-taught on Python and Qt over the past 3 months, so please bear with me; I'm sure I'm missing something very basic. The code below is a simplified gui with two ellipses. We're trying to capture events in the sceneEventFilter, but they always go to hoverEnterEvent instead.
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from PyQt5.QtWidgets import QGraphicsScene, QGraphicsView, QGraphicsItem
import sys

class myHandle(QtGui.QGraphicsEllipseItem):
    def __init__(self, parent=None):
        super(myHandle, self).__init__(parent)

    def addTheHandle(self, h_parent='null', kind='null'):
        handle_w = 40
        if kind == 'scaling handle':
            handle_x = h_parent.boundingRect().topRight().x() - handle_w/2
            handle_y = h_parent.boundingRect().topRight().y() - handle_w/2
        if kind == 'rotation handle':
            handle_x = h_parent.boundingRect().topLeft().x() - handle_w/2
            handle_y = h_parent.boundingRect().topLeft().y() - handle_w/2
        the_handle = QtGui.QGraphicsEllipseItem(QtCore.QRectF(handle_x, handle_y, handle_w, handle_w))
        the_handle.setPen(QtGui.QPen(QtGui.QColor(255, 100, 0), 3))
        the_handle.setParentItem(h_parent)
        the_handle.setAcceptHoverEvents(True)
        the_handle.kind = kind
        return the_handle

class myRect(QtGui.QGraphicsRectItem):
    def __init__(self, parent=None):
        super(myRect, self).__init__(parent)

    def rectThing(self, boundingrectangle):
        self.setAcceptHoverEvents(True)
        self.setRect(boundingrectangle)
        mh = myHandle()
        rotation_handle = mh.addTheHandle(h_parent=self, kind='rotation handle')
        scaling_handle = mh.addTheHandle(h_parent=self, kind='scaling handle')
        self.installSceneEventFilter(rotation_handle)
        self.installSceneEventFilter(scaling_handle)
        return self, rotation_handle, scaling_handle

    def sceneEventFilter(self, event):
        print('scene ev filter')
        return False

    def hoverEnterEvent(self, event):
        print('hover enter event')

class Basic(QtGui.QMainWindow):
    def __init__(self):
        super(Basic, self).__init__()
        self.initUI()

    def eventFilter(self, source, event):
        return QtGui.QMainWindow.eventFilter(self, source, event)

    def exit_the_program(self):
        pg.exit()

    def initUI(self):
        self.resize(300, 300)
        self.centralwidget = QtGui.QWidget()
        self.setCentralWidget(self.centralwidget)
        self.h_layout = QtGui.QHBoxLayout(self.centralwidget)
        self.exit_program = QtGui.QPushButton('Exit')
        self.exit_program.clicked.connect(self.exit_the_program)
        self.h_layout.addWidget(self.exit_program)
        self.this_scene = QGraphicsScene()
        self.this_view = QGraphicsView(self.this_scene)
        self.this_view.setMouseTracking(True)
        self.this_view.viewport().installEventFilter(self)
        self.h_layout.addWidget(self.this_view)
        self.circle = self.this_scene.addEllipse(QtCore.QRectF(40, 40, 65, 65), QtGui.QPen(QtCore.Qt.black))
        mr = myRect()
        the_rect, rotation_handle, scaling_handle = mr.rectThing(self.circle.boundingRect())
        the_rect.setPen(QtGui.QPen(QtCore.Qt.black))
        the_rect.setParentItem(self.circle)
        self.this_scene.addItem(the_rect)
        self.this_scene.addItem(rotation_handle)
        self.this_scene.addItem(scaling_handle)

def main():
    app = QtGui.QApplication([])
    main = Basic()
    main.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
The main problem is that you are installing the event filter of the target items on the rectangle: the event filter of the rectangle will never receive anything. Moreover, sceneEventFilter accepts two arguments (the watched item and the event), but you only used one.
What you should do is to install the event filter of the rectangle on the target items:
rotation_handle.installSceneEventFilter(self)
scaling_handle.installSceneEventFilter(self)
That said, if you want to use those ellipse items for scaling or rotation of the source circle, your approach is a bit wrong to begin with.
from math import sqrt
# ...

class myRect(QtGui.QGraphicsRectItem):
    def __init__(self, parent):
        super(myRect, self).__init__(parent)
        self.setRect(parent.boundingRect())
        # rotation is usually based on the center of an object
        self.parentItem().setTransformOriginPoint(self.parentItem().rect().center())
        # a rectangle that has its center at (0, 0)
        handleRect = QtCore.QRectF(-20, -20, 40, 40)
        self.rotation_handle = QtGui.QGraphicsEllipseItem(handleRect, self)
        self.scaling_handle = QtGui.QGraphicsEllipseItem(handleRect, self)
        # position the handles by centering them at the right corners
        self.rotation_handle.setPos(self.rect().topLeft())
        self.scaling_handle.setPos(self.rect().topRight())
        for source in (self.rotation_handle, self.scaling_handle):
            # install the *self* event filter on the handles
            source.installSceneEventFilter(self)
            source.setPen(QtGui.QPen(QtGui.QColor(255, 100, 0), 3))

    def sceneEventFilter(self, source, event):
        if event.type() == QtCore.QEvent.GraphicsSceneMouseMove:
            # map the handle event position to the ellipse parent item; we could
            # also map to "self", but using the parent is more consistent
            localPos = self.parentItem().mapFromItem(source, event.pos())
            if source == self.rotation_handle:
                # create a temporary line to get the rotation angle
                line = QtCore.QLineF(self.boundingRect().center(), localPos)
                # add the current rotation to the angle between the center and the
                # top left corner, then subtract the new line angle
                self.parentItem().setRotation(135 + self.parentItem().rotation() - line.angle())
                # note that I'm assuming that the ellipse is a circle, so the top
                # left angle will always be at 135°; if it's not a circle, the
                # rect width and height won't match and the angle will be
                # different, so you'll need to compute that
                # parentRect = self.parentItem().rect()
                # oldLine = QtCore.QLineF(parentRect.center(), parentRect.topLeft())
                # self.parentItem().setRotation(
                #     oldLine.angle() + self.parentItem().rotation() - line.angle())
            elif source == self.scaling_handle:
                # still assuming a perfect circle, so the rectangle is a square;
                # the line from the center to the top right corner is used to
                # compute the square side size, which is double a right-triangle
                # cathetus where the hypotenuse is the line between the center
                # and any of its corners; if the ellipse is not a perfect circle,
                # you'll have to compute both of the catheti
                hyp = QtCore.QLineF(self.boundingRect().center(), localPos)
                size = sqrt(2) * hyp.length()
                rect = QtCore.QRectF(0, 0, size, size)
                rect.moveCenter(self.rect().center())
                self.parentItem().setRect(rect)
                self.setRect(rect)
                # update the positions of both handles
                self.rotation_handle.setPos(self.rect().topLeft())
                self.scaling_handle.setPos(self.rect().topRight())
            return True
        elif event.type() == QtCore.QEvent.GraphicsSceneMousePress:
            # return True for the press event (which is almost the same as setting
            # it as accepted), so that it won't be processed any further by the
            # scene, allowing the sceneEventFilter to capture the following
            # mouseMove events that the watched graphics items will receive
            return True
        return super(myRect, self).sceneEventFilter(source, event)
class Basic(QtGui.QMainWindow):
    # ...
    def initUI(self):
        # ...
        self.circle = self.this_scene.addEllipse(QtCore.QRectF(40, 40, 65, 65), QtGui.QPen(QtCore.Qt.black))
        mr = myRect(self.circle)
        self.this_scene.addItem(mr)

python wx speedmeter working as clock

I want to move the hand of a speedmeter (from the 'wx' package) in Python every second (like a clock) after clicking a button. The action sequence would be:
1. Click a button.
2. The speedmeter hand starts moving, updating every 0.4 seconds.
But after clicking the button (to start the speedmeter hand), the colour of the frame goes dim (it looks as if the app has crashed, although there is no error or exception). I think this happens because I am running an infinite loop (to update the speedmeter hand), which blocks APP.MainLoop(). I also suspect that thread programming could help, but I don't know much about it, so any help is appreciated. (A timer-based sketch follows the code below.)
Following is my code:
import threading
import os
import wx.lib.agw.speedmeter as SM
import wx.lib.agw.speedmeter as SM2
import sys
import wx
import math
from wx import animate
import time
from random import randint
import thread

class MyFrame(wx.Frame):
    def __init__(self, parent, id, title):
        # creation frame
        l, h = wx.GetDisplaySize()
        wx.Frame.__init__(self, parent, -1, title, size=(l, h))
        self.speed = SM.SpeedMeter(self,
                                   agwStyle=SM.SM_DRAW_HAND | SM.SM_DRAW_SECTORS | SM.SM_DRAW_MIDDLE_TEXT | SM.SM_DRAW_SECONDARY_TICKS,
                                   pos=(15, 10), size=(l/2, h/2))
        # Set The Region Of Existence Of SpeedMeter 1
        #self.speed.SetAngleRange(*-math.pi/4, math.pi/1.75)
        #self.speed.SetAngleRange(-2*math.pi/4, 2.57*math.pi/1.75)
        self.speed.SetAngleRange(-2*math.pi/4, 2.57*math.pi/1.75)
        # SpeedMeter In Sectors
        intervals = range(0, 5100, 100)
        self.speed.SetIntervals(intervals)
        # Assign The Same Colours To All Sectors
        colours = [wx.WHITE]*50
        self.speed.SetIntervalColours(colours)
        # Assign The Ticks
        ticks = [str(interval) for interval in intervals]
        self.speed.SetTicks(ticks)
        # Set The Ticks/Tick Markers Colour
        self.speed.SetTicksColour(wx.RED)
        # We Want To Draw 5 Secondary Ticks Between The Principal Ticks
        self.speed.SetNumberOfSecondaryTicks(4)
        # Set The Font For The Ticks Markers
        self.speed.SetTicksFont(wx.Font(8.9, wx.SWISS, wx.NORMAL, wx.NORMAL))
        # Set The Text In The Center Of SpeedMeter
        self.speed.SetMiddleText("Actual")
        # Assign The Colour To The Center Text
        self.speed.SetMiddleTextColour(wx.BLACK)
        # Assign A Font To The Center Text
        self.speed.SetMiddleTextFont(wx.Font(22, wx.SWISS, wx.NORMAL, wx.BOLD))
        # Set The Colour For The Hand Indicator
        self.speed.SetHandColour(wx.BLUE)  # wx.Colour(255, 250, 20)
        # Do Not Draw The External (CONTAINER) Arc
        self.speed.DrawExternalArc(True)
        # adding button
        self.btn = wx.Button(self, 2, "OK", pos=(400, 820), size=(50, 30))
        self.btn.Bind(wx.EVT_BUTTON, self.OnClicked)

    def OnClicked(self, event):
        try:
            i = 0
            while(True):
                #r = randint(0, 5000)
                if(i >= 5000):
                    i = 0
                i += 50
                self.speed.SetSpeedValue(i)
                time.sleep(0.5)
        except Exception, err:
            print("Exception", Exception, err)

if __name__ == '__main__':
    APP = wx.App()
    frame = MyFrame(None, 0, 'Appsy')
    APP.SetTopWindow(frame)
    frame.Show()
    APP.MainLoop()
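One common way to keep the GUI responsive, sketched here as a suggestion rather than taken from the original post, is to let wx drive the updates with a wx.Timer instead of looping inside the button handler. The on_timer name and the 400 ms interval are only illustrative:
        # in MyFrame.__init__, after creating the button:
        self.value = 0
        self.timer = wx.Timer(self)                        # timer owned by the frame
        self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)

    def OnClicked(self, event):
        self.timer.Start(400)                              # fire roughly every 0.4 seconds

    def on_timer(self, event):
        # advance the hand on every tick; MainLoop() keeps running in between
        self.value = 0 if self.value >= 5000 else self.value + 50
        self.speed.SetSpeedValue(self.value)
Because the event loop is never blocked, the frame keeps repainting between updates instead of appearing frozen.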

PyQt mapping the coordinates of a mouse press to the coordinate system of an image

In my program I'm trying to map the coordinates of a mousepress back to the coordinate dimensions of an image. I'm using PyQt4 in Python. The program below demonstrates the problem. I have a widget that makes a few image transformations. After those image transformations the image is shown in the center of the widget, while maintaining the original aspect ratio of the image. Since the image is scaled and translated, a coordinate of a MouseEvent must be remapped to the coordinate system of the Image.
The program below has a class "ScalingWidget" that performs these transformations and should also be able to remap the coordinates of a mouseReleaseEvent back to the coordinate system of the image. This works exactly as expected when I show the widget outside a layout and main window, but it breaks when I embed the widget in a bigger gui: the coordinates mapped back to the image coordinates suddenly show an offset.
The minimal program below can be started with and without the bug by specifying the -b flag. The -n option nests the ScalingWidget instance deeper and deeper inside a "gui", and the deeper it is embedded in layouts, the more strongly the bug shows.
The strange thing is that, although the drawing indicates the transformations are correct, the mapped coordinates (printed in the window title and console) show that remapping them back to the image coordinates goes wrong when the -b flag is present.
So my question is: What am I doing wrong with remapping the mouse coordinates back to the image dimensions when my ScalingWidget is embedded in a layout?
I don't expect the remapping to be pixel perfect, just as accurate as the end user can position the mouse. There are two reference points, at x=20, y=20 and at x=380, y=380, that can be used to check.
Any help is most welcome!
#!/usr/bin/env python
from PyQt4 import QtGui
from PyQt4 import QtCore
import sys
import argparse

class ScalingWidget (QtGui.QWidget):
    ''' Displays a pixmap optimally in the center of the widget,
    keeping the original aspect ratio.
    '''
    white = QtGui.QColor(255, 255, 255)
    black = QtGui.QColor(  0,   0,   0)
    arcrect = QtCore.QRect(-10, -10, 20, 20)

    def __init__(self):
        super(ScalingWidget, self).__init__()
        self.pixmap = QtGui.QPixmap(400, 400)
        painter = QtGui.QPainter(self.pixmap)
        painter.fillRect(self.pixmap.rect(), self.white)
        self.point1 = QtCore.QPoint(20, 20)
        self.point2 = QtCore.QPoint(380, 380)
        painter.setPen(self.black)
        painter.drawRect(QtCore.QRect(self.point1, self.point2))
        painter.end()
        self.matrix = None

    def sizeHint(self):
        return QtCore.QSize(500, 400)

    ##
    # Applies the default transformations
    #
    def _default_img_transform(self, painter):
        # size of widget
        winheight = float(self.height())
        winwidth = float(self.width())
        # size of pixmap
        scrwidth = float(self.pixmap.width())
        scrheight = float(self.pixmap.height())
        assert(painter.transform().isIdentity())
        if scrheight <= 0 or scrwidth <= 0:
            raise RuntimeError(repr(self) + "Unable to determine Screensize")
        widthr = winwidth / scrwidth
        heightr = winheight / scrheight
        if widthr > heightr:
            translate = (winwidth - heightr * scrwidth) / 2
            painter.translate(translate, 0)
            painter.scale(heightr, heightr)
        else:
            translate = (winheight - widthr * scrheight) / 2
            painter.translate(0, translate)
            painter.scale(widthr, widthr)
        # now store the matrix used to map the mouse coordinates back to the
        # coordinates of the pixmap
        self.matrix = painter.deviceTransform()

    def paintEvent(self, e):
        painter = QtGui.QPainter(self)
        painter.setClipRegion(e.region())
        # fill the background of the entire widget
        painter.fillRect(self.rect(), QtGui.QColor(0, 0, 0))
        # transform to place the image nicely in the center of the widget
        self._default_img_transform(painter)
        painter.drawPixmap(self.pixmap.rect(), self.pixmap, self.pixmap.rect())
        pen = QtGui.QPen(QtGui.QColor(255, 0, 0))
        # Just draw the points used to make the black rectangle of the pixmap.
        # Drawing is not affected, but remapping those coordinates with the
        # "same" matrix is.
        pen.setWidth(4)
        painter.setPen(pen)
        painter.save()
        painter.translate(self.point1)
        painter.drawPoint(0, 0)
        painter.restore()
        painter.save()
        painter.translate(self.point2)
        painter.drawPoint(0, 0)
        painter.restore()
        painter.end()

    def mouseReleaseEvent(self, event):
        x, y = float(event.x()), float(event.y())
        inverted, invsucces = self.matrix.inverted()
        assert(invsucces)
        xmapped, ymapped = inverted.map(x, y)
        print x, y
        print xmapped, ymapped
        self.setWindowTitle("mouse x,y = {}, {}, mapped x, y = {},{} "
                            .format(x, y, xmapped, ymapped)
                            )

def start_bug():
    ''' Displays the mouse press mapping bug.
    This is a bit contrived, but in the real world
    a widget is embedded deeper in a gui
    than a single widget; besides, the problem
    grows with the depth of embedding.
    '''
    app = QtGui.QApplication(sys.argv)
    win = QtGui.QWidget()
    layout = QtGui.QVBoxLayout()
    win.setLayout(layout)
    widget = None
    for i in range(0, args.increase_bug):
        if i < args.increase_bug - 1:
            widget = QtGui.QWidget()
            layout.addWidget(widget)
            layout = QtGui.QVBoxLayout()
            widget.setLayout(layout)
        else:
            layout.addWidget(ScalingWidget())
    win.show()
    sys.exit(app.exec_())

def start_no_bug():
    ''' Does not show the mapping bug; the mouse event.x() and .y() map nicely
    back to the coordinate system of the pixmap.
    '''
    app = QtGui.QApplication(sys.argv)
    win = ScalingWidget()
    win.show()
    sys.exit(app.exec_())

# parsing arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-b', '--display-bug', action='store_true',
                    help="Toggle this option to get the bugged version"
                    )
parser.add_argument('-n', '--increase-bug', type=int, default=1,
                    help="Increase the bug by n times."
                    )

if __name__ == "__main__":
    args = parser.parse_args()
    if args.display_bug:
        start_bug()
    else:
        start_no_bug()
The basic idea of _default_img_transform is correct. The error is at the end of the function.
    def _default_img_transform(self, painter):
        # size of widget
        winheight = float(self.height())
        winwidth = float(self.width())
        # size of pixmap
        scrwidth = float(self.pixmap.width())
        scrheight = float(self.pixmap.height())
        assert(painter.transform().isIdentity())
        if scrheight <= 0 or scrwidth <= 0:
            raise RuntimeError(repr(self) + "Unable to determine Screensize")
        widthr = winwidth / scrwidth
        heightr = winheight / scrheight
        if widthr > heightr:
            translate = (winwidth - heightr * scrwidth) / 2
            painter.translate(translate, 0)
            painter.scale(heightr, heightr)
        else:
            translate = (winheight - widthr * scrheight) / 2
            painter.translate(0, translate)
            painter.scale(widthr, widthr)
        # now store the matrix used to map the mouse coordinates back to the
        # coordinates of the pixmap
        self.matrix = painter.deviceTransform()  ## <-- error is here
The last line of _default_img_transform should be:
        self.matrix = painter.transform()
According to the documentation, QPainter.deviceTransform() should only be called when you are working with a Qt::HANDLE, which is a platform-dependent handle. Since I wasn't working with a platform-dependent handle, I shouldn't have called it. It happens to work out when the widget is shown on its own, but not when it is embedded in a layout: then the deviceTransform matrix differs from the normal QPainter.transform() matrix. See also http://doc.qt.io/qt-4.8/qpainter.html#deviceTransform

Make certain regions of an image selectable

I have drawn a grid on a pixmap by painting evenly spaced horizontal and vertical lines, and I am trying to make each of the rectangular grid pieces selectable.
In other words, if a user clicks on a certain rectangle in the grid, that piece should be stored as a separate pixmap. I have tried using QRubberBand, but I can't figure out how to restrict the selection to the specific piece that was clicked. Is there a way to do this using PyQt?
Here is my code for drawing the grid onto the pixmap:
import sys
from PyQt4 import QtGui, QtCore   # assumed PyQt4-style imports, implied by the QtGui.* widget classes used below

class imageSelector(QtGui.QWidget):
    def __init__(self):
        super(imageSelector, self).__init__()
        self.initIS()

    def initIS(self):
        self.pixmap = self.createPixmap()
        painter = QtGui.QPainter(self.pixmap)
        pen = QtGui.QPen(QtCore.Qt.white, 0, QtCore.Qt.SolidLine)
        painter.setPen(pen)
        width = self.pixmap.width()
        height = self.pixmap.height()
        numLines = 6
        numHorizontal = width // numLines
        numVertical = height // numLines
        painter.drawRect(0, 0, height, width)
        for x in range(numLines):
            newH = x * numHorizontal
            newV = x * numVertical
            painter.drawLine(0 + newH, 0, 0 + newH, width)
            painter.drawLine(0, 0 + newV, height, 0 + newV)
        label = QtGui.QLabel()
        label.setPixmap(self.pixmap)
        label.resize(label.sizeHint())
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(label)
        self.setLayout(hbox)
        self.show()

    def createPixmap(self):
        pixmap = QtGui.QPixmap("CT1.png").scaledToHeight(500)
        return pixmap

def main():
    app = QtGui.QApplication(sys.argv)
    Im = imageSelector()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
Extend your QWidget-derived class to override mousePressEvent, then use the mouse position to find the tile and store the part of the pixmap you want. Just add the following method to your class and fill in the specific code for cutting out and storing the pixmap (a concrete sketch of the cell lookup follows the snippets below).
    def mousePressEvent(self, event):
        """
        User has clicked inside the widget
        """
        # get mouse position
        x = event.x()
        y = event.y()
        # find the coordinates of the grid rectangle under the cursor
        # copy and store this grid rectangle
You could even display a rectangular rubber band that jumps from rectangle to rectangle if you want. To do this, override mouseMoveEvent.
    def mouseMoveEvent(self, event):
        """
        Mouse is moved inside the widget
        """
        # get mouse position
        x = event.x()
        y = event.y()
        # find the coordinates of the grid rectangle under the cursor
        # move the rectangular rubber band to this grid rectangle
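To make the "find the grid rectangle" part concrete, here is a minimal sketch under the assumptions of the question's code (a 6x6 grid drawn over self.pixmap and a label sitting at the widget's origin); the cell arithmetic and the clicked_cell name are only illustrative:
    def mousePressEvent(self, event):
        # size of one grid cell, matching the spacing used to draw the lines
        cell_w = self.pixmap.width() // 6
        cell_h = self.pixmap.height() // 6
        # column and row of the cell under the cursor (widget coordinates)
        col = event.x() // cell_w
        row = event.y() // cell_h
        # copy that cell out of the pixmap as its own pixmap
        clicked_cell = self.pixmap.copy(col * cell_w, row * cell_h, cell_w, cell_h)
If the label does not sit exactly at the widget's top-left corner (for example because of layout margins), subtract the label's position from the event coordinates first.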
