GUI Not Responding in PyQt - python

I'm trying to run a YOLOv4 model with a webcam in a GUI, but I always get a "not responding" window. It shows the first frame of the webcam and then freezes.
This is my Thread class:
class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def __init__(self):
        ...

    def run(self):
        self.cap = cv2.VideoCapture(0)
        while self._run_flag:
            ret, img = self.cap.read()
            if ret:
                self.change_pixmap_signal.emit(img)
        self.cap.release()

    def stop(self):
        ...
It can detect the frame successfully, but the last two lines of the code below make the GUI freeze. This is my main class:
class App(QWidget):
    def __init__(self):
        ...
        self.image_label = QLabel(self)
        ...
        self.thread = VideoThread()
        self.thread.change_pixmap_signal.connect(self.detection)
        self.thread.start()

    def closeEvent(self, event):
        ...

    @pyqtSlot(np.ndarray)
    def detection(self, img):
        original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.
        images_data = []
        # for i in range(1):
        images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)
        batch_data = tf.constant(images_data)
        pred_bbox = infer(batch_data)
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]
        # Perform NMS after YOLOv4 inference
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score
        )
        pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        image = utils.draw_bbox(original_image, pred_bbox)
        # image = utils.draw_bbox(image_data*255, pred_bbox)
        image = Image.fromarray(image.astype(np.uint8))
        image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
        qformat = QImage.Format_RGB888
        outImage = QImage(image, image.shape[1], image.shape[0], image.strides[0], qformat)
        # BGR >> RGB
        outImage = outImage.rgbSwapped()
        self.image_label.setPixmap(QPixmap.fromImage(outImage))
        self.image_label.setScaledContents(True)
What should I do about this freezing GUI?
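The detection slot runs on the GUI thread, so every TensorFlow inference call blocks the Qt event loop, and the window stops responding. A minimal sketch of one way to restructure this, assuming the infer, input_size, iou, score, and utils.draw_bbox objects from the question: do the heavy detection inside the worker thread and emit only the finished frame, so the slot in the GUI thread does nothing but paint.

class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)  # now carries the annotated frame

    def run(self):
        self.cap = cv2.VideoCapture(0)
        while self._run_flag:
            ret, img = self.cap.read()
            if ret:
                annotated = self.detect(img)  # heavy TF work stays off the GUI thread
                self.change_pixmap_signal.emit(annotated)
        self.cap.release()

    def detect(self, img):
        # hypothetical helper: move the body of App.detection() up to and
        # including utils.draw_bbox() here and return the drawn RGB image
        ...

# in App, the connected slot only converts and paints:
@pyqtSlot(np.ndarray)
def update_image(self, annotated):
    h, w, ch = annotated.shape
    qimg = QImage(annotated.data, w, h, ch * w, QImage.Format_RGB888)
    self.image_label.setPixmap(QPixmap.fromImage(qimg))

Because cross-thread signal delivery is queued, the emit from the worker is safe, and the GUI thread only ever does the cheap QImage conversion.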

Related

I am trying to make a window/frame containing a picture that allows you to click through it

Just to clarify my goal: I am trying to get a resizable (via mouse click and drag) window that is see-through and lets you change its transparency with the up and down arrow keys.
I want it to ask for an image, and then rescale it as you drag the window.
The key feature, which I have working, is allowing users to click through it.
The issue I am running into is that I can't place an image inside the frame, nor move or resize it.
from win32api import GetSystemMetrics
import win32con
import win32gui
import sys
from PIL import Image
import numpy as np
import wx

def scale_bitmap(bitmap, width, height):
    image = wx.ImageFromBitmap(bitmap)  # was wx.imageFromBitmap(bitmap)
    image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)
    result = wx.BitmapFromImage(image)
    return result

imageName = input("Enter name of image file")
im1 = Image.open("C:\\Users\\Daniel\\Desktop\\Tracing Images" + "\\" + imageName)
#r, g, b, = im1.split()
#im1 = Image.merge("RGB", (r, g, b))
im1.save("C:\\Users\\Daniel\\Desktop\\Tracing Images\\converted\\" + str("1") + ".bmp")
Imgbmp = Image.open("C:\\Users\\Daniel\\Desktop\\Tracing Images\\converted\\" + str("1") + ".bmp")
# convert image into bitmap?
app = wx.App()
trans = 50
# create a window/frame, no parent, -1 is default ID
# change the size of the frame to fit the background images
frame1 = wx.Frame(None, -1, "KExA", style=wx.RESIZE_BORDER | wx.STAY_ON_TOP)
# create the class instance
frame1.Show()  # was frame1.ShowFullScreen(True)
image_file = win32gui.SystemParametersInfo(win32con.SPI_GETDESKWALLPAPER, 0, 0)
bmp1 = image_file
bmp1 = wx.Image(image_file, wx.BITMAP_TYPE_ANY).ConvertToBitmap()  # trying to remove this to see if it fixes anything
bmp1 = scale_bitmap(bmp1, GetSystemMetrics(1) * 1.5, GetSystemMetrics(1))
bitmap1 = wx.StaticBitmap(frame1, -1, bmp1, (-100, 0))
hwnd = frame1.GetHandle()
extendedStyleSettings = win32gui.GetWindowLong(hwnd, win32con.GWL_EXSTYLE)
win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE,
                       extendedStyleSettings | win32con.WS_EX_LAYERED | win32con.WS_EX_TRANSPARENT)
win32gui.SetLayeredWindowAttributes(hwnd, 0, 255, win32con.LWA_ALPHA)
frame1.SetTransparent(trans)

def onKeyDown(e):
    global trans
    key = e.GetKeyCode()
    if key == wx.WXK_UP:
        trans += 10
        if trans > 255:
            trans = 255
    elif key == wx.WXK_DOWN:
        trans -= 10
        if trans < 0:
            trans = 0
    try:
        win32gui.SetLayeredWindowAttributes(hwnd, 0, trans, win32con.LWA_ALPHA)
    except:
        pass

frame1.Bind(wx.EVT_KEY_DOWN, onKeyDown)
app.MainLoop()
If you use wx.Image to manage your image, you have access to the Scale function, which allows you to tie the size of the image to the size of the window, or anything else for that matter.
For example:
import wx

class TestFrame(wx.Frame):
    def __init__(self, *args):
        wx.Frame.__init__(self, *args)
        self.Img = wx.Image("wxPython.jpg", wx.BITMAP_TYPE_ANY)
        Imgsize = self.Img.GetWidth(), self.Img.GetHeight()
        self.SetSize(Imgsize)
        self.Image = wx.StaticBitmap(self, bitmap=wx.Bitmap(self.Img))
        self.Bind(wx.EVT_SIZE, self.onResize)
        self.Show()

    def onResize(self, event):
        frame_w, frame_h = self.GetSize()  # GetSize() returns (width, height)
        img = self.Img.Scale(frame_w, frame_h)
        self.Image.SetBitmap(wx.Bitmap(img))  # reuse the StaticBitmap instead of creating a new one
        event.Skip()

if __name__ == "__main__":
    app = wx.App()
    myframe = TestFrame(None, -1, "Resize Me")
    app.MainLoop()
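To tie this back to the click-through frame in the question, the same idea applies: keep the one wx.StaticBitmap and swap its bitmap inside an EVT_SIZE handler. A sketch under the question's setup (imgOriginal is a hypothetical name for the unscaled wx.Image):

imgOriginal = wx.Image(image_file, wx.BITMAP_TYPE_ANY)  # keep the unscaled source around

def onResize(e):
    w, h = frame1.GetSize()
    bitmap1.SetBitmap(wx.Bitmap(imgOriginal.Scale(w, h, wx.IMAGE_QUALITY_HIGH)))
    e.Skip()

frame1.Bind(wx.EVT_SIZE, onResize)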

picamera.exc.PiCameraValueError: Incorrect buffer length for resolution 320x240

I have the code below for detecting certain blobs in an image. Before I combined it with the tkinter code, the camera code worked fine.
Now that they are combined, it raises the incorrect buffer length error.
I tried calling rawCapture.truncate(0) at the end, but that results in no GUI being displayed at all.
Does anyone have any clue why that is?
Below is the code; I am a beginner in Python, so no doubt it is messy.
import cv2
import numpy as np
from tkinter import *
from PIL import Image, ImageTk
from picamera.array import PiRGBArray
from picamera import PiCamera
import time

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (320, 240)
camera.brightness = 16
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(320, 240))
camera.zoom = (0.495, 0.435, 0.073, 0.073)

win = Tk()
win.geometry("670x600+200+30")
win.resizable(False, False)
w = 320
h = 240
color = "#581845"
frame_1 = Frame(win, width=670, height=700, bg=color).place(x=0, y=0)
var7 = IntVar()
var8 = IntVar()
W = 150
thresh = Scale(frame_1, label="thresh1", from_=0, to=255, orient=HORIZONTAL, variable=var7, activebackground='#339999')
thresh.set(0)
thresh.place(x=500, y=10, width=W)
thresh2 = Scale(frame_1, label="thresh2", from_=255, to=0, orient=HORIZONTAL, variable=var8, activebackground='#339999')
thresh2.set(255)
thresh2.place(x=500, y=80, width=W)
cap = cv2.VideoCapture(0)
label1 = Label(frame_1, width=w, height=h)
label1.place(x=10, y=160)
label2 = Label(frame_1, width=w, height=h)
label2.place(x=350, y=160)
label3 = Label(frame_1, width=w, height=h)
label3.place(x=10, y=370)
label4 = Label(frame_1, width=w, height=h)
label4.place(x=350, y=370)

#def select_img():
#    _, img = cap.read()
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=False):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array
    grayFrame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(grayFrame, 50, 255, cv2.THRESH_BINARY)  # note: this rebinds the name "thresh", shadowing the Scale widget above
    # Read image
    im = image
    # Set up the detector with default parameters.
    im = cv2.bitwise_not(im)
    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = 40
    params.maxThreshold = 255
    params.filterByArea = True
    params.minArea = 80
    params.filterByCircularity = False
    params.filterByConvexity = False
    params.filterByInertia = False
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    keypoints = detector.detect(im)
    im = cv2.bitwise_not(im)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of the blob
    im_with_keypoints_or = cv2.drawKeypoints(im, keypoints, np.array([]), (255, 255, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    im_with_keypoints_bi = cv2.drawKeypoints(thresh, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    im_with_keypoints_gr = cv2.drawKeypoints(grayFrame, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    file = open('color.txt', 'w')
    file.write("l_b = " + str(50) + '\n' + "u_b = " + str(50))
    file.close()
    #res = cv2.bitwise_and(im, im, mask=mask)
    #rgb2 = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(im_with_keypoints_or)
    iago = ImageTk.PhotoImage(image)
    label1.configure(image=iago)
    label1.image = iago
    image_2 = Image.fromarray(im_with_keypoints_bi)
    iago_2 = ImageTk.PhotoImage(image_2)
    label2.configure(image=iago_2)
    label2.image = iago_2
    image4 = Image.fromarray(im_with_keypoints_gr)
    iago4 = ImageTk.PhotoImage(image4)
    label4.configure(image=iago4)
    label4.image = iago4
    #win.after(10, select_img)
    #rawCapture.truncate(0)

select_img()
win.mainloop()
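For what it's worth, PiRGBArray must be truncated between captures; the PiCameraValueError appears because capture_continuous keeps appending into the same buffer. And the GUI never shows because the for loop blocks before win.mainloop() is ever reached. A sketch of one way to restructure it, assuming the camera, rawCapture, and label setup above: drive the capture from Tk's own event loop with after() and truncate the buffer on every tick.

def update_frame():
    # grab exactly one frame, process it, then reset the buffer
    camera.capture(rawCapture, format="bgr", use_video_port=True)
    image = rawCapture.array
    # ... blob detection and label updates as above ...
    rawCapture.truncate(0)           # required before the next capture
    win.after(100, update_frame)     # ~10 fps; Tk stays responsive

win.after(0, update_frame)
win.mainloop()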

How to use OpenCV video capture with custom video input?

I am trying to use the LK Optical Flow from this tutorial, to get some motion estimation into a robot simulation made with ROS+Gazebo.
I managed to set up the bridge between ROS and OpenCV properly via cv_bridge, as per the code below, and could implement some sample features that work "frame-by-frame" without major issues.
However, the optical flow tutorial reference seems to accept only video inputs, such as webcam and/or saved video files, and this is where I got stuck.
How could I apply the LK Optical flow in a "frame-by-frame" approach, or configure my cv_bridge to act as a "custom" camera device?
This is my cv_bridge so far:
#!/usr/bin/env python
import roslib
import rospy
import sys
import cv2
import numpy as np
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError

class OpticalFlow(object):
    def __init__(self):
        # Setting up the bridge and the subscriber
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/realsense/color/image_raw", Image, self.image_callback)
        # Parameters for Good Features Detection
        self.feature_params = dict(maxCorners = 100,
                                   qualityLevel = 0.3,
                                   minDistance = 7,
                                   blockSize = 7)
        # Parameters for Lucas-Kanade optical flow
        self.lk_params = dict(winSize = (150, 150),
                              maxLevel = 2,
                              criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # Create some random colors for the tracking points
        self.color = np.random.randint(0, 255, (100, 3))

    def image_callback(self, ros_image):
        # Using cv_bridge() to convert the ROS image to OpenCV format
        try:
            cv_image = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
            hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
            gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        except CvBridgeError as e:
            print(e)
        cv2.imshow("Robot Image", cv_image)  # the actual (non-processed) image from the simulation/robot
        #cv2.imshow('Image HSV', hsv_image)
        #cv2.imshow('Image Gray', gray)
        frame = np.array(cv_image, dtype=np.uint8)
        # Calling the Optical Flow Function
        display = self.optical_flow(frame, self.feature_params, self.lk_params, self.color)

    def optical_flow(self, frame, feature_params, lk_params, color):
        old_frame = frame
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
        mask = np.zeros_like(old_frame)
        while(1):
            newframe = frame
            frame_gray = cv2.cvtColor(newframe, cv2.COLOR_BGR2GRAY)
            # Calculating the optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            # Draw the tracks
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
                frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
            img = cv2.add(frame, mask)
            cv2.imshow('LK_Flow', img)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

def main():
    optical_flow_object = OpticalFlow()
    rospy.init_node('KLT_Node', anonymous=True)
    rospy.loginfo("\nWaiting for image topics...\n...")
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("\nShutting Down...\n...")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
I would also like to know, if possible, where I can find more information about how to tune those parameters (from the tutorial):
# params for ShiTomasi corner detection
feature_params = dict(maxCorners = 100,
                      qualityLevel = 0.3,
                      minDistance = 7,
                      blockSize = 7)
# Parameters for Lucas-Kanade optical flow
lk_params = dict(winSize = (15, 15),
                 maxLevel = 2,
                 criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
I would recommend using cv2.calcOpticalFlowPyrLK(old_frame, cur_frame, ...) or cv2.calcOpticalFlowFarneback(old_frame, cur_frame, ...) (dense optical flow). There is plenty of information about these methods in the OpenCV documentation. From personal experience, they work great!
Let me know if you have any questions or problems!
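A minimal sketch of the frame-by-frame approach inside the ROS callback, assuming the OpticalFlow class from the question (self.prev_gray and self.p0 are new attributes, initialized to None in __init__): keep the previous frame as instance state, so each callback advances the flow by one step instead of looping over the same frame.

def image_callback(self, ros_image):
    cv_image = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
    gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    if self.prev_gray is None:
        # first frame: just pick features and wait for the next message
        self.prev_gray = gray
        self.p0 = cv2.goodFeaturesToTrack(gray, mask=None, **self.feature_params)
        return
    p1, st, err = cv2.calcOpticalFlowPyrLK(self.prev_gray, gray,
                                           self.p0, None, **self.lk_params)
    good_new = p1[st == 1]
    # ... draw the tracks on cv_image as in the tutorial ...
    cv2.imshow('LK_Flow', cv_image)
    cv2.waitKey(1)
    # the current frame becomes the "old" frame for the next callback
    self.prev_gray = gray
    self.p0 = good_new.reshape(-1, 1, 2)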

How do I take the output of the first processed image (e.g., Canny filter) as input to another filter?

How do I take the output of the first processed image (e.g., Canny filter) as input to another process/filter (e.g., Sobel filter), instead of applying every filter to the original image?
(screenshot: before applying any filter)
(screenshot: after applying a filter)
Whenever I apply another process, it is applied to the original image again, not to the output of the first process/filter.
Code:
import sys
import numpy as np
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import cv2

class Ui_MainWindow(QMainWindow):
    global img

    def __init__(self):
        super().__init__()
        self.init_Main_Ui()
        self.init_Menu_Ui()

    def init_Main_Ui(self):
        self.setObjectName("test")
        self.setEnabled(True)
        self.resize(1200, 700)
        self.setMinimumSize(QtCore.QSize(500, 300))
        self.setMaximumSize(QtCore.QSize(500, 300))
        self.image_label = QLabel(self)
        self.setCentralWidget(self.image_label)
        self.show()

    def init_Menu_Ui(self):
        global img
        menu_bar = self.menuBar()
        menu_bar.setNativeMenuBar(False)
        file_menu = menu_bar.addMenu('&File')  # & makes Alt+F the menu shortcut
        Exit_action = QAction('Exit', self)
        Exit_action.setShortcut('Ctrl+Q')
        Exit_action.triggered.connect(qApp.quit)
        Open_action = QAction('open', self)
        Open_action.setShortcut('Ctrl+O')
        Open_action.triggered.connect(self.read_file)
        file_menu.addAction(Open_action)
        file_menu.addAction(Exit_action)
        self.filter_menu = menu_bar.addMenu("&Filter")
        self.filter_menu.setEnabled(False)
        Sobel_action = QAction('Sobel filter', self)
        Sobel_action.setShortcut('Alt+1')
        Sobel_action.triggered.connect(
            lambda: self.Sobel_filter(img)
        )
        Prewitt_action = QAction('Prewitt filter', self)
        Prewitt_action.setShortcut('Alt+2')
        Prewitt_action.triggered.connect(
            lambda: self.Prewitt_filter(img)
        )
        Gaussian_action = QAction('Gaussian filter', self)
        Gaussian_action.setShortcut('Alt+3')
        Gaussian_action.triggered.connect(
            lambda: self.Gaussian_filter(img)
        )
        Canny_action = QAction('Canny filter', self)
        Canny_action.setShortcut('Alt+4')
        Canny_action.triggered.connect(
            lambda: self.Canny_filter(img)
        )
        LoG_action = QAction('LoG filter', self)
        LoG_action.setShortcut('Alt+5')
        LoG_action.triggered.connect(
            lambda: self.LoG_filter(img)
        )
        self.setWindowTitle('Image Processing')
        self.filter_menu.addAction(Sobel_action)
        self.filter_menu.addAction(Prewitt_action)
        self.filter_menu.addAction(Gaussian_action)

    def read_file(self):
        global img
        file_name = QFileDialog.getOpenFileName(self)
        if file_name[0] is not '':
            img0 = cv2.imread(file_name[0])
            img = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
            self.reshow_image(img)
            print('aa')
            self.filter_menu.setEnabled(True)
        else:
            print('please put img')

    def save_image(self):
        print("save")

    def Sobel_filter(self, img):
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
        sobel = img.copy()
        height = np.size(img, 0)
        width = np.size(img, 1)
        for i in range(width):
            for j in range(height):
                sobel[j, i] = np.minimum(255, np.round(np.sqrt(sobelx[j, i] * sobelx[j, i] + sobely[j, i] * sobely[j, i])))
        sobel = cv2.cvtColor(sobel, cv2.COLOR_GRAY2RGB)
        cv2.imwrite("Sobel Filtered Image.png", sobel)
        self.reshow_image(sobel)

    def Prewitt_filter(self, img):
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
        kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
        img_prewittx = cv2.filter2D(img, -1, kernelx)
        img_prewitty = cv2.filter2D(img, -1, kernely)
        Prewitt = cv2.cvtColor(img_prewittx + img_prewitty, cv2.COLOR_GRAY2RGB)
        self.reshow_image(Prewitt)

    def Gaussian_filter(self, img):
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_smooth = cv2.GaussianBlur(img, (3, 3), 0)
        img_smooth = cv2.cvtColor(img_smooth, cv2.COLOR_GRAY2RGB)
        self.reshow_image(img_smooth)

    def reshow_image(self, cv_img):
        if cv_img is not None:
            self.image_label.resize(cv_img.shape[1], cv_img.shape[0])
            Q_img = QImage(cv_img.data, cv_img.shape[1], cv_img.shape[0], cv_img.shape[1] * 3, QImage.Format_RGB888)
            self.image_label.setPixmap(QPixmap.fromImage(Q_img))
        else:
            print("Image load failed")

    def exit(self):
        cv2.waitKey(0)
        cv2.destroyAllWindows()

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow()
    sys.exit(app.exec_())
Frankly, I don't understand why you have a problem.
Every filter should assign its image to a global/class variable, i.e. self.current_img:
def Gaussian_filter(self, img):
    # ... code ...
    self.current_img = img_smooth
    self.reshow_image(self.current_img)

def Sobel_filter(self, img):
    # ... code ...
    self.current_img = sobel
    self.reshow_image(self.current_img)

def Prewitt_filter(self, img):
    # ... code ...
    self.current_img = Prewitt
    self.reshow_image(self.current_img)
and every filter should use the image from this variable self.current_img:
Sobel_action.triggered.connect(
    lambda: self.Sobel_filter(self.current_img)
)
Prewitt_action.triggered.connect(
    lambda: self.Prewitt_filter(self.current_img)
)
Gaussian_action.triggered.connect(
    lambda: self.Gaussian_filter(self.current_img)
)
Canny_action.triggered.connect(
    lambda: self.Canny_filter(self.current_img)
)
LoG_action.triggered.connect(
    lambda: self.LoG_filter(self.current_img)
)
And do the same at the start, when you read the image:

if file_name[0] != '':  # was: is not ''
    img = cv2.imread(file_name[0])
    self.current_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    self.original_img = self.current_img.copy()
    self.reshow_image(self.current_img)
I also used self.original_img to keep the original image, in case you want to start over from the original image.
EDIT
With the class variable self.current_img you can even use it directly in the methods, without passing it as an argument:
Sobel_action.triggered.connect(self.Sobel_filter)
Prewitt_action.triggered.connect(self.Prewitt_filter)
Gaussian_action.triggered.connect(self.Gaussian_filter)
Canny_action.triggered.connect(self.Canny_filter)
LoG_action.triggered.connect(self.LoG_filter)
def Gaussian_filter(self):
    img = self.current_img
    # ... code ...
    self.current_img = img_smooth
    self.reshow_image(self.current_img)

def Sobel_filter(self):
    img = self.current_img
    # ... code ...
    self.current_img = sobel
    self.reshow_image(self.current_img)

def Prewitt_filter(self):
    img = self.current_img
    # ... code ...
    self.current_img = Prewitt
    self.reshow_image(self.current_img)
BTW:
def reset_to_original_image(self):
    self.current_img = self.original_img.copy()
    self.reshow_image(self.current_img)
BTW:
you could even keep a list with the history of all images:
def Sobel_filter(self):
    self.history.append(self.current_img.copy())
    img = self.current_img
    # ... code ...
    self.current_img = sobel
    self.reshow_image(self.current_img)
EDIT:
Full working code.
Filters work on the result of the previous filter.
You also have a Reset to original image function.
All images are remembered in the history, and an Undo function takes you back to the previous image. You can even undo a Reset.
import sys
import numpy as np
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import cv2

class Ui_MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.init_Main_Ui()
        self.init_Menu_Ui()
        self.current_img = None   # default value at start
        self.original_img = None  # default value at start
        self.history = []

    def init_Main_Ui(self):
        self.setObjectName("test")
        self.setEnabled(True)
        self.resize(1200, 700)
        self.setMinimumSize(QtCore.QSize(500, 300))
        self.setMaximumSize(QtCore.QSize(500, 300))
        self.image_label = QLabel(self)
        self.setCentralWidget(self.image_label)
        self.show()

    def init_Menu_Ui(self):
        menu_bar = self.menuBar()
        menu_bar.setNativeMenuBar(False)
        file_menu = menu_bar.addMenu('&File')  # & makes Alt+F the menu shortcut
        Exit_action = QAction('Exit', self)
        Exit_action.setShortcut('Ctrl+Q')
        Exit_action.triggered.connect(qApp.quit)
        Open_action = QAction('open', self)
        Open_action.setShortcut('Ctrl+O')
        Open_action.triggered.connect(self.read_file)
        file_menu.addAction(Open_action)
        file_menu.addAction(Exit_action)
        self.filter_menu = menu_bar.addMenu("&Filter")
        self.filter_menu.setEnabled(False)
        Reset_action = QAction('Reset to original image', self)
        Reset_action.setShortcut('Alt+0')
        Reset_action.triggered.connect(self.reset_to_original_image)
        Sobel_action = QAction('Sobel filter', self)
        Sobel_action.setShortcut('Alt+1')
        Sobel_action.triggered.connect(self.Sobel_filter)
        Prewitt_action = QAction('Prewitt filter', self)
        Prewitt_action.setShortcut('Alt+2')
        Prewitt_action.triggered.connect(self.Prewitt_filter)
        Gaussian_action = QAction('Gaussian filter', self)
        Gaussian_action.setShortcut('Alt+3')
        Gaussian_action.triggered.connect(self.Gaussian_filter)
        Canny_action = QAction('Canny filter', self)
        Canny_action.setShortcut('Alt+4')
        Canny_action.triggered.connect(self.Canny_filter)  # filter has to exist
        LoG_action = QAction('LoG filter', self)
        LoG_action.setShortcut('Alt+5')
        LoG_action.triggered.connect(self.LoG_filter)  # filter has to exist
        Undo_action = QAction('Undo filter', self)
        Undo_action.setShortcut('Alt+X')
        Undo_action.triggered.connect(self.Undo_filter)  # filter has to exist
        self.setWindowTitle('Image Processing')
        self.filter_menu.addAction(Reset_action)
        self.filter_menu.addAction(Sobel_action)
        self.filter_menu.addAction(Prewitt_action)
        self.filter_menu.addAction(Gaussian_action)
        self.filter_menu.addAction(Undo_action)

    def read_file(self):
        file_name = QFileDialog.getOpenFileName(self)
        if file_name[0] != '':  # was: is not ''
            img = cv2.imread(file_name[0])
            self.original_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            self.current_img = self.original_img.copy()
            self.reshow_image(self.current_img)
            self.filter_menu.setEnabled(True)
        else:
            print('please put img')

    def save_image(self):
        print("save")

    def reset_to_original_image(self):
        self.history.append(self.current_img.copy())
        self.current_img = self.original_img.copy()
        self.reshow_image(self.current_img)

    def Sobel_filter(self):
        self.history.append(self.current_img.copy())
        img = self.current_img
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
        sobel = img.copy()
        height = np.size(img, 0)
        width = np.size(img, 1)
        for i in range(width):
            for j in range(height):
                sobel[j, i] = np.minimum(255, np.round(np.sqrt(sobelx[j, i] * sobelx[j, i] + sobely[j, i] * sobely[j, i])))
        sobel = cv2.cvtColor(sobel, cv2.COLOR_GRAY2RGB)
        cv2.imwrite("Sobel Filtered Image.png", sobel)
        self.current_img = sobel
        self.reshow_image(self.current_img)

    def Prewitt_filter(self):
        self.history.append(self.current_img.copy())
        img = self.current_img
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
        kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
        img_prewittx = cv2.filter2D(img, -1, kernelx)
        img_prewitty = cv2.filter2D(img, -1, kernely)
        Prewitt = cv2.cvtColor(img_prewittx + img_prewitty, cv2.COLOR_GRAY2RGB)
        self.current_img = Prewitt
        self.reshow_image(self.current_img)

    def Gaussian_filter(self):
        self.history.append(self.current_img.copy())
        img = self.current_img
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img_smooth = cv2.GaussianBlur(img, (3, 3), 0)
        img_smooth = cv2.cvtColor(img_smooth, cv2.COLOR_GRAY2RGB)
        self.current_img = img_smooth
        self.reshow_image(self.current_img)

    def Canny_filter(self):
        print("TODO: create Canny_filter")
        #self.history.append(self.current_img.copy())
        #img = self.current_img
        # ... code ...
        #self.current_img = img_smooth
        #self.reshow_image(self.current_img)

    def LoG_filter(self):
        print("TODO: create LoG_filter")
        #self.history.append(self.current_img.copy())
        #img = self.current_img
        # ... code ...
        #self.current_img = img_smooth
        #self.reshow_image(self.current_img)

    def Undo_filter(self):
        if self.history:
            self.current_img = self.history.pop(-1)
            self.reshow_image(self.current_img)

    def reshow_image(self, cv_img):
        if cv_img is not None:
            self.image_label.resize(cv_img.shape[1], cv_img.shape[0])
            Q_img = QImage(cv_img.data, cv_img.shape[1], cv_img.shape[0], cv_img.shape[1] * 3, QImage.Format_RGB888)
            self.image_label.setPixmap(QPixmap.fromImage(Q_img))
        else:
            print("Image load failed")

    def exit(self):
        cv2.waitKey(0)
        cv2.destroyAllWindows()

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow()
    sys.exit(app.exec_())

How to fix 'After updating text in QLabel, text randomly changes between new and old value'

I'm using two threads, Camera and NetworkProcessor, in a Qt application, which display their current state in QLabels (there is one label for each thread: camStatusLabel, nnStatusLabel). To update the label text I am using signals and slots. There are also six labels that display images (cameraView, k1, k2, k3, k4, k5), which are updated periodically in a while loop, likewise via signals and slots.
import cv2
import sys
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtGui import *
import threading as th
from PIL import Image
import copy

class SecondPage(QWidget):
    def __init__(self):
        QWidget.__init__(self)
        self.bigSize = QSize(640, 480)
        self.smallSize = QSize(320, 240)
        blackPic = QPixmap(self.bigSize)
        blackPic.fill(Qt.black)
        self.layoutTop = QVBoxLayout()
        self.layoutTop.setAlignment(Qt.AlignCenter)
        self.topRowLayout = QHBoxLayout()
        self.buttonBack = QPushButton("Back")
        self.camStatusLabel = QLabel("Camera status: not running...")
        self.nnStatusLabel = QLabel("Neural net status: not running...")
        self.topRowLayout.addWidget(self.buttonBack, alignment=Qt.AlignRight)
        self.topRowLayout.addWidget(self.camStatusLabel, alignment=Qt.AlignLeft)
        self.topRowLayout.addWidget(self.nnStatusLabel, alignment=Qt.AlignLeft)
        self.cameraView = QLabel()
        self.cameraView.setFixedSize(self.bigSize)
        self.cameraView.setPixmap(blackPic)
        self.knnLayout = QHBoxLayout()
        self.layoutTop.addLayout(self.topRowLayout, alignment=Qt.AlignLeft)
        self.layoutTop.addWidget(self.cameraView, alignment=Qt.AlignCenter)
        self.layoutTop.addLayout(self.knnLayout, alignment=Qt.AlignCenter)
        self.k1 = QLabel(alignment=Qt.AlignCenter)
        self.k1.setFixedSize(self.smallSize)
        self.k1.setPixmap(blackPic)
        self.k2 = QLabel(alignment=Qt.AlignCenter)
        self.k2.setFixedSize(self.smallSize)
        self.k2.setPixmap(blackPic)
        self.k3 = QLabel(alignment=Qt.AlignCenter)
        self.k3.setFixedSize(self.smallSize)
        self.k3.setPixmap(blackPic)
        self.k4 = QLabel(alignment=Qt.AlignCenter)
        self.k4.setFixedSize(self.smallSize)
        self.k4.setPixmap(blackPic)
        self.k5 = QLabel(alignment=Qt.AlignCenter)
        self.k5.setFixedSize(self.smallSize)
        self.k5.setPixmap(blackPic)
        self.knnLayout.addWidget(self.k1)
        self.knnLayout.addWidget(self.k2)
        self.knnLayout.addWidget(self.k3)
        self.knnLayout.addWidget(self.k4)
        self.knnLayout.addWidget(self.k5)
        self.setLayout(self.layoutTop)
        self.frame = [None]
        self.frameLock = QMutex()
        startExecute = th.Event()
        threadIsLoadedEvent = th.Event()
        self.cameraThread = Camera()
        self.connect(self.cameraThread, SIGNAL("updateCameraView(QImage)"), self.updateCameraView)
        self.connect(self.cameraThread, SIGNAL("updateCameraStatus(QString)"), self.updateCameraStatus)
        self.cameraThread.startCapturing(self.frame, self.frameLock, startExecute, threadIsLoadedEvent)
        self.processingThread = NetworkProcessor()
        self.connect(self.processingThread, SIGNAL("updateResults(QImage,QImage,QImage,QImage,QImage)"), self.updateResults)
        self.connect(self.processingThread, SIGNAL("nnStatusLabel(QString)"), self.updateNNStatus)
        self.processingThread.startAnalyzing(self.frame, self.frameLock, self.smallSize.toTuple(), startExecute, threadIsLoadedEvent)
        startExecute.set()

    def updateCameraStatus(self, text):
        self.camStatusLabel.setText(text)
        self.camStatusLabel.update()

    def updateNNStatus(self, text):
        self.nnStatusLabel.setText(text)
        self.nnStatusLabel.update()

    def updateCameraView(self, image):
        self.cameraView.setPixmap(QPixmap.fromImage(image).scaled(self.bigSize))
        self.cameraView.update()

    def updateResults(self, p1, p2, p3, p4, p5):
        self.k1.setPixmap(QPixmap.fromImage(p1))
        self.k1.update()
        self.k2.setPixmap(QPixmap.fromImage(p2))
        self.k2.update()
        self.k3.setPixmap(QPixmap.fromImage(p3))
        self.k3.update()
        self.k4.setPixmap(QPixmap.fromImage(p4))
        self.k4.update()
        self.k5.setPixmap(QPixmap.fromImage(p5))
        self.k5.update()

    def killPage(self):
        self.cameraThread.stopThread()
        self.processingThread.stopThread()

class Camera(QThread):
    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        self.exit = False
        self.frame = None
        self.frameLock = None
        self.initSuccessEvent = None
        self.startEvent = None

    def stopThread(self):
        self.exit = True
        self.wait()

    def startCapturing(self, frame, frameLock, startEvent, initSuccessEvent):
        self.frame = frame
        self.frameLock = frameLock
        self.initSuccessEvent = initSuccessEvent
        self.startEvent = startEvent
        self.start()

    def run(self):
        self.startEvent.wait()
        self.emit(SIGNAL("updateCameraStatus(QString)"), "Camera status: Opening...")
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        # initialize frame and signal success to main thread
        self.frameLock.lock()
        _, self.frame[0] = cap.read()
        self.frameLock.unlock()
        self.initSuccessEvent.set()
        self.emit(SIGNAL("updateCameraStatus(QString)"), "Camera status: Running...")
        # enter into loop
        while True:
            _, img = cap.read()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            self.frameLock.lock()
            self.frame[0] = copy.deepcopy(img)
            self.frameLock.unlock()
            img = QImage(img, img.shape[1], img.shape[0], img.strides[0], QImage.Format_RGB888)
            self.emit(SIGNAL("updateCameraView(QImage)"), img)
            if self.exit:
                cap.release()
                break

class NetworkProcessor(QThread):
    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        self.exit = False
        self.frame = None
        self.frameLock = None
        self.imgSize = (100, 100)
        self.initSuccessEvent = None
        self.startEvent = None

    def stopThread(self):
        self.exit = True
        self.wait()

    def startAnalyzing(self, frame, frameLock, imgSize, startEvent, initSuccessEvent):
        self.frame = frame
        self.frameLock = frameLock
        self.imgSize = imgSize
        self.initSuccessEvent = initSuccessEvent
        self.startEvent = startEvent
        self.start()

    def run(self):
        import pydevd; pydevd.settrace(suspend=False)
        self.startEvent.wait()
        self.emit(SIGNAL("nnStatusLabel(QString)"), "Neural net status: Waiting for camera...")
        self.initSuccessEvent.wait()
        self.emit(SIGNAL("nnStatusLabel(QString)"), "Neural net status: Loading nn and dataset...")
        QThread.msleep(1000)  # to simulate initialization
        self.emit(SIGNAL("nnStatusLabel(QString)"), "Neural net status: Running...")
        while True:
            QThread.msleep(1000)
            if self.exit:
                break
            self.frameLock.lock()
            camImg = copy.deepcopy(self.frame[0])
            self.frameLock.unlock()
            products = self._processImg(Image.fromarray(camImg), 5)
            data1 = products[0].tobytes('raw', 'RGB')
            k1 = QImage(data1, products[0].size[0], products[0].size[1], QImage.Format_RGB888)
            data2 = products[1].tobytes('raw', 'RGB')
            k2 = QImage(data2, products[1].size[0], products[1].size[1], QImage.Format_RGB888)
            data3 = products[2].tobytes('raw', 'RGB')
            k3 = QImage(data3, products[2].size[0], products[2].size[1], QImage.Format_RGB888)
            data4 = products[3].tobytes('raw', 'RGB')
            k4 = QImage(data4, products[3].size[0], products[3].size[1], QImage.Format_RGB888)
            data5 = products[4].tobytes('raw', 'RGB')
            k5 = QImage(data5, products[4].size[0], products[4].size[1], QImage.Format_RGB888)
            self.emit(SIGNAL("updateResults(QImage,QImage,QImage,QImage,QImage)"), k1, k2, k3, k4, k5)
            if self.exit:
                break

    def _processImg(self, queryImg, k):
        imgs = []
        for i in range(k):
            im = queryImg.rotate(20 * i)
            im = im.resize(self.imgSize)
            imgs.append(im)
        return imgs

class MainWindow(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        self.loadSecondPage()

    def loadSecondPage(self):
        widget = SecondPage()
        self.setCentralWidget(widget)
        self.setWindowTitle("Metriclearning demo - Visualisation")

    def closeEvent(self, event):
        print("Closing app")
        self.centralWidget().killPage()
        event.accept()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
The problem is that when I update the text labels containing the thread state (camStatusLabel, nnStatusLabel), the content does not stay as I set it, but randomly flips back to previous texts and forth again, even though the text is never updated again once the thread enters its while loop.
The same problem is observable on the labels containing images.
Does anyone know what might cause this problem?
EDIT:
I edited the code above to be executable. The problem is the same as already described: the text in the QLabel nnStatusLabel changes even though it is not supposed to, and similar behavior is occasionally observable on the displayed images as well; the displayed images (cameraView, k1, k2, k3, k4, k5) jump back to earlier frames.
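For what it's worth, two things in this code are common causes of exactly this symptom, so a hedged guess: a QImage built directly on a numpy or PIL buffer does not copy the pixel data, so when the worker reuses or frees that buffer the already-displayed image changes underneath the label; and the old-style self.emit(SIGNAL("...")) string signals are easy to mis-match, while PySide2's new-style Signal objects are type-checked and automatically queued across threads. A sketch of both fixes applied to the Camera thread (the connect lines shown as comments would replace the SIGNAL-based ones):

class Camera(QThread):
    # new-style signals, declared as class attributes
    updateCameraStatus = Signal(str)
    updateCameraView = Signal(QImage)

    def run(self):
        self.startEvent.wait()
        self.updateCameraStatus.emit("Camera status: Running...")
        cap = cv2.VideoCapture(0)
        while not self.exit:
            ret, img = cap.read()
            if not ret:
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            qimg = QImage(img, img.shape[1], img.shape[0],
                          img.strides[0], QImage.Format_RGB888).copy()
            # .copy() detaches the QImage from the temporary numpy buffer,
            # which Python may free or reuse once img goes out of scope
            self.updateCameraView.emit(qimg)
        cap.release()

# in SecondPage.__init__, instead of self.connect(..., SIGNAL(...), ...):
# self.cameraThread.updateCameraStatus.connect(self.updateCameraStatus)
# self.cameraThread.updateCameraView.connect(self.updateCameraView)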
