I am trying to stream frames from a thread into a Gtk.Image in GTK. The frames show up successfully for a few seconds, but then the GUI freezes and the image goes blank, while the thread keeps running.
import gi
import threading
import cv2
import time

gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GLib, GObject, GdkPixbuf

run = True

# Grab a single frame up front so the window has something to show.
cam = cv2.VideoCapture(0)
ret, frame = cam.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cam.release()

class MyWindow(Gtk.Window):
    def __init__(self):
        global cam, frame, run, image  # image must be reachable from the worker thread
        Gtk.Window.__init__(self, title="Hello")
        self.set_border_width(10)
        image = Gtk.Image()
        h, w, d = frame.shape
        pixbuf = GdkPixbuf.Pixbuf.new_from_data(
            frame.tostring(), GdkPixbuf.Colorspace.RGB, False, 8, w, h, w * d)
        image.set_from_pixbuf(pixbuf.copy())
        self.add(image)

def thread_running_example():
    cam = cv2.VideoCapture(0)
    while run:
        ret, frame = cam.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, d = frame.shape
        pixbuf = GdkPixbuf.Pixbuf.new_from_data(
            frame.tostring(), GdkPixbuf.Colorspace.RGB, False, 8, w, h, w * d)
        # Touching the widget from this worker thread is what eventually
        # freezes the GUI: GTK is not thread-safe.
        image.set_from_pixbuf(pixbuf.copy())

thread = threading.Thread(target=thread_running_example)
thread.daemon = True
thread.start()

win = MyWindow()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
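GTK widgets may only be touched from the thread running the main loop; a minimal sketch of the usual fix (assuming the same globals as above), using GLib.idle_add to hand each finished pixbuf over to the main loop:

def update_image(pixbuf):
    image.set_from_pixbuf(pixbuf)
    return False  # run once, then drop out of the idle queue

def thread_running_example():
    cam = cv2.VideoCapture(0)
    while run:
        ret, frame = cam.read()
        if not ret:
            continue
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, d = frame.shape
        pixbuf = GdkPixbuf.Pixbuf.new_from_data(
            frame.tobytes(), GdkPixbuf.Colorspace.RGB, False, 8, w, h, w * d)
        # Schedule the widget update on the GTK main loop instead of
        # calling image.set_from_pixbuf() here.
        GLib.idle_add(update_image, pixbuf.copy())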
import cv2
import numpy as np
from PIL import ImageGrab
from time import time
from TKinterMess import Display
import tkinter as tk
import multiprocessing

e = multiprocessing.Event()
p = None

# kassadin portrait image
needle_list = []
kassadin_img = cv2.imread('kassadin.jpg', cv2.IMREAD_UNCHANGED)
darius_img = cv2.imread('darius.JPG', cv2.IMREAD_UNCHANGED)
#loop_time = time()
needle_list.append(kassadin_img)
needle_list.append(darius_img)
threshold = 0.5

def mainloop():
    root = tk.Tk()

def start_text_display(text):
    global p
    p = multiprocessing.Process(target=Display, args=(text, e))
    p.start()

def namestr(obj, namespace):
    return [name for name in namespace if namespace[name] is obj]

def Display(text):
    #root = tk.Tk()
    w = tk.Label(root, text=text, font="Arial 100", background='gray')
    w.master.overrideredirect(True)
    w.master.geometry("+700+0")
    w.master.lift()
    w.master.wm_attributes("-topmost", True)
    w.master.wm_attributes("-disabled", True)
    w.master.wm_attributes("-alpha", 0.5)
    w.master.wm_attributes('-transparentcolor', 'white')
    w.after(1750, w.master.destroy)
    w.pack()
    root.mainloop()

def record_screen():
    while True:
        screenshot = ImageGrab.grab(bbox=(1645, 800, 1920, 1080))
        screenshot = np.array(screenshot)
        screenshot = cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGR)
        for needle in needle_list:
            x = namestr(needle, globals())
            results = cv2.matchTemplate(screenshot, needle, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(results)
            top_left = max_loc
            bottom_right = (top_left[0] + needle.shape[1], top_left[1] + needle.shape[0])
            if max_val >= threshold:
                if str(x) == "['darius_img']":
                    cv2.rectangle(screenshot, top_left, bottom_right, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_4)
                    Display('Darius')
                elif str(x) == "['kassadin_img']":
                    cv2.rectangle(screenshot, top_left, bottom_right, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_4)
                    p2 = multiprocessing.Process(target=Display, args=('Kassadin',))
                    p2.start()
                    p2.join()
        cv2.imshow(None, screenshot)
        #print('FPS {}'.format(1 / (time() - loop_time)))
        #loop_time = time()
        if cv2.waitKey(1) == ord('q'):
            cv2.destroyAllWindows()

if __name__ == '__main__':
    p1 = multiprocessing.Process(target=record_screen)
    p2 = multiprocessing.Process(target=Display, args=('Kassadin',))
    p2.start()
    p1.start()
    p1.join()
    p2.join()
In the main block, when I run Display as a process, it works without a problem: the text shows up on screen and the image capture is active in the bottom right.
However, when I call Display and run it as a process once record_screen finds a match, it doesn't work and the display window still freezes.
Any help would be appreciated.
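A sketch of one way around this, assuming the goal is just to flash the matched name without stalling the capture loop: keep a single long-lived display process and feed it through a multiprocessing.Queue, so record_screen never calls Display directly and never blocks on join().

import multiprocessing
import queue
import tkinter as tk

def display_worker(q):
    # One long-lived tkinter process; record_screen just q.put()s names.
    root = tk.Tk()
    root.withdraw()  # hide the empty root window

    def poll():
        try:
            text = q.get_nowait()
        except queue.Empty:
            pass
        else:
            w = tk.Toplevel(root)
            w.overrideredirect(True)
            w.geometry("+700+0")
            w.attributes("-topmost", True)
            tk.Label(w, text=text, font="Arial 100", background='gray').pack()
            w.after(1750, w.destroy)
        root.after(50, poll)

    poll()
    root.mainloop()

if __name__ == '__main__':
    q = multiprocessing.Queue()
    disp = multiprocessing.Process(target=display_worker, args=(q,), daemon=True)
    disp.start()
    # Inside record_screen (which would need q passed in), replace the
    # Display(...) call and the Process(...)/join() pair with:
    #     q.put('Darius')  /  q.put('Kassadin')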
Just to clarify my goal: I am trying to get a resizable (via mouse click-and-drag) window that is see-through and lets you change its transparency with the up and down arrow keys.
I want it to ask for an image, and then rescale that image as you drag the window.
The key feature, which I have working, is that users can click through it.
The issue I am running into is that I can't place an image inside the frame, nor move or resize it.
from win32api import GetSystemMetrics
import win32con
import win32gui
import sys
from PIL import Image
import numpy as np
import wx

def scale_bitmap(bitmap, width, height):
    image = wx.ImageFromBitmap(bitmap)  # was wx.imageFromBitmap(bitmap)
    image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)
    result = wx.BitmapFromImage(image)
    return result

imageName = input("Enter name of image file")
im1 = Image.open("C:\\Users\\Daniel\\Desktop\\Tracing Images" + "\\" + imageName)
#r, g, b, = im1.split()
#im1 = Image.merge("RGB", (r, g, b))
im1.save("C:\\Users\\Daniel\\Desktop\\Tracing Images\\converted\\" + str("1") + ".bmp")
Imgbmp = Image.open("C:\\Users\\Daniel\\Desktop\\Tracing Images\\converted\\" + str("1") + ".bmp")
#convert image into bitmap?

app = wx.App()
trans = 50

# create a window/frame, no parent, -1 is default ID
# change the size of the frame to fit the background images
frame1 = wx.Frame(None, -1, "KExA", style=wx.RESIZE_BORDER | wx.STAY_ON_TOP)
# create the class instance
frame1.Show()  # was frame1.ShowFullScreen(True)

image_file = win32gui.SystemParametersInfo(win32con.SPI_GETDESKWALLPAPER, 0, 0)
bmp1 = image_file
bmp1 = wx.Image(image_file, wx.BITMAP_TYPE_ANY).ConvertToBitmap()  # trying to remove this to see if it fixes anything
bmp1 = scale_bitmap(bmp1, GetSystemMetrics(1) * 1.5, GetSystemMetrics(1))
bitmap1 = wx.StaticBitmap(frame1, -1, bmp1, (-100, 0))

hwnd = frame1.GetHandle()
extendedStyleSettings = win32gui.GetWindowLong(hwnd, win32con.GWL_EXSTYLE)
win32gui.SetWindowLong(hwnd, win32con.GWL_EXSTYLE,
                       extendedStyleSettings | win32con.WS_EX_LAYERED | win32con.WS_EX_TRANSPARENT)
win32gui.SetLayeredWindowAttributes(hwnd, 0, 255, win32con.LWA_ALPHA)
frame1.SetTransparent(trans)

def onKeyDown(e):
    global trans
    key = e.GetKeyCode()
    if key == wx.WXK_UP:
        print(trans)
        trans += 10
        if trans > 255:
            trans = 255
    elif key == wx.WXK_DOWN:
        print(trans)
        trans -= 10
        if trans < 0:
            trans = 0
    try:
        win32gui.SetLayeredWindowAttributes(hwnd, 0, trans, win32con.LWA_ALPHA)
    except:
        pass

frame1.Bind(wx.EVT_KEY_DOWN, onKeyDown)
app.MainLoop()
If you use wx.Image to manage your image, you can access the Scale function, which will allow you to tie the size of the image to the size of the window, or anything else for that matter.
For example:
import wx

class TestFrame(wx.Frame):
    def __init__(self, *args):
        wx.Frame.__init__(self, *args)
        self.Img = wx.Image("wxPython.jpg", wx.BITMAP_TYPE_ANY)
        Imgsize = self.Img.GetWidth(), self.Img.GetHeight()
        self.SetSize(Imgsize)
        self.Image = wx.StaticBitmap(self, bitmap=wx.Bitmap(self.Img))
        self.Bind(wx.EVT_SIZE, self.onResize)
        self.Show()

    def onResize(self, event):
        frame_w, frame_h = self.GetSize()  # GetSize returns (width, height)
        img = self.Img.Scale(frame_w, frame_h)
        # reuse the existing StaticBitmap instead of stacking new ones
        self.Image.SetBitmap(wx.Bitmap(img))
        event.Skip()

if __name__ == "__main__":
    app = wx.App()
    myframe = TestFrame(None, -1, "Resize Me")
    app.MainLoop()
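Tying this back to the original goal, a minimal wx-only sketch (hypothetical image file name, and without the win32 click-through styles, which would be applied on top as in the question): the image rescales with the frame, and the arrow keys adjust transparency via SetTransparent.

import wx

class Overlay(wx.Frame):
    def __init__(self, path):
        wx.Frame.__init__(self, None, -1, "Overlay",
                          style=wx.RESIZE_BORDER | wx.STAY_ON_TOP)
        self.alpha = 128
        self.img = wx.Image(path, wx.BITMAP_TYPE_ANY)
        self.bmp = wx.StaticBitmap(self, bitmap=wx.Bitmap(self.img))
        self.SetTransparent(self.alpha)
        self.Bind(wx.EVT_SIZE, self.on_resize)
        self.Bind(wx.EVT_CHAR_HOOK, self.on_key)
        self.Show()

    def on_resize(self, event):
        # rescale the image whenever the frame is dragged to a new size
        w, h = self.GetClientSize()
        if w > 0 and h > 0:
            self.bmp.SetBitmap(wx.Bitmap(self.img.Scale(w, h)))
        event.Skip()

    def on_key(self, event):
        # up/down arrows nudge the window transparency
        if event.GetKeyCode() == wx.WXK_UP:
            self.alpha = min(255, self.alpha + 10)
        elif event.GetKeyCode() == wx.WXK_DOWN:
            self.alpha = max(0, self.alpha - 10)
        self.SetTransparent(self.alpha)

if __name__ == "__main__":
    app = wx.App()
    Overlay("wxPython.jpg")
    app.MainLoop()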
I'm trying to run a YOLOv4 model with a webcam in a GUI, but I always get a "not responding" window. It shows the first frame from the webcam and then freezes.
This is my Thread class:
class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def __init__(self):
        ...

    def run(self):
        self.cap = cv2.VideoCapture(0)
        while self._run_flag:
            ret, img = self.cap.read()
            if ret:
                self.change_pixmap_signal.emit(img)
        self.cap.release()

    def stop(self):
        ...
It can detect the frame successfully, but the last two lines of the code below make the GUI freeze. This is my main class:
class App(QWidget):
    def __init__(self):
        ...
        self.image_label = QLabel(self)
        ...
        self.thread = VideoThread()
        self.thread.change_pixmap_signal.connect(self.detection)
        self.thread.start()

    def closeEvent(self, event):
        ...

    @pyqtSlot(np.ndarray)
    def detection(self, img):
        original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.
        images_data = []
        #for i in range(1):
        images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)
        batch_data = tf.constant(images_data)
        pred_bbox = infer(batch_data)
        for key, value in pred_bbox.items():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]
        # Perform NMS after YOLOv4 inference
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score
        )
        pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        image = utils.draw_bbox(original_image, pred_bbox)
        # image = utils.draw_bbox(image_data*255, pred_bbox)
        image = Image.fromarray(image.astype(np.uint8))
        image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
        qformat = QImage.Format_RGB888
        outImage = QImage(image, image.shape[1], image.shape[0], image.strides[0], qformat)
        #BGR>>RGB
        outImage = outImage.rgbSwapped()
        self.image_label.setPixmap(QPixmap.fromImage(outImage))
        self.image_label.setScaledContents(True)
What should I do to stop the GUI from freezing?
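One common pattern, sketched below under some assumptions: run the YOLOv4 inference inside VideoThread.run and emit only the finished, detached QImage, so the GUI-thread slot does nothing heavier than setPixmap. Here run_detection is a hypothetical helper wrapping the resize/infer/NMS/draw_bbox steps from the detection method above, and the model objects (infer, utils, input_size, iou, score) are assumed importable in the worker.

class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(QImage)

    def run(self):
        self.cap = cv2.VideoCapture(0)
        while self._run_flag:
            ret, img = self.cap.read()
            if not ret:
                continue
            # heavy work stays off the GUI thread
            drawn = run_detection(img)  # hypothetical helper, see lead-in
            rgb = cv2.cvtColor(drawn, cv2.COLOR_BGR2RGB)
            qimg = QImage(rgb.data, rgb.shape[1], rgb.shape[0],
                          rgb.strides[0], QImage.Format_RGB888)
            # copy() detaches the QImage from the numpy buffer, which is
            # reused on the next loop iteration
            self.change_pixmap_signal.emit(qimg.copy())
        self.cap.release()

# The connection in App.__init__ stays the same, but the slot shrinks to:
#     @pyqtSlot(QImage)
#     def update_image(self, qimg):
#         self.image_label.setPixmap(QPixmap.fromImage(qimg))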
I am trying to use the LK optical flow from this tutorial to get some motion estimation into a robot simulation made with ROS+Gazebo.
I managed to set up the bridge between ROS and OpenCV via cv_bridge, as per the code below, and implemented some sample features that work "frame-by-frame" without major issues.
However, the optical flow tutorial seems to accept only video inputs, such as a webcam and/or saved video files, and this is where I got stuck.
How could I apply LK optical flow in a "frame-by-frame" approach, or configure my cv_bridge to act as a "custom" camera device?
This is my cv_bridge so far:
#!/usr/bin/env python
import roslib
import rospy
import sys
import cv2
import numpy as np
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError

class OpticalFlow(object):
    def __init__(self):
        # Setting up the bridge and the subscriber
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/realsense/color/image_raw", Image, self.image_callback)
        # Parameters for good-features detection
        self.feature_params = dict(maxCorners=100,
                                   qualityLevel=0.3,
                                   minDistance=7,
                                   blockSize=7)
        # Parameters for Lucas-Kanade optical flow
        self.lk_params = dict(winSize=(150, 150),
                              maxLevel=2,
                              criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # Create some random colors for the tracking points
        self.color = np.random.randint(0, 255, (100, 3))

    def image_callback(self, ros_image):
        # Using cv_bridge() to convert the ROS image to OpenCV format
        try:
            cv_image = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
            hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
            gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        except CvBridgeError as e:
            print(e)
        cv2.imshow("Robot Image", cv_image)  # the actual (non-processed) image from the simulation/robot
        #cv2.imshow('Image HSV', hsv_image)
        #cv2.imshow('Image Gray', gray)
        frame = np.array(cv_image, dtype=np.uint8)
        # Calling the optical flow function
        display = self.optical_flow(frame, self.feature_params, self.lk_params, self.color)

    def optical_flow(self, frame, feature_params, lk_params, color):
        old_frame = frame
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        mask = np.zeros_like(old_frame)
        while(1):
            newframe = frame
            frame_gray = cv2.cvtColor(newframe, cv2.COLOR_BGR2GRAY)
            # Calculating the optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            # Draw the tracks
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
                frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
            img = cv2.add(frame, mask)
            cv2.imshow('LK_Flow', img)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

def main():
    optical_flow_object = OpticalFlow()
    rospy.init_node('KLT_Node', anonymous=True)
    rospy.loginfo("\nWaiting for image topics...\n...")
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("\nShutting Down...\n...")
        cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
I would also like to know, if possible, where I can find more information about how to tune these parameters (from the tutorial):
# Params for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# Parameters for Lucas-Kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
I would recommend using cv2.calcOpticalFlowPyrLK(old_frame, cur_frame, ...) or cv2.calcOpticalFlowFarneback(old_frame, cur_frame, ...) (dense optical flow). There is plenty of information about these methods in the OpenCV documentation, and from personal experience they work great!
Let me know if you have any questions or problems!
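To make that work frame-by-frame inside the callback above, a sketch (keeping your parameter dicts and colors): store the previous grayscale frame and the tracked points as instance state, and advance the flow once per incoming image instead of looping inside optical_flow.

def image_callback(self, ros_image):
    try:
        cv_image = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
    except CvBridgeError as e:
        print(e)
        return
    frame_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

    if getattr(self, 'old_gray', None) is None:
        # First frame: just pick features and remember the frame.
        self.old_gray = frame_gray
        self.p0 = cv2.goodFeaturesToTrack(frame_gray, mask=None, **self.feature_params)
        self.mask = np.zeros_like(cv_image)
        return

    # One LK step per incoming ROS image.
    p1, st, err = cv2.calcOpticalFlowPyrLK(
        self.old_gray, frame_gray, self.p0, None, **self.lk_params)
    if p1 is not None:
        good_new = p1[st == 1]
        good_old = self.p0[st == 1]
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel().astype(int)
            c, d = old.ravel().astype(int)
            self.mask = cv2.line(self.mask, (a, b), (c, d), self.color[i].tolist(), 2)
            cv_image = cv2.circle(cv_image, (a, b), 5, self.color[i].tolist(), -1)
        self.p0 = good_new.reshape(-1, 1, 2)
    self.old_gray = frame_gray

    cv2.imshow('LK_Flow', cv2.add(cv_image, self.mask))
    cv2.waitKey(1)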
I'm using two threads, Camera and NetworkProcessor, in a Qt application that displays each thread's current state in a QLabel (one label per thread: camStatusLabel, nnStatusLabel). To update the label text I use signals and slots. There are also six labels that display images (cameraView, k1, k2, k3, k4, k5); they are updated periodically from while loops, likewise via signals and slots.
import cv2
import sys
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtGui import *
import threading as th
from PIL import Image
import copy

class SecondPage(QWidget):
    def __init__(self):
        QWidget.__init__(self)
        self.bigSize = QSize(640, 480)
        self.smallSize = QSize(320, 240)
        blackPic = QPixmap(self.bigSize)
        blackPic.fill(Qt.black)

        self.layoutTop = QVBoxLayout()
        self.layoutTop.setAlignment(Qt.AlignCenter)
        self.topRowLayout = QHBoxLayout()
        self.buttonBack = QPushButton("Back")
        self.camStatusLabel = QLabel("Camera status: not running...")
        self.nnStatusLabel = QLabel("Neural net status: not running...")
        self.topRowLayout.addWidget(self.buttonBack, alignment=Qt.AlignRight)
        self.topRowLayout.addWidget(self.camStatusLabel, alignment=Qt.AlignLeft)
        self.topRowLayout.addWidget(self.nnStatusLabel, alignment=Qt.AlignLeft)

        self.cameraView = QLabel()
        self.cameraView.setFixedSize(self.bigSize)
        self.cameraView.setPixmap(blackPic)

        self.knnLayout = QHBoxLayout()
        self.layoutTop.addLayout(self.topRowLayout, alignment=Qt.AlignLeft)
        self.layoutTop.addWidget(self.cameraView, alignment=Qt.AlignCenter)
        self.layoutTop.addLayout(self.knnLayout, alignment=Qt.AlignCenter)

        self.k1 = QLabel(alignment=Qt.AlignCenter)
        self.k1.setFixedSize(self.smallSize)
        self.k1.setPixmap(blackPic)
        self.k2 = QLabel(alignment=Qt.AlignCenter)
        self.k2.setFixedSize(self.smallSize)
        self.k2.setPixmap(blackPic)
        self.k3 = QLabel(alignment=Qt.AlignCenter)
        self.k3.setFixedSize(self.smallSize)
        self.k3.setPixmap(blackPic)
        self.k4 = QLabel(alignment=Qt.AlignCenter)
        self.k4.setFixedSize(self.smallSize)
        self.k4.setPixmap(blackPic)
        self.k5 = QLabel(alignment=Qt.AlignCenter)
        self.k5.setFixedSize(self.smallSize)
        self.k5.setPixmap(blackPic)
        self.knnLayout.addWidget(self.k1)
        self.knnLayout.addWidget(self.k2)
        self.knnLayout.addWidget(self.k3)
        self.knnLayout.addWidget(self.k4)
        self.knnLayout.addWidget(self.k5)
        self.setLayout(self.layoutTop)

        self.frame = [None]
        self.frameLock = QMutex()
        startExecute = th.Event()
        threadIsLoadedEvent = th.Event()

        self.cameraThread = Camera()
        self.connect(self.cameraThread, SIGNAL("updateCameraView(QImage)"), self.updateCameraView)
        self.connect(self.cameraThread, SIGNAL("updateCameraStatus(QString)"), self.updateCameraStatus)
        self.cameraThread.startCapturing(self.frame, self.frameLock, startExecute, threadIsLoadedEvent)

        self.processingThread = NetworkProcessor()
        self.connect(self.processingThread, SIGNAL("updateResults(QImage,QImage,QImage,QImage,QImage)"), self.updateResults)
        self.connect(self.processingThread, SIGNAL("nnStatusLabel(QString)"), self.updateNNStatus)
        self.processingThread.startAnalyzing(self.frame, self.frameLock, self.smallSize.toTuple(), startExecute, threadIsLoadedEvent)
        startExecute.set()

    def updateCameraStatus(self, text):
        self.camStatusLabel.setText(text)
        self.camStatusLabel.update()

    def updateNNStatus(self, text):
        self.nnStatusLabel.setText(text)
        self.nnStatusLabel.update()

    def updateCameraView(self, image):
        self.cameraView.setPixmap(QPixmap.fromImage(image).scaled(self.bigSize))
        self.cameraView.update()

    def updateResults(self, p1, p2, p3, p4, p5):
        self.k1.setPixmap(QPixmap.fromImage(p1))
        self.k1.update()
        self.k2.setPixmap(QPixmap.fromImage(p2))
        self.k2.update()
        self.k3.setPixmap(QPixmap.fromImage(p3))
        self.k3.update()
        self.k4.setPixmap(QPixmap.fromImage(p4))
        self.k4.update()
        self.k5.setPixmap(QPixmap.fromImage(p5))
        self.k5.update()

    def killPage(self):
        self.cameraThread.stopThread()
        self.processingThread.stopThread()
class Camera(QThread):
    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        self.exit = False
        self.frame = None
        self.frameLock = None
        self.initSuccessEvent = None
        self.startEvent = None

    def stopThread(self):
        self.exit = True
        self.wait()

    def startCapturing(self, frame, frameLock, startEvent, initSuccessEvent):
        self.frame = frame
        self.frameLock = frameLock
        self.initSuccessEvent = initSuccessEvent
        self.startEvent = startEvent
        self.start()

    def run(self):
        self.startEvent.wait()
        self.emit(SIGNAL("updateCameraStatus(QString)"), "Camera status: Opening...")
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        # initialize frame and signal success to main thread
        self.frameLock.lock()
        _, self.frame[0] = cap.read()
        self.frameLock.unlock()
        self.initSuccessEvent.set()
        self.emit(SIGNAL("updateCameraStatus(QString)"), "Camera status: Running...")
        # enter the loop
        while True:
            _, img = cap.read()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            self.frameLock.lock()
            self.frame[0] = copy.deepcopy(img)
            self.frameLock.unlock()
            img = QImage(img, img.shape[1], img.shape[0], img.strides[0], QImage.Format_RGB888)
            self.emit(SIGNAL("updateCameraView(QImage)"), img)
            if self.exit:
                cap.release()
                break
class NetworkProcessor(QThread):
    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        self.exit = False
        self.frame = None
        self.frameLock = None
        self.imgSize = (100, 100)
        self.initSuccessEvent = None
        self.startEvent = None

    def stopThread(self):
        self.exit = True
        self.wait()

    def startAnalyzing(self, frame, frameLock, imgSize, startEvent, initSuccessEvent):
        self.frame = frame
        self.frameLock = frameLock
        self.imgSize = imgSize
        self.initSuccessEvent = initSuccessEvent
        self.startEvent = startEvent
        self.start()

    def run(self):
        import pydevd; pydevd.settrace(suspend=False)
        self.startEvent.wait()
        self.emit(SIGNAL("nnStatusLabel(QString)"), "Neural net status: Waiting for camera...")
        self.initSuccessEvent.wait()
        self.emit(SIGNAL("nnStatusLabel(QString)"), "Neural net status: Loading nn and dataset...")
        QThread.msleep(1000)  # to simulate initialization
        self.emit(SIGNAL("nnStatusLabel(QString)"), "Neural net status: Running...")
        while True:
            QThread.msleep(1000)
            if self.exit:
                break
            self.frameLock.lock()
            camImg = copy.deepcopy(self.frame[0])
            self.frameLock.unlock()
            products = self._processImg(Image.fromarray(camImg), 5)
            data1 = products[0].tobytes('raw', 'RGB')
            k1 = QImage(data1, products[0].size[0], products[0].size[1], QImage.Format_RGB888)
            data2 = products[1].tobytes('raw', 'RGB')
            k2 = QImage(data2, products[1].size[0], products[1].size[1], QImage.Format_RGB888)
            data3 = products[2].tobytes('raw', 'RGB')
            k3 = QImage(data3, products[2].size[0], products[2].size[1], QImage.Format_RGB888)
            data4 = products[3].tobytes('raw', 'RGB')
            k4 = QImage(data4, products[3].size[0], products[3].size[1], QImage.Format_RGB888)
            data5 = products[4].tobytes('raw', 'RGB')
            k5 = QImage(data5, products[4].size[0], products[4].size[1], QImage.Format_RGB888)
            self.emit(SIGNAL("updateResults(QImage,QImage,QImage,QImage,QImage)"), k1, k2, k3, k4, k5)
            if self.exit:
                break

    def _processImg(self, queryImg, k):
        imgs = []
        for i in range(k):
            im = queryImg.rotate(20 * i)
            im = im.resize(self.imgSize)
            imgs.append(im)
        return imgs
class MainWindow(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        self.loadSecondPage()

    def loadSecondPage(self):
        widget = SecondPage()
        self.setCentralWidget(widget)
        self.setWindowTitle("Metriclearning demo - Visualisation")

    def closeEvent(self, event):
        print("Closing app")
        self.centralWidget().killPage()
        event.accept()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
The problem is that when I update the text labels containing the thread state (camStatusLabel, nnStatusLabel), the content does not stay as I set it, but randomly flips back to earlier texts, even though the text is never updated again once the thread enters its while loop.
The same problem is observable on the labels containing images.
Does anyone know what might cause this?
EDIT:
I edited the code above to be executable. The problem is the same as already described: the text in the QLabel nnStatusLabel changes even though it is not supposed to, and similar behavior is occasionally observable on the displayed images as well; the displayed images go back in time (cameraView, k1, k2, k3, k4, k5).
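I can't be certain this is the whole story, but two things stand out: the QImage objects emitted from the threads share memory with numpy/PIL buffers that the threads keep overwriting (the QImage constructor does not copy the data), and the string-based SIGNAL/emit API is deprecated and fragile in PySide2. A sketch of the Camera thread rewritten with new-style signals and detached image copies (the event/lock plumbing kept as above):

from PySide2.QtCore import QThread, Signal
from PySide2.QtGui import QImage

class Camera(QThread):
    updateCameraView = Signal(QImage)
    updateCameraStatus = Signal(str)

    def run(self):
        self.startEvent.wait()
        self.updateCameraStatus.emit("Camera status: Opening...")
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        # initialize frame and signal success to the main thread, as before
        self.frameLock.lock()
        _, self.frame[0] = cap.read()
        self.frameLock.unlock()
        self.initSuccessEvent.set()
        self.updateCameraStatus.emit("Camera status: Running...")
        while not self.exit:
            ret, img = cap.read()
            if not ret:
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            self.frameLock.lock()
            self.frame[0] = img.copy()
            self.frameLock.unlock()
            qimg = QImage(img, img.shape[1], img.shape[0],
                          img.strides[0], QImage.Format_RGB888)
            # copy() detaches the QImage from img's buffer before it is
            # overwritten by the next cap.read()
            self.updateCameraView.emit(qimg.copy())
        cap.release()

# The connections in SecondPage.__init__ would then become:
#     self.cameraThread.updateCameraView.connect(self.updateCameraView)
#     self.cameraThread.updateCameraStatus.connect(self.updateCameraStatus)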