I'm using Python with PyQt4.
What I want to do:
(1) scale the image from 1280x1024 to 860x480 and show it in the ImageWidget.
(2) draw some lines on the image with the mouse.
# -*- coding: utf-8 -*-
import ConfigParser
from PyQt4 import QtCore,QtGui
from PyQt4.QtGui import QImage
from PyQt4.QtGui import QApplication, QMainWindow, QPushButton
import sys , time , datetime
import cv2
import os
import numpy as np

video = cv2.VideoCapture(0)
video.set(3,1280)
video.set(4,1024)

class ImageWidget(QtGui.QWidget):
    def __init__(self,parent=None):
        super(ImageWidget,self).__init__(parent)
        self.image=None

    def setImage(self,image):
        self.image=image
        sz=image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self,event):
        qp=QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0,0),self.image)
        qp.end()

class MainWindow(QtGui.QMainWindow):
    def __init__(self,parent=None):
        super(MainWindow,self).__init__(parent)
        self.videoFrame=ImageWidget()
        self.setCentralWidget(self.videoFrame)
        screen = QtGui.QDesktopWidget().screenGeometry()
        self.showFullScreen()
        self.timer=QtCore.QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(50)

    def update(self):
        ret, frame = video.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QtGui.QImage.Format_RGB888)
        # here I want to scale to 860x480...
        self.videoFrame.setImage(image)
        # draw ... ?

def main():
    app=QtGui.QApplication(sys.argv)
    w=MainWindow()
    w.show()
    app.exec_()

if __name__=='__main__':
    main()
Does anyone know where I could find an example/doc for this?
Here is the code with the scaling added.
# -*- coding: utf-8 -*-
from PyQt4 import QtCore,QtGui
from PyQt4.QtGui import QImage
from PyQt4.QtGui import QApplication, QMainWindow, QPushButton
import sys , time , datetime
import cv2
import numpy as np

video = cv2.VideoCapture(0)
video.set(3,1280)
video.set(4,1024)

class ImageWidget(QtGui.QWidget):
    def __init__(self,parent=None):
        super(ImageWidget,self).__init__(parent)
        self.image=None

    def setImage(self,image):
        self.image=image
        sz=image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self,event):
        qp=QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0,0),self.image)
        qp.end()

    # here I get the mouse click and move...
    def mousePressEvent(self, event):  # click
        print event.pos()

    def mouseMoveEvent(self, event):  # move
        print event.pos()

class MainWindow(QtGui.QMainWindow):
    def __init__(self,parent=None):
        super(MainWindow,self).__init__(parent)
        self.fAction = QtGui.QAction(u"click me", self)
        menubar = self.menuBar()
        self.fileMenu = menubar.addAction(u"click me")
        font = self.fileMenu.font()
        font.setPointSize(30)
        menubar.setFont(font)
        self.videoFrame=ImageWidget()
        self.setCentralWidget(self.videoFrame)
        screen = QtGui.QDesktopWidget().screenGeometry()
        self.showFullScreen()
        self.timer=QtCore.QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(50)

    def update(self):
        ret, frame = video.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QtGui.QImage.Format_RGB888)
        image.scaledToWidth(640, QtCore.Qt.SmoothTransformation)
        image.scaledToHeight(480, QtCore.Qt.SmoothTransformation)
        # here I scale my image...
        result = image.scaled(640, 480, QtCore.Qt.KeepAspectRatioByExpanding, QtCore.Qt.SmoothTransformation)
        self.videoFrame.setImage(result)

def main():
    app=QtGui.QApplication(sys.argv)
    w=MainWindow()
    w.show()
    app.exec_()

if __name__=='__main__':
    main()
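For the drawing part, which the code above only stubs out with print statements, here is a minimal sketch of how the widget could collect mouse strokes and paint them over the current frame. It is only an illustration under my own naming; DrawableImageWidget, self.strokes and self.current_stroke are not from the original code:

# Sketch only: an ImageWidget that remembers mouse strokes and draws them
# over the current frame. The attribute names are illustrative.
from PyQt4 import QtCore, QtGui

class DrawableImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(DrawableImageWidget, self).__init__(parent)
        self.image = None
        self.strokes = []          # finished strokes, each a list of QPoint
        self.current_stroke = []   # stroke being drawn right now

    def setImage(self, image):
        self.image = image
        self.setMinimumSize(image.size())
        self.update()

    def mousePressEvent(self, event):
        self.current_stroke = [event.pos()]

    def mouseMoveEvent(self, event):
        self.current_stroke.append(event.pos())
        self.update()

    def mouseReleaseEvent(self, event):
        if self.current_stroke:
            self.strokes.append(self.current_stroke)
            self.current_stroke = []

    def paintEvent(self, event):
        qp = QtGui.QPainter(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.setPen(QtGui.QPen(QtCore.Qt.red, 2))
        for stroke in self.strokes + [self.current_stroke]:
            for a, b in zip(stroke, stroke[1:]):
                qp.drawLine(a, b)

You would use it in place of ImageWidget as the central widget; the strokes persist across frames because paintEvent redraws them on top of every new image.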
Related
I want to make a webcam GUI so that when I click the "cam on" button a second window pops up and the video starts.
When I click the return button the video should stop and go back to the main window, but when I try it I can't see the video, even though the flags are printed.
import threading
import cv2
import threading
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtTest import *
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore

running = False

class MyApp(QMainWindow, QWidget):
    def __init__(self):
        super().__init__()
        self.second_UI = QDialog()
        self.label = QtWidgets.QLabel()
        self.first_UI()
        self.show()

    def first_UI(self):
        btn_start = QPushButton("cam on", self)
        btn_start.move(50, 50)
        btn_start.clicked.connect(self.cam_start)
        btn_start.clicked.connect(self.second_exc)
        self.sb = self.statusBar()
        self.sb.showMessage('Ready')
        # Window setting
        self.setWindowTitle('Select Servo Motor')
        self.setGeometry(300, 300, 1024, 768)
        self.show()

    def cam_run(self):
        global running
        print("Flag1")
        self.cap = cv2.VideoCapture(0)
        # 'http://192.168.66.1:9527/videostream.cgi?loginuse=admin&loginpas=admin')
        self.wid = 500
        self.hei = 500
        self.label.resize(self.wid, self.hei)
        while running:
            print("Flag2")
            self.ret, self.img = self.cap.read()
            if self.ret:
                self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
                self.h, self.w, self.c = self.img.shape
                self.qImg = QtGui.QImage(self.img.data, self.w, self.h, self.w*self.c,
                                         QtGui.QImage.Format_RGB888)
                self.pixmap = QtGui.QPixmap.fromImage(self.qImg)
                self.label.setPixmap(self.pixmap)
                print("Flag3")
        self.cap.release()
        print("Thread end.")

    def cam_start(self):  # , UI_param):
        # UI_param = self.second_UI
        global running
        running = True
        self.th = threading.Thread(target=self.cam_run)
        self.th.start()
        print("started...")

    def second_exc(self):
        self.close()
        self.cam_run
        return_btn = QPushButton('Return', self.second_UI)
        return_btn.move(50, 700)  # set position
        # return_btn.resize(return_btn.sizeHint())
        return_btn.clicked.connect(self.first_UI)
        self.second_UI.setWindowTitle('Select User Mode')
        self.second_UI.setGeometry(300, 300, 1024, 768)
        self.second_UI.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex_ = MyApp()
    sys.exit(app.exec_())
Look at the following code. It does what you are aiming for. I would recommend creating the UI with Qt Designer rather than fumbling around with hand-written layout code.
import cv2
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QApplication

cap = cv2.VideoCapture(0)

class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        uic.loadUi(r"main_window.ui", self)
        self.btn_start_cam.clicked.connect(self.start_cam)
        self.btn_return.clicked.connect(self.stop_cam)

    def start_cam(self):
        # Initialize video capture
        # scaling factor
        scaling_factor = 0.5
        ret, frame = cap.read()
        # Resize the frame
        frame = cv2.resize(frame, None, fx=scaling_factor,
                           fy=scaling_factor, interpolation=cv2.INTER_AREA)
        # Display the image
        cv2.imshow('Webcam', frame)

    def stop_cam(self):
        # Release the video capture object
        cap.release()
        # Close all active windows
        cv2.destroyAllWindows()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    myapp = MainWindow()
    myapp.show()
    sys.exit(app.exec_())
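If you would rather keep the video inside the Qt window instead of the separate OpenCV window that cv2.imshow opens, here is a minimal QTimer-based sketch. It assumes the same button names as above plus a QLabel named lbl_video in main_window.ui; that label name is my assumption and not part of the answer:

# Sketch only: poll the capture with a QTimer and paint frames into a QLabel.
# Assumes main_window.ui also contains a QLabel named lbl_video.
import sys
import cv2
from PyQt5 import uic
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QMainWindow

class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        uic.loadUi("main_window.ui", self)
        self.cap = cv2.VideoCapture(0)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.show_frame)
        self.btn_start_cam.clicked.connect(self.start_cam)
        self.btn_return.clicked.connect(self.stop_cam)

    def start_cam(self):
        self.timer.start(30)   # poll roughly every 30 ms

    def stop_cam(self):
        self.timer.stop()

    def show_frame(self):
        ret, frame = self.cap.read()
        if not ret:
            return
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = frame.shape
        image = QImage(frame.data, w, h, ch * w, QImage.Format_RGB888)
        self.lbl_video.setPixmap(QPixmap.fromImage(image))

if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = MainWindow()
    win.show()
    sys.exit(app.exec_())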
I am working on an OpenCV project, and when trying to add a GUI for it I wanted to use PyQt because I am familiar with it.
In this code
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture("../Testing-Video.mp4")
        while True:
            ret, frame = cap.read()
            if ret:
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine,
                                           QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.left = 0
        self.top = 0
        self.width = 600
        self.height = 800
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle("Testing PyQt")
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
which is something I didn't write myself (it's slightly adjusted from here), anyway: using the camera video capture it works perfectly, but with any other video file the playback is extremely fast.
Should I use a timer to fetch the frames at a constant rate? Would that be better than using signals and slots?
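A timer is one option; another is to keep the thread but pace it to the file's frame rate. Below is a minimal sketch (my own, not from the linked answer) that reads the FPS with cv2.CAP_PROP_FPS and sleeps roughly one frame interval between emits; the 33 ms fallback is an assumption for files that report no FPS:

# Sketch only: pace frame delivery to the video's own FPS inside the QThread.
import cv2
from PyQt5.QtCore import QThread, Qt, pyqtSignal
from PyQt5.QtGui import QImage

class PacedThread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture("../Testing-Video.mp4")
        fps = cap.get(cv2.CAP_PROP_FPS)
        delay_ms = int(1000 / fps) if fps and fps > 0 else 33  # fallback ~30 fps
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # end of file
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb.shape
            image = QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888)
            self.changePixmap.emit(image.scaled(640, 480, Qt.KeepAspectRatio))
            self.msleep(delay_ms)  # wait roughly one frame interval
        cap.release()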
This is the main code. It loads a PyQt GUI form that has two buttons: one starts the webcam and the second one captures photos from the frame.
I wrote the first button, but I can't write the capture button.
import sys
import os
import cv2
import numpy as np
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.uic import loadUi

img_counter = 0

class video(QDialog):
    def __init__(self):
        super(video, self).__init__()
        loadUi('video.ui', self)
        self.image = None
        self.startButton.clicked.connect(self.start_webcam)
        self.capture.clicked.connect(self.keyPressEvent)

    def start_webcam(self):
        self.capture = cv2.VideoCapture(0)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(5)

    def update_frame(self):
        ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)
        self.displayImage(self.image, 1)

    def keyPressEvent(self):
        flag, frame = self.capture.read()
        path = 'J:\Face'
        cv2.imwrite(os.path.join(path, 'wakka.jpg'), frame)

    def displayImage(self, img, window=1):
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        outImage = outImage.rgbSwapped()
        if window == 1:
            self.imgLabel.setPixmap(QPixmap.fromImage(outImage))
            self.imgLabel.setScaledContents(True)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = video()
    window.setWindowTitle('main code')
    window.show()
    sys.exit(app.exec_())
I want to capture photos from the frames and save them in a folder.
The self.capture.clicked.connect(self.keyPressEvent) line is for when we click on the button.
I should write the function in the keyPressEvent def; the capture is the button being clicked.
Can someone help me through this?
Edit note:
if flag:
    QtWidgets.QApplication.beep(i)
    img_name = "opencv_frame_{}.png".format()
    cv2.imwrite(os.path.join(path, img_name), frame)
I want a counter so that I can fill in the img_name format string, but the counter must be the number of times the button has been clicked.
keyPressEvent is a method that allows you to capture key presses while the widget has focus, and in your case it is not necessary; the simple solution is to change its name. I have also improved your code in other respects.
import os
import cv2
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets, uic

class video(QtWidgets.QDialog):
    def __init__(self):
        super(video, self).__init__()
        uic.loadUi('video.ui', self)
        self.startButton.clicked.connect(self.start_webcam)
        self.capture.clicked.connect(self.capture_image)
        self.imgLabel.setScaledContents(True)
        self.capture = None
        self.timer = QtCore.QTimer(self, interval=5)
        self.timer.timeout.connect(self.update_frame)
        self._image_counter = 0

    @QtCore.pyqtSlot()
    def start_webcam(self):
        if self.capture is None:
            self.capture = cv2.VideoCapture(0)
            self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer.start()

    @QtCore.pyqtSlot()
    def update_frame(self):
        ret, image = self.capture.read()
        simage = cv2.flip(image, 1)
        self.displayImage(image, True)

    @QtCore.pyqtSlot()
    def capture_image(self):
        flag, frame = self.capture.read()
        path = r'J:\Face'
        if flag:
            QtWidgets.QApplication.beep()
            name = "opencv_frame_{}.png".format(self._image_counter)
            cv2.imwrite(os.path.join(path, name), frame)
            self._image_counter += 1

    def displayImage(self, img, window=True):
        qformat = QtGui.QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QtGui.QImage.Format_RGBA8888
            else:
                qformat = QtGui.QImage.Format_RGB888
        outImage = QtGui.QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        outImage = outImage.rgbSwapped()
        if window:
            self.imgLabel.setPixmap(QtGui.QPixmap.fromImage(outImage))

if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = video()
    window.setWindowTitle('main code')
    window.show()
    sys.exit(app.exec_())
I'm trying to link PyQt and an OpenCV video feed, and I can't understand how to apply a while loop to stream the video continuously. It just takes a still picture. Please, can anyone help to solve the problem?
PyQt = 5
Python = 3.6.1
class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        label = QLabel(self)
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                         QtGui.QImage.Format_RGB888)
        convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
        pixmap = QPixmap(convertToQtFormat)
        resizeImage = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
        QApplication.processEvents()
        label.setPixmap(resizeImage)
        self.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
The problem is that the function that obtains the image is executed only once, so it does not keep updating the label.
The correct way is to place it inside a loop, but that would block the main window. This blocking of the main window can be solved by using the QThread class and sending a QImage through a signal to update the label. For example:
import cv2
import sys
from PyQt5.QtWidgets import QWidget, QLabel, QApplication
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

class App(QWidget):
    def __init__(self):
        super().__init__()
        [...]
        self.initUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(280, 120)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()
Updating this for PySide2 and qimage2ndarray
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *  # QApplication, QLabel, QPushButton, ... live here in Qt5
import cv2              # OpenCV
import qimage2ndarray   # avoids a memory leak, see gist
import sys              # for exiting

# Minimal implementation...
def displayFrame():
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = qimage2ndarray.array2qimage(frame)
    label.setPixmap(QPixmap.fromImage(image))

app = QApplication([])
window = QWidget()

# OPENCV
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

# timer for getting frames
timer = QTimer()
timer.timeout.connect(displayFrame)
timer.start(60)

label = QLabel('No Camera Feed')
button = QPushButton("Quiter")
button.clicked.connect(sys.exit)  # quitter button

layout = QVBoxLayout()
layout.addWidget(button)
layout.addWidget(label)
window.setLayout(layout)
window.show()
app.exec_()
# See also: https://gist.github.com/bsdnoobz/8464000
Thank you Taimur Islam for the question.
Thank you eyllanesc for the wonderful answer; I have modified your code a little bit. I used PyQt4 with Python 2.7 and I didn't use OpenCV.
import sys
import numpy as np
import flycapture2 as fc2
from PyQt4.QtCore import (QThread, Qt, pyqtSignal)
from PyQt4.QtGui import (QPixmap, QImage, QApplication, QWidget, QLabel)

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def __init__(self, parent=None):
        QThread.__init__(self, parent=parent)
        self.cameraSettings()

    def run(self):
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)
            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)
            self.changePixmap.emit(rawImage)

    def cameraSettings(self):
        print(fc2.get_library_version())
        self.c = fc2.Context()
        numberCam = self.c.get_num_of_cameras()
        print(numberCam)
        self.c.connect(*self.c.get_camera_from_index(0))
        print(self.c.get_camera_info())
        m, f = self.c.get_video_mode_and_frame_rate()
        print(m, f)
        print(self.c.get_property_info(fc2.FRAME_RATE))
        p = self.c.get_property(fc2.FRAME_RATE)
        print(p)
        self.c.set_property(**p)
        self.c.start_capture()

class App(QWidget):
    def __init__(self):
        super(App, self).__init__()
        self.title = 'PyQt4 Video'
        self.left = 100
        self.top = 100
        self.width = 640
        self.height = 480
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(800, 600)
        # create a label
        self.label = QLabel(self)
        self.label.move(0, 0)
        self.label.resize(640, 480)
        th = Thread(self)
        th.changePixmap.connect(lambda p: self.setPixMap(p))
        th.start()

    def setPixMap(self, p):
        p = QPixmap.fromImage(p)
        p = p.scaled(640, 480, Qt.KeepAspectRatio)
        self.label.setPixmap(p)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    ex.show()
    sys.exit(app.exec_())
I'm trying to embed video capture in my dialog box (created in PyQt4). For that I tried the code below, but it just starts the capture and does not display anything on the dialog. Please help me find what's missing in the following code.
Here, self.videoFrame is a QtGui.QLabel.
def onRun(self):
    self.playing = True
    capture = cv2.VideoCapture(0)
    data1 = np.array([])
    while self.playing:
        _, data = capture.read()
        data1 = cv2.cvtColor(data, cv2.cv.CV_BGR2RGB)
        qImage = QtGui.QImage(data1, data1.shape[2], data1.shape[2],
                              QtGui.QImage.Format_RGB888)
        qImage = QtGui.QPixmap.fromImage(qImage)
        self.videoFrame.setPixmap(qImage)
        self.videoFrame.setScaledContents(True)
        QtGui.qApp.processEvents()
        cv2.waitKey(5)
    cv2.destroyAllWindows()
This works:
from PyQt4 import QtCore,QtGui
import sys
import cv2
import numpy as np

class ImageWidget(QtGui.QWidget):
    def __init__(self,parent=None):
        super(ImageWidget,self).__init__(parent)
        self.image=None

    def setImage(self,image):
        self.image=image
        sz=image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self,event):
        qp=QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0,0),self.image)
        qp.end()

class MainWindow(QtGui.QMainWindow):
    def __init__(self,parent=None):
        super(MainWindow,self).__init__(parent)
        self.videoFrame=ImageWidget()
        self.setCentralWidget(self.videoFrame)
        self.timer=QtCore.QTimer(self)
        self.timer.timeout.connect(self.updateImage)
        self.timer.start(30)
        self.capture = cv2.VideoCapture(0)

    def updateImage(self):
        _, img = self.capture.read()
        # img = cv2.cvtColor(img, cv.CV_BGR2RGB)
        height, width, bpc = img.shape
        bpl = bpc * width
        image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
        self.videoFrame.setImage(image)

def main():
    app=QtGui.QApplication(sys.argv)
    w=MainWindow()
    w.show()
    app.exec_()

if __name__=='__main__':
    main()
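One caveat, added by me rather than by the answer's author: with the cvtColor line commented out, the QImage is built from BGR data, so red and blue appear swapped. A small helper along these lines (the name frameToQImage is mine) would fix the colors and also copy the data out of the temporary NumPy buffer:

# Sketch only: convert an OpenCV BGR frame into a QImage with correct colors.
import cv2
from PyQt4 import QtGui

def frameToQImage(frame):
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)       # BGR -> RGB
    height, width, channels = rgb.shape
    return QtGui.QImage(rgb.data, width, height,
                        channels * width, QtGui.QImage.Format_RGB888).copy()

In updateImage you would then call self.videoFrame.setImage(frameToQImage(img)).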