This is the main code. It loads a PyQt GUI form that has two buttons: one starts the webcam and the other captures a photo from the current frame.
I have written the handler for the first button, but I can't work out how to write the capture handler.
import sys
import cv2
import numpy as np
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.uic import loadUi

img_counter = 0

class video(QDialog):
    def __init__(self):
        super(video, self).__init__()
        loadUi('video.ui', self)
        self.image = None
        self.startButton.clicked.connect(self.start_webcam)
        self.capture.clicked.connect(self.keyPressEvent)

    def start_webcam(self):
        self.capture = cv2.VideoCapture(0)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(5)

    def update_frame(self):
        ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)
        self.displayImage(self.image, 1)

    def keyPressEvent(self):
        flag, frame = self.capture.read()
        path = 'J:\Face'
        cv2.imwrite(os.path.join(path, 'wakka.jpg'), frame)

    def displayImage(self, img, window=1):
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        outImage = outImage.rgbSwapped()
        if window == 1:
            self.imgLabel.setPixmap(QPixmap.fromImage(outImage))
            self.imgLabel.setScaledContents(True)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = video()
    window.setWindowTitle('main code')
    window.show()
    sys.exit(app.exec_())
I want to capture photos from the frames and save them in a folder.
The self.capture.clicked.connect(self.keyPressEvent) line is meant to run when the button is clicked, so I think the capture logic should go inside the keyPressEvent method (self.capture here is the capture button).
Can someone help me through this?
Edit Note:

if flag:
    QtWidgets.QApplication.beep(i)
    img_name = "opencv_frame_{}.png".format()
    cv2.imwrite(os.path.join(path, img_name), frame)

I want to fill in the format() call with a counter so that each saved image gets a distinct name, and the counter should be the number of times the button has been clicked.
keyPressEvent is a method that lets you handle key presses while the widget has focus, and in your case it is not necessary; the solution is simply to change the method's name. I have also improved your code:
import os
import cv2
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets, uic

class video(QtWidgets.QDialog):
    def __init__(self):
        super(video, self).__init__()
        uic.loadUi('video.ui', self)
        self.startButton.clicked.connect(self.start_webcam)
        self.capture.clicked.connect(self.capture_image)
        self.imgLabel.setScaledContents(True)

        self.capture = None

        self.timer = QtCore.QTimer(self, interval=5)
        self.timer.timeout.connect(self.update_frame)
        self._image_counter = 0

    @QtCore.pyqtSlot()
    def start_webcam(self):
        if self.capture is None:
            self.capture = cv2.VideoCapture(0)
            self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer.start()

    @QtCore.pyqtSlot()
    def update_frame(self):
        ret, image = self.capture.read()
        simage = cv2.flip(image, 1)
        self.displayImage(simage, True)

    @QtCore.pyqtSlot()
    def capture_image(self):
        flag, frame = self.capture.read()
        path = r'J:\Face'
        if flag:
            QtWidgets.QApplication.beep()
            # the counter is incremented on every click, so each file gets a new name
            name = "opencv_frame_{}.png".format(self._image_counter)
            cv2.imwrite(os.path.join(path, name), frame)
            self._image_counter += 1

    def displayImage(self, img, window=True):
        qformat = QtGui.QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QtGui.QImage.Format_RGBA8888
            else:
                qformat = QtGui.QImage.Format_RGB888
        outImage = QtGui.QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        outImage = outImage.rgbSwapped()
        if window:
            self.imgLabel.setPixmap(QtGui.QPixmap.fromImage(outImage))

if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = video()
    window.setWindowTitle('main code')
    window.show()
    sys.exit(app.exec_())
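One detail worth flagging in the code above: after loadUi, self.capture is the QPushButton defined in video.ui, and start_webcam later rebinds the same attribute to the cv2.VideoCapture object. This happens to work because the clicked connection is made before the rebinding, but renaming one of the two makes the code easier to follow. A minimal sketch of that rename, showing only the affected parts (video_capture is just a name chosen here, not something coming from the .ui file):

class video(QtWidgets.QDialog):
    def __init__(self):
        super(video, self).__init__()
        uic.loadUi('video.ui', self)
        self.capture.clicked.connect(self.capture_image)  # self.capture stays the button
        self.video_capture = None                         # the cv2.VideoCapture gets its own name

    def start_webcam(self):
        if self.video_capture is None:
            self.video_capture = cv2.VideoCapture(0)
            self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer.start()

The read() calls in update_frame and capture_image would then use self.video_capture as well.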
Related
I am working on collecting video from a Basler GigE camera using PyPylon and converting it to an array. I want to take this constantly updating image array and show it on a PyQt5 GUI. I am unsure what I am doing wrong. Below is the code; any help would be greatly appreciated!
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np
from pypylon import pylon

class BaslerVideo():
    def _videoGrab(self):
        camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
        camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        converter = pylon.ImageFormatConverter()
        converter.OutputPixelFormat = pylon.PixelType_BGR8packed
        converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
        while camera.IsGrabbing():
            grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
            if grabResult.GrabSucceeded():
                image = converter.Convert(grabResult)
                img = image.GetArray()
                return img

class VideoThread(QThread, BaslerVideo):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def run(self):
        # capture from web cam
        cap = BaslerVideo()
        while True:
            cv_img = cap._videoGrab()
            self.change_pixmap_signal.emit(cv_img)

class App(QWidget):
    def __inti__(self):
        super().__init__()
        self.setWindowTitle("GigE Cam Test")
        self.display_width = 640
        self.display_height = 480
        # Create the label that holds the image
        self.image_label = QLabel(self)
        self.image_label.resize(self.display_width, self.display_height)
        # Create a text label
        self.textLabel = QLabel("GigE Cam")
        # Create the box that will hold the image
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        # Set the layout
        self.setLayout(vbox)
        # create the video capture thread
        self.thread = VideoThread()
        # connect its signal to the update_image slot
        self.thread.change_pixmap_signal.connect(self.update_image)
        # Start the thread
        self.thread.start()

    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)

    def convert_cv_qt(self, cv_img):
        """Convert from a numpy array img to a QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())
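If it helps, two things stand out in the snippet above. First, the constructor is spelled __inti__, so Python never calls it and none of the widgets or the thread get created; renaming it to __init__ should at least make the window appear. Second, _videoGrab opens and reconfigures the camera again for every single frame. A minimal sketch of a grab thread that opens the camera once and keeps emitting frames until it is asked to stop (BaslerThread is just an illustrative name; the signal mirrors the one used above):

import numpy as np
from PyQt5.QtCore import QThread, pyqtSignal
from pypylon import pylon

class BaslerThread(QThread):
    # emits each converted frame as a numpy array, like change_pixmap_signal above
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def run(self):
        # open and configure the camera once, outside the per-frame loop
        camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
        camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        converter = pylon.ImageFormatConverter()
        converter.OutputPixelFormat = pylon.PixelType_BGR8packed
        converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
        while camera.IsGrabbing() and not self.isInterruptionRequested():
            grab = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
            if grab.GrabSucceeded():
                # Convert() copies into a new image, so the buffer can be released right after
                self.change_pixmap_signal.emit(converter.Convert(grab).GetArray())
            grab.Release()
        camera.StopGrabbing()
        camera.Close()

App.update_image and convert_cv_qt could stay as they are; the thread's signal would connect to them exactly as before.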
I want to make a webcam GUI so that when I click the cam on button a second window pops up and the video starts.
When I click the return button the video should stop and go back to the main window, but when I try it I can't see the video, even though the flag is printed.
import threading
import cv2
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtTest import *
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore

running = False

class MyApp(QMainWindow, QWidget):
    def __init__(self):
        super().__init__()
        self.second_UI = QDialog()
        self.label = QtWidgets.QLabel()
        self.first_UI()
        self.show()

    def first_UI(self):
        btn_start = QPushButton("cam on", self)
        btn_start.move(50, 50)
        btn_start.clicked.connect(self.cam_start)
        btn_start.clicked.connect(self.second_exc)
        self.sb = self.statusBar()
        self.sb.showMessage('Ready')
        # Window setting
        self.setWindowTitle('Select Servo Motor')
        self.setGeometry(300, 300, 1024, 768)
        self.show()

    def cam_run(self):
        global running
        print("Flag1")
        self.cap = cv2.VideoCapture(0)
        # 'http://192.168.66.1:9527/videostream.cgi?loginuse=admin&loginpas=admin')
        self.wid = 500
        self.hei = 500
        self.label.resize(self.wid, self.hei)
        while running:
            print("Flag2")
            self.ret, self.img = self.cap.read()
            if self.ret:
                self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
                self.h, self.w, self.c = self.img.shape
                self.qImg = QtGui.QImage(self.img.data, self.w, self.h, self.w * self.c,
                                         QtGui.QImage.Format_RGB888)
                self.pixmap = QtGui.QPixmap.fromImage(self.qImg)
                self.label.setPixmap(self.pixmap)
                print("Flag3")
        self.cap.release()
        print("Thread end.")

    def cam_start(self):  # , UI_param):
        # UI_param = self.second_UI
        global running
        running = True
        self.th = threading.Thread(target=self.cam_run)
        self.th.start()
        print("started...")

    def second_exc(self):
        self.close()
        self.cam_run
        return_btn = QPushButton('Return', self.second_UI)
        return_btn.move(50, 700)  # set position
        # return_btn.resize(return_btn.sizeHint())
        return_btn.clicked.connect(self.first_UI)
        self.second_UI.setWindowTitle('Select User Mode')
        self.second_UI.setGeometry(300, 300, 1024, 768)
        self.second_UI.show()

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex_ = MyApp()
    sys.exit(app.exec_())
Look at the following code. It does what you are aiming for. I would recommend creating the UI with Qt Designer rather than fumbling around with hand-written code.
import cv2
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QApplication

cap = cv2.VideoCapture(0)

class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        uic.loadUi(r"main_window.ui", self)
        self.btn_start_cam.clicked.connect(self.start_cam)
        self.btn_return.clicked.connect(self.stop_cam)

    def start_cam(self):
        # Initialize video capture
        # scaling factor
        scaling_factor = 0.5
        ret, frame = cap.read()
        # Resize the frame
        frame = cv2.resize(frame, None, fx=scaling_factor,
                           fy=scaling_factor, interpolation=cv2.INTER_AREA)
        # Display the image
        cv2.imshow('Webcam', frame)

    def stop_cam(self):
        # Release the video capture object
        cap.release()
        # Close all active windows
        cv2.destroyAllWindows()

if __name__ == "__main__":
    app = QApplication(sys.argv)
    myapp = MainWindow()
    myapp.show()
    sys.exit(app.exec_())
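Note that cv2.imshow opens a separate OpenCV window, and as written start_cam only grabs a single frame when the button is clicked. If the goal is a continuously updating video inside the Qt window itself, one common pattern is a QTimer feeding a QLabel. A self-contained sketch of that idea (the widget names here are made up; with a Designer file you would instead connect to the buttons and a QLabel defined in main_window.ui):

import sys
import cv2
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QPushButton, QVBoxLayout, QWidget

class CamWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.cap = None
        self.video_label = QLabel("camera off")
        start_btn = QPushButton("cam on")
        stop_btn = QPushButton("Return")
        start_btn.clicked.connect(self.start_cam)
        stop_btn.clicked.connect(self.stop_cam)
        central = QWidget()
        layout = QVBoxLayout(central)
        layout.addWidget(self.video_label)
        layout.addWidget(start_btn)
        layout.addWidget(stop_btn)
        self.setCentralWidget(central)
        # the timer pulls a new frame roughly every 30 ms while the camera is open
        self.timer = QTimer(self, interval=30)
        self.timer.timeout.connect(self.update_frame)

    def start_cam(self):
        if self.cap is None:
            self.cap = cv2.VideoCapture(0)
        self.timer.start()

    def stop_cam(self):
        self.timer.stop()
        if self.cap is not None:
            self.cap.release()
            self.cap = None

    def update_frame(self):
        if self.cap is None:
            return
        ret, frame = self.cap.read()
        if ret:
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb.shape
            image = QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888)
            self.video_label.setPixmap(QPixmap.fromImage(image))

if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = CamWindow()
    win.show()
    sys.exit(app.exec_())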
I tried the code from this answer and it crashes with the error Process finished with exit code -1073740771 (0xC000041D) after some time (2-10 sec.), and sometimes with 0xC0000005. It crashes immediately if I try to drag the window.
However, when I put time.sleep(0.1) in run it works fine. If I use sleeps shorter than 0.1 it crashes again.
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, Qt
import cv2
import sys
import time

class CamThread(QThread):
    changemap = pyqtSignal('QImage')

    def run(self):
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            ret, img_rgb = cap.read()
            if ret:
                self.rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
                self.convert = QImage(self.rgb.data, self.rgb.shape[1], self.rgb.shape[0], QImage.Format_RGB888)
                self.p = self.convert.scaled(640, 480, Qt.KeepAspectRatio)
                self.changemap.emit(self.p)
                # time.sleep(0.1)

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'webcam'
        self.initUI()

    @pyqtSlot('QImage')
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(100, 100, 640, 480)
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        thr = CamThread(self)
        thr.changemap.connect(self.setImage)
        thr.start()

app = QApplication(sys.argv)
win = App()
# win.setAttribute(Qt.WA_DeleteOnClose, True)
win.show()
app.exit(app.exec_())
I thought the problem was somewhere in the signals/slots, but I haven't been able to find anything relevant.
Windows 10
Python - 3.7
PyQt - 5.12
OpenCV - 3.4.5.20
Fixed it using QMutex and QWaitCondition to prevent the thread from emitting a new frame while the main thread is still updating. Apparently the issue was in that. eyllanesc, I'm new here as you can see; should I post an answer in the original thread?
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot, Qt, QMutex, QWaitCondition
import cv2
import sys
import time

class CamThread(QThread):
    changemap = pyqtSignal('QImage')

    def __init__(self, mutex, condition):
        super().__init__()
        self.mutex = mutex
        self.condition = condition

    def run(self):
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            try:
                ret, img_rgb = cap.read()
                if ret:
                    rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
                    # any other image processing here
                    convert = QImage(rgb.data, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
                    p = convert.scaled(640, 480, Qt.KeepAspectRatio)
                    self.changemap.emit(p)
                    self.condition.wait(self.mutex)
            except:
                print('error')

class App(QWidget):
    time = 0

    def __init__(self):
        super().__init__()
        self.title = 'webcam'
        self.mutex = QMutex()
        self.condition = QWaitCondition()
        self.initUI()

    @pyqtSlot('QImage')
    def setImage(self, image):
        self.mutex.lock()
        try:
            self.label.setPixmap(QPixmap.fromImage(image))
        finally:
            self.mutex.unlock()
            self.condition.wakeAll()

    def initUI(self):
        self.mutex.lock()
        self.setWindowTitle(self.title)
        self.setGeometry(100, 100, 640, 480)
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        self.thr = CamThread(mutex=self.mutex, condition=self.condition)
        self.thr.changemap.connect(self.setImage)
        self.thr.start()

app = QApplication(sys.argv)
win = App()
win.show()
app.exit(app.exec_())
N.B. You still need to properly stop the thread and close the camera connection in this example.
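A minimal sketch of one way to do that, assuming the CamThread and App classes above: let run() check for an interruption request instead of looping forever, and have the window's closeEvent wake the worker, ask it to stop, and wait for it before exiting. Only the changed methods are shown.

# Sketch: changed methods only, relative to CamThread and App above.

def run(self):  # CamThread.run
    cap = cv2.VideoCapture(0)
    # leave the grab loop once the GUI has asked the thread to stop
    while not self.isInterruptionRequested():
        ret, img_rgb = cap.read()
        if ret:
            rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
            convert = QImage(rgb.data, rgb.shape[1], rgb.shape[0], QImage.Format_RGB888)
            self.changemap.emit(convert.scaled(640, 480, Qt.KeepAspectRatio))
            self.condition.wait(self.mutex)
    cap.release()

def closeEvent(self, event):  # App.closeEvent
    # ask the worker to stop, wake it if it is blocked on the wait condition,
    # then wait for the thread to finish before the window closes
    self.thr.requestInterruption()
    self.condition.wakeAll()
    self.thr.wait()
    event.accept()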
I'm using Python with PyQt4.
What I want to do:
(1) scale the image from 1280x1024 to 860x480 and show it in the image widget;
(2) draw some lines on the image using the mouse.
# -*- coding: utf-8 -*-
import ConfigParser
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QImage
from PyQt4.QtGui import QApplication, QMainWindow, QPushButton
import sys, time, datetime
import cv2
import os
import numpy as np

video = cv2.VideoCapture(0)
video.set(3, 1280)
video.set(4, 1024)

class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        self.image = None

    def setImage(self, image):
        self.image = image
        sz = image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.end()

class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.videoFrame = ImageWidget()
        self.setCentralWidget(self.videoFrame)
        screen = QtGui.QDesktopWidget().screenGeometry()
        self.showFullScreen()
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(50)

    def update(self):
        ret, frame = video.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QtGui.QImage.Format_RGB888)
        # here I want to scale to 860x480...
        self.videoFrame.setImage(image)
        # draw ... ?

def main():
    app = QtGui.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    app.exec_()

if __name__ == '__main__':
    main()
Does anyone know where I could find an example or documentation for this?
Here is the version with scaling added, based on the scaled() documentation.
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QImage
from PyQt4.QtGui import QApplication, QMainWindow, QPushButton
import sys, time, datetime
import cv2
import numpy as np

video = cv2.VideoCapture(0)
video.set(3, 1280)
video.set(4, 1024)

class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        self.image = None

    def setImage(self, image):
        self.image = image
        sz = image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.end()

    # here get the mouse click and move..
    def mousePressEvent(self, event):  # click
        print event.pos()

    def mouseMoveEvent(self, event):  # move
        print event.pos()

class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.fAction = QtGui.QAction(u"click me", self)
        menubar = self.menuBar()
        self.fileMenu = menubar.addAction(u"click me")
        font = self.fileMenu.font()
        font.setPointSize(30)
        menubar.setFont(font)
        self.videoFrame = ImageWidget()
        self.setCentralWidget(self.videoFrame)
        screen = QtGui.QDesktopWidget().screenGeometry()
        self.showFullScreen()
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(50)

    def update(self):
        ret, frame = video.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QtGui.QImage.Format_RGB888)
        image.scaledToWidth(640, QtCore.Qt.SmoothTransformation)
        image.scaledToHeight(480, QtCore.Qt.SmoothTransformation)
        # here is where I scale my image...
        result = image.scaled(640, 480, QtCore.Qt.KeepAspectRatioByExpanding, QtCore.Qt.SmoothTransformation)
        self.videoFrame.setImage(result)

def main():
    app = QtGui.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    app.exec_()

if __name__ == '__main__':
    main()
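For the second goal, drawing lines with the mouse, one approach is to record the points in the mouse events and paint them on top of the current frame in paintEvent. A PyQt4 sketch of such a widget (DrawableImageWidget is an illustrative name; it would be fed frames through setImage exactly like the ImageWidget above):

from PyQt4 import QtCore, QtGui

class DrawableImageWidget(QtGui.QWidget):
    """ImageWidget variant that lets the user draw line segments with the mouse."""

    def __init__(self, parent=None):
        super(DrawableImageWidget, self).__init__(parent)
        self.image = None
        self.lines = []        # finished (start, end) point pairs
        self.current = None    # line currently being dragged

    def setImage(self, image):
        self.image = image
        self.setMinimumSize(image.size())
        self.update()

    def mousePressEvent(self, event):
        self.current = (event.pos(), event.pos())
        self.update()

    def mouseMoveEvent(self, event):
        if self.current is not None:
            self.current = (self.current[0], event.pos())
            self.update()

    def mouseReleaseEvent(self, event):
        if self.current is not None:
            self.lines.append((self.current[0], event.pos()))
            self.current = None
            self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.setPen(QtGui.QPen(QtCore.Qt.red, 2))
        # redraw all finished lines plus the one being dragged, on top of the frame
        for start, end in self.lines:
            qp.drawLine(start, end)
        if self.current is not None:
            qp.drawLine(self.current[0], self.current[1])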
I'm trying to embed video capture in my dialog box (created in PyQt4). I tried the code below, but it just starts capturing and does not display anything on the dialog. Please help me figure out what's missing in the following code.
Here, self.videoFrame is a QLabel (QtGui.QLabel).
def onRun(self):
    self.playing = True
    capture = cv2.VideoCapture(0)
    data1 = np.array([])
    while self.playing:
        _, data = capture.read()
        data1 = cv2.cvtColor(data, cv2.cv.CV_BGR2RGB)
        qImage = QtGui.QImage(data1, data1.shape[2], data1.shape[2],
                              QtGui.QImage.Format_RGB888)
        qImage = QtGui.QPixmap.fromImage(qImage)
        self.videoFrame.setPixmap(qImage)
        self.videoFrame.setScaledContents(True)
        QtGui.qApp.processEvents()
        cv2.waitKey(5)
    cv2.destroyAllWindows()
This works:
from PyQt4 import QtCore, QtGui
import sys
import cv2
import numpy as np

class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__(parent)
        self.image = None

    def setImage(self, image):
        self.image = image
        sz = image.size()
        self.setMinimumSize(sz)
        self.update()

    def paintEvent(self, event):
        qp = QtGui.QPainter()
        qp.begin(self)
        if self.image:
            qp.drawImage(QtCore.QPoint(0, 0), self.image)
        qp.end()

class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.videoFrame = ImageWidget()
        self.setCentralWidget(self.videoFrame)
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.updateImage)
        self.timer.start(30)
        self.capture = cv2.VideoCapture(0)

    def updateImage(self):
        _, img = self.capture.read()
        # img = cv2.cvtColor(img, cv.CV_BGR2RGB)
        height, width, bpc = img.shape
        bpl = bpc * width
        image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
        self.videoFrame.setImage(image)

def main():
    app = QtGui.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    app.exec_()

if __name__ == '__main__':
    main()
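One small caveat about the code above: the BGR-to-RGB conversion is commented out, so the frame is handed to QImage.Format_RGB888 with its channels still in BGR order and the colours will look swapped. Either re-enable the cvtColor call or swap the channels on the Qt side, for example (only the relevant lines of updateImage are shown):

    image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
    self.videoFrame.setImage(image.rgbSwapped())  # swap BGR -> RGB for display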