I have this code for a webcam whose feed should be displayed in a window designed in Qt Designer. The code works, but I now get two camera windows: one inside my main window (the form designed in Qt Designer) and one outside of it.
def b1_clicked(self):
    mycam = cv2.VideoCapture(0)
    if mycam.isOpened():
        _, frame = mycam.read()
    else:
        frame = None
    while True:
        cv2.imshow("preview", frame)
        _, frame = mycam.read()
        frame = cv2.cvtColor(frame, cv2.cv.CV_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0],
                             frame.strides[0], QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(image))
        key = cv2.waitKey(20)
        if key == 27:  # ESC
            break
Any suggestion on how to kill, or at least hide, the window that appears outside of the main window?
Thanks
Comment out cv2.imshow which opens its own window.
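A minimal sketch of the same loop with the cv2.imshow line removed (my own illustration, untested; cv2.COLOR_BGR2RGB is the modern spelling of cv2.cv.CV_BGR2RGB, and note that without an OpenCV window cv2.waitKey will no longer receive key presses, so the ESC check would need replacing with some other stop condition):

def b1_clicked(self):
    mycam = cv2.VideoCapture(0)
    while True:
        ret, frame = mycam.read()
        if not ret:
            break
        # cv2.imshow("preview", frame)   <- removed: this call created the extra window
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(frame, frame.shape[1], frame.shape[0],
                             frame.strides[0], QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(image))
        key = cv2.waitKey(20)  # still used as a delay; it won't see key presses without an OpenCV window
        if key == 27:  # ESC
            break
    mycam.release()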
I tried to record webcam video to a file using OpenCV in Python, but I could not open the resulting file with any of my video players.
The streaming works fine, but when I stop the recording and look at the file, it won't open. I guess there are some codec issues; I also tried (*'XVID') with the .avi format, but that changed nothing.
Here is the code, please help:
from tkinter import *
from PIL import ImageTk, Image
import cv2
import threading

root = Tk()
root.geometry("750x500")
root.configure(bg="#0059aa")

# camera
camera_frame = LabelFrame(root, text=u"KAMERA STREAMING",
                          border=2,
                          width=398,
                          height=265)
camera_frame.place(x=183, y=33)
camera_label = Label(camera_frame, width=55, height=14)
camera_label.grid(row=0, column=0)

global capture
capture = cv2.VideoCapture(0)
# edit: commented out the following two lines (see the note below)
# capture.set(3,250)
# capture.set(4,225)

global out
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('blabla.mp4', fourcc, 20.0, (640, 480))

global stopCam
stopCam = False

def show_frames():
    global capture
    # read the capture
    ret, frame = capture.read()
    # convert to an image and display it
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    height, width, channels = cv2image.shape
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    camera_label.imgtk = imgtk
    camera_label.configure(image=imgtk, width=width, height=height)
    # record
    global out
    out.write(frame)
    # quit
    if stopCam:
        out.release()
        capture.release()
        cv2.destroyAllWindows()
        return
    camera_label.after(20, show_frames)

p1 = threading.Thread(target=show_frames)

buttonLabel = Label(camera_frame)
buttonLabel.grid(row=1, column=0)
connectButton = Button(buttonLabel, text=u"connect", command=p1.start, width=14)
connectButton.grid(row=0, column=0)
stopButton = Button(buttonLabel, text=u"stop", command=lambda: globals().update(stopCam=True), width=14)
stopButton.grid(row=0, column=1)

root.mainloop()
Edit (partly solved):
I looked at some code that worked properly, and the difference was the capture.set() calls. When I comment out the capture.set() lines, both streaming and recording work. The remaining problem is that I have to show the video in a label of a certain size; without set() the displayed video is too big. How can I solve that now?
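A likely explanation for the unplayable file: cv2.VideoWriter generally ignores frames whose dimensions don't match the size it was constructed with, and capture.set(3, 250) / capture.set(4, 225) made the frames smaller than the (640, 480) the writer expected. One way to keep recording at full resolution while showing a smaller preview is to resize only the copy that goes to the label. A sketch under those assumptions (untested; the 398x265 size is just the LabelFrame size from the question):

def show_frames():
    ret, frame = capture.read()
    if not ret:
        camera_label.after(20, show_frames)
        return

    # write the full-resolution BGR frame to the file
    out.write(frame)

    # build a smaller RGB copy only for the tkinter label
    preview = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    preview = cv2.resize(preview, (398, 265))      # display size only, not the recording size
    imgtk = ImageTk.PhotoImage(image=Image.fromarray(preview))
    camera_label.imgtk = imgtk                     # keep a reference so it isn't garbage-collected
    camera_label.configure(image=imgtk)

    if stopCam:
        out.release()
        capture.release()
        return
    camera_label.after(20, show_frames)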
If I use OpenCV to play video into its own window using this sort of logic:
cap = cv2.VideoCapture('video.mp4', cv2.CAP_FFMPEG)
while True:
    ret, frame = cap.read()
    if ret:
        cv2.imshow('', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
then it works well and smoothly. But if I use what appears to be the recommended way of playing into my own tkinter window, using the window.after() technique like this snippet:
def update(self):
    # Get a frame from the video source
    ret, frame = cap.read()
    self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
    self.update_id = self.window.after(self.VIDEO_READ_DELAY, self.update)
it is slow and stutters badly. I've played with the update delay without any real success, so I'm guessing that the processing overhead of the image conversion is what's causing the problem.
Can imshow() be made to play into my tkinter canvas directly?
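Not an answer from the original thread, but one common source of stutter in this pattern is calling canvas.create_image() on every frame, which keeps adding new canvas items. A sketch of the usual fix, creating the image item once and updating it with itemconfig afterwards (assumes the same self.canvas, cap and VIDEO_READ_DELAY as in the snippet above; it also adds the BGR-to-RGB conversion that the snippet skips):

def update(self):
    ret, frame = cap.read()
    if ret:
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(rgb))
        if getattr(self, "image_id", None) is None:
            # create the canvas item only once...
            self.image_id = self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
        else:
            # ...then just swap the image on subsequent frames
            self.canvas.itemconfig(self.image_id, image=self.photo)
    self.update_id = self.window.after(self.VIDEO_READ_DELAY, self.update)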
I am using tkinter and OpenCV for the first time and have successfully built a GUI for my project; however, I cannot figure out why my video stream is updating so slowly. I am grabbing frames very quickly, but the update on the screen seems to get progressively slower. I see somewhere around 30 seconds of lag when I first launch the program, and it eventually slows to a halt. I am connecting to three cameras but only displaying one at a time. The cameras all display and the selection buttons work; my only issue is the display refresh rate.
This is running in Python 3.7 on a Raspberry Pi 4. I can connect to the cameras via a web browser with no apparent lag.
I have been searching for answers but cannot find anything that helps. Can anyone offer some help with this?
Here's my program (I have removed unrelated code):
#!/usr/bin/env python3
import time
from tkinter import *
import cv2
from PIL import Image, ImageTk
#GUI
class robotGUI:

    def __init__(self):
        self.selectedCam = "front"
        self.window = Tk()

        # Set up the window to fit the Raspberry Pi Touch Display (800x480), aligned top left
        self.window.geometry("800x480+0+0")
        self.window.overrideredirect(True)
        self.window.fullScreenState = False

        # Create Frame for Video Window
        self.videoFrame = Frame(self.window, relief=SUNKEN, bd=2)
        self.videoFrame.place(x=0, y=0, height=457, width=650)

        # Create the Video Window
        self.video = Label(self.videoFrame, bd=0, relief=FLAT, width=644, height=451)
        self.video.place(x=0, y=0)
        self.vid = VideoCapture()
        self.camUpdateFreq = 250
        self.updateCams()

        # Create the Button Frame
        self.buttonFrame = Frame(self.window, relief=FLAT)
        self.buttonFrame.place(x=651, y=0, height=457, width=149)

        # Create Buttons
        # Select Front Camera Button
        self.frontCamButton = Button(self.buttonFrame, text="Front Camera", command=lambda: self.selectCam("front"))
        self.frontCamButton.place(x=24, y=50, height=30, width=100)

        # Select Boom Camera Button
        self.boomCamButton = Button(self.buttonFrame, text="Boom Camera", command=lambda: self.selectCam("boom"))
        self.boomCamButton.place(x=24, y=130, height=30, width=100)

        # Select Rear Camera Button
        self.rearCamButton = Button(self.buttonFrame, text="Rear Camera", command=lambda: self.selectCam("rear"))
        self.rearCamButton.place(x=24, y=210, height=30, width=100)

        # Close Button
        self.exitButton = Button(self.buttonFrame, text="Close", command=self.window.destroy)
        self.exitButton.place(x=24, y=400, height=30, width=100)

        # Start the main loop for the GUI
        self.window.mainloop()
    def selectCam(self, cam):
        if cam.lower() == "front":
            self.selectedCam = "front"
            self.statusBarLeft['text'] = "Front Camera Selected"
        elif cam.lower() == "boom":
            self.selectedCam = "boom"
            self.statusBarLeft['text'] = "Boom Camera Selected"
        elif cam.lower() == "rear":
            self.selectedCam = "rear"
            self.statusBarLeft['text'] = "Rear Camera Selected"

    def updateCams(self):
        # Get a frame from the selected camera
        ret, frame = self.vid.get_frame(self.selectedCam)
        if ret:
            imageCV2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imageCV2)
            imgPhoto = ImageTk.PhotoImage(image=img)
            self.video.imgPhoto = imgPhoto
            self.video.configure(image=imgPhoto)
        self.window.after(self.camUpdateFreq, self.updateCams)
# Video Camera Class
class VideoCapture:

    def __init__(self):
        # Define Locals
        FrontCameraAddress = "rtsp://admin:password@192.168.5.20:8554/12"
        BoomCameraAddress = "rtsp://admin:password@192.168.5.21:8554/12"
        RearCameraAddress = "rtsp://admin:password@192.168.5.22:8554/12"

        # Open the Video Camera Sources
        self.vidFront = cv2.VideoCapture(FrontCameraAddress)
        self.vidBoom = cv2.VideoCapture(BoomCameraAddress)
        self.vidRear = cv2.VideoCapture(RearCameraAddress)

        # Verify that the Camera Streams Opened
        if not self.vidFront.isOpened():
            raise ValueError("Unable to open video source to Front Camera")
        if not self.vidBoom.isOpened():
            raise ValueError("Unable to open video source to Boom Camera")
        if not self.vidRear.isOpened():
            raise ValueError("Unable to open video source to Rear Camera")

    # Get One Frame from the Selected Camera
    def get_frame(self, camera="front"):
        # Attempt to Get a Front Camera Frame
        if camera.lower() == "front":
            # If the Stream Is Still Open, Return a Frame
            if self.vidFront.isOpened():
                ret, frame = self.vidFront.read()
                if ret:
                    # Return a boolean success flag and the current frame
                    return (ret, frame)
                else:
                    return (ret, None)
            else:
                return (False, None)

        # Attempt to Get a Boom Camera Frame
        elif camera.lower() == "boom":
            # If the Stream Is Still Open, Return a Frame
            if self.vidBoom.isOpened():
                ret, frame = self.vidBoom.read()
                if ret:
                    # Return a boolean success flag and the current frame
                    return (ret, frame)
                else:
                    return (ret, None)
            else:
                return (False, None)

        # Attempt to Get a Rear Camera Frame
        elif camera.lower() == "rear":
            # If the Stream Is Still Open, Return a Frame
            if self.vidRear.isOpened():
                ret, frame = self.vidRear.read()
                if ret:
                    # Return a boolean success flag and the current frame
                    return (ret, frame)
                else:
                    return (ret, None)
            else:
                return (False, None)

        else:
            return (False, None)

    # Release the video sources when the object is destroyed
    def __del__(self):
        if self.vidFront.isOpened():
            self.vidFront.release()
        if self.vidBoom.isOpened():
            self.vidBoom.release()
        if self.vidRear.isOpened():
            self.vidRear.release()
# Main Routine - Only run if called from the main program instance
if __name__ == '__main__':
    try:
        # Create GUI Object
        app = robotGUI()
    except Exception as e:
        print("Exception: " + str(e))
    finally:
        print("Cleaning Up")
NOTE: In this copy of the program I am updating every 250 ms, but I have tried smaller values down to around 3 ms and the frames still lag behind. Is there a better way to do this?
NOTE 2: After working with this more today, I realize that OpenCV is definitely buffering frames for each camera, starting as soon as cv2.VideoCapture() is called for that camera. read() pulls the next frame from this buffer, which explains why the display takes so long to update and why the image I see on screen never catches up to reality. I changed my test code to connect to only one camera at a time and to call release() whenever I am not actively viewing a camera, which improved things quite a bit. I also set the update function to run every 1 ms, call grab() on every cycle, and only decode and display every 10th frame, which helped some as well. I still have some lag that I would love to remove if anyone has suggestions.
My RTSP stream shows with zero noticeable lag when viewed in a web browser. Does anyone know how I can get the same effect in tkinter? I am not married to OpenCV.
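Not from the original post, but a common way to defeat OpenCV's internal RTSP buffering is to drain each stream in a background thread and keep only the most recent frame, so the GUI always shows the latest image instead of working through a backlog. A rough sketch of that idea (my own illustration, untested; source would be one of the RTSP URLs above):

import threading
import cv2

class LatestFrameReader:
    """Continuously reads a stream and keeps only the newest frame."""

    def __init__(self, source):
        self.cap = cv2.VideoCapture(source)
        self.lock = threading.Lock()
        self.frame = None
        self.running = True
        threading.Thread(target=self._reader, daemon=True).start()

    def _reader(self):
        # Drain frames as fast as they arrive so the internal buffer never grows
        while self.running:
            ret, frame = self.cap.read()
            if ret:
                with self.lock:
                    self.frame = frame

    def get_frame(self):
        # Return the most recent frame (None until the first successful read)
        with self.lock:
            return self.frame is not None, self.frame

    def release(self):
        self.running = False
        self.cap.release()

The tkinter after() callback would then call get_frame() on the selected reader instead of read(), so the display rate is decoupled from the network frame rate.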
I am making a program that checks whether the camera is connected and, if so, shows the webcam footage. The problem is that, the way my program is structured, I cannot afford the time it takes for cap = cv2.VideoCapture() to execute. It sabotages the ShowFrame function and makes it show a frame only every ~1 second. Is there a different way to check whether the camera is connected, other than calling cap = cv2.VideoCapture() and cap.isOpened()?
I also cannot have a while loop in my program because of tkinter's root.mainloop() call. However, if there is no way to check the camera status other than cap.isOpened(), can I move the root.mainloop() call somewhere that allows a while True loop in my program?
I've tried both multiprocessing and threading with no success.
Here's some code:
from tkinter import *  # Import the tkinter module (for the graphical user interface)
import cv2  # Import the cv2 module for web camera footage
import PIL  # Import the Pillow library for image configuration
from PIL import Image, ImageTk  # Import the specifics for image configuration
print("[INFO] Imports done")

width, height = 800, 600  # Define the width and height for cap adjustment
RootGeometry = str(width) + "x" + str(height)  # Variable used to size the tkinter window
print("[INFO] Geometries made")

ImageSource = 0
cap = cv2.VideoCapture(ImageSource)  # First VideoCapture
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
print("[INFO] Cap set")

root = Tk()
print("[INFO] Window made")
root.title("Main Window")
root.configure(background="white")
root.geometry(RootGeometry)
root.bind('<Escape>', lambda e: root.quit())
lmain = Label(root)
lmain.pack()
print("[INFO] Configuration of cap done.")

def ShowFrame():
    ok, frame = cap.read()
    if ok:
        print("[INFO] Show frame initialized.")
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        img = PIL.Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=img)
        lmain.imgtk = imgtk
        lmain.configure(image=imgtk)
        print("[INFO] After 10 initializing")
        lmain.after(10, ShowFrame)
        print("[INFO] Showed image")
    else:
        lmain.after(10, CheckSource)

def CheckSource():
    print("[INFO] CheckSource triggered.")
    cap = cv2.VideoCapture(ImageSource)
    if cap.isOpened():
        print("[INFO] [DEBUG] if OK initialized")
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            cv2.waitKey(0)
            print("[WARNING] Exiting app after command")
        ShowFrame()
    else:
        print("[WARNING] No source found. Looking for source.")
        lmain.after(10, CheckSource)

CheckSource()
root.mainloop()
print("[INFO] [DEBUG] root.mainloop triggered")
Any and all help would be very appreciated!
When there is no webcam/image source, cap.read() will return (False, None). Therefore you can check whether a webcam is connected with something like:
import cv2

cap = cv2.VideoCapture(ImageSource)
while True:
    if cap.read()[0] == False:
        print("Not connected")
        cap = cv2.VideoCapture(ImageSource)
    else:
        ret, frame = cap.read()
        cv2.imshow("webcam footage", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
Hope this helps :)
You should not create a VideoCapture for each frame; you only need to check whether one already exists, and isOpened() is the proper function for that. Only if it is not yet open should you retry the camera.
I modified your code:
def CheckSource():
    global cap  # needed so the retry below replaces the module-level cap
    print("[INFO] CheckSource Triggered.")
    # check if the cam is open; if so, do ShowFrame
    if cap.isOpened():
        print("[INFO] [DEBUG] if Ok initialized")
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            cv2.waitKey(0)
            print("[WARNING] Exiting app after command")
        ShowFrame()
    else:
        # cam is not open, try VideoCapture again
        print("[WARNING] No source found. Looking for source.")
        cap = cv2.VideoCapture(ImageSource)
        lmain.after(10, CheckSource)
I am trying to develop code that functions as a self-timer camera. The video should be visible in a window, the person's face and eyes should be continuously detected, and once the user selects a specific time, the frame at that point in time should be captured. I am able to capture a frame after a certain time using the sleep function from the time module, but the video feed freezes while it waits. Is there any solution that lets me keep seeing the video while the capture happens automatically after some delay?
I am using this code:
import numpy as np
import cv2
import time
import cv2.cv as cv

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame', frame)

    #time.sleep(01)
    Capture = cv.CaptureFromCAM(0)
    time.sleep(5)
    ret, frame = cap.read()
    image = cv.QueryFrame(Capture)  # here you have an IplImage
    imgarray = np.asarray(image[:,:])  # this is the way I use to convert it to a numpy array
    cv2.imshow('capImage', imgarray)
    cv2.waitKey(0)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
Can someone advise me? Any kind of help would be appreciated.
In order to continuously view the video, put the part of the code that displays the video inside a while loop, and make sure the handle to the window is not lost. You can trigger the capture from a mouse-click event and use tick counts: take one before the start of the while loop and one inside the loop. Once the difference between the two tick counts equals some predefined number of seconds, capture that frame and break out of the while loop.
You need to add another 'cap.read()' line when the delay ends, as this is the code that actually captures the image.
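A minimal sketch combining these suggestions, using OpenCV's tick counters so the preview keeps running until the delay expires (my own illustration, untested; the 5-second delay and output file name are arbitrary):

import cv2

cap = cv2.VideoCapture(0)
delay_seconds = 5
start_ticks = cv2.getTickCount()          # tick count before the loop starts

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('frame', frame)            # the preview keeps updating, no sleep() needed

    elapsed = (cv2.getTickCount() - start_ticks) / cv2.getTickFrequency()
    if elapsed >= delay_seconds:
        cv2.imwrite('capture.jpg', frame)  # capture the current frame once the delay has passed
        break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()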
Use threading, and run cv2.imshow() in its own function, separate from your processing function:
import threading
import cv2

def getFrame():
    global frame
    while True:
        frame = video_capture.read()[1]

def face_analyse():
    while True:
        # do whatever processing you want on the current frame here
        pass

def realtime():
    while True:
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            video_capture.release()
            cv2.destroyAllWindows()
            break

if __name__ == "__main__":
    cam = 0  # camera index or source; adjust to your setup
    video_capture = cv2.VideoCapture(cam)
    frame = video_capture.read()[1]

    gfthread = threading.Thread(target=getFrame, args=())
    gfthread.daemon = True
    gfthread.start()

    rtthread = threading.Thread(target=realtime, args=())
    rtthread.daemon = True
    rtthread.start()

    fathread = threading.Thread(target=face_analyse, args=())
    fathread.daemon = True
    fathread.start()

    while True:  # keep the main thread alive so the daemon threads keep running
        pass