I want to record video (not audio) from my webcam.
I have added two buttons, one to start recording and one to stop recording.
When the program starts it grabs frames from the camera and shows them on screen; that part works perfectly.
My problem is that when I click start recording and, some time later, stop recording,
only an .avi file of 0 KB or 6 KB is created. No actual recording ends up in the file.
import tkinter
import cv2
import PIL.Image, PIL.ImageTk
stopb = None
class App():
def __init__(self, window, window_title):
self.window = window
self.window.title = window_title
self.ok = False
self.video = cv2.VideoCapture(0)
self.width = self.video.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)
#create videowriter
self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
self.out = cv2.VideoWriter('output.avi',self.fourcc,10,(640,480))
# Create a canvas that can fit the above video source size
self.canvas = tkinter.Canvas(window, width=self.width, height=self.height)
self.canvas.pack()
self.opencamera = tkinter.Button(window, text="open camera", command=self.open_camera)
self.opencamera.pack()
self.closecamera = tkinter.Button(window, text="close camera", command=self.close_camera)
self.closecamera.pack()
self.delay = 10
self.update()
# After it is called once, the update method will be automatically called every delay milliseconds
self.window.mainloop()
def update(self):
ret, frame = self.video.read()
if self.ok == 'T':
self.out.write(frame)
if ret:
self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
self.window.after(self.delay, self.update)
def open_camera(self):
self.ok = True
print("camera opened")
print(self.ok)
def close_camera(self):
print("camera closed")
self.ok = False
self.video.release()
self.out.release()
def __del__(self):
if self.video.isOpened():
self.video.release()
self.out.release()
App(tkinter.Tk(), "mywindow")
Your problem is that you're never writing anything to your output, because if self.ok == 'T' will never evaluate to true (self.ok is a boolean, never the string 'T'). You should change it to just if self.ok, the same thing you did with ret.
def update(self):
ret, frame = self.video.read()
if self.ok:
self.out.write(frame)
if ret:
self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
self.window.after(self.delay, self.update)
One more issue I found: after clicking the close camera button and then the open camera button, a new recording won't start.
That means you can only record the first video, captured before clicking the close camera button.
This is because close_camera() releases the video object and the output object, so self.video.read() in the update function no longer works.
A quick fix is to add a set_camera() method that re-creates the video object (and the output writer, which was released as well), and call it from open_camera() every time; see the sketch below.
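A minimal sketch of that fix, to be added to the App class above (the set_camera name is the one suggested here; reusing 'output.avi' and taking the camera's own frame size are assumptions, not part of the original post):
def set_camera(self):
    # re-open the webcam and re-create the writer so a new recording can start
    self.video = cv2.VideoCapture(0)
    # using the camera's actual frame size avoids silently dropped frames
    # if the camera is not exactly 640x480
    self.out = cv2.VideoWriter('output.avi', self.fourcc, 10,
                               (int(self.width), int(self.height)))
def open_camera(self):
    # re-create the capture/writer if close_camera() released them earlier
    if not self.video.isOpened():
        self.set_camera()
    self.ok = True
    print("camera opened")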
https://replit.com/#AmitYadav4/video-record-in-tkinker-canvas
I want to display images from an Allied Vision camera inside a tkinter frame using OpenCV and the SDK for the camera, VimbaPython.
The only possible way to initialize the camera is with a Python with statement:
with Vimba.get_instance() as vimba:
cams = vimba.get_all_cameras()
with cams[0] as camera:
camera.get_frame()
# Convert frame to opencv image, then use Image.fromarray and ImageTk.PhotoImage to
# display it on the tkinter GUI
Everything works fine so far. But I don't only need a single frame. Instead, I need to continuously get frames and display them on the screen so that it is streaming.
I found that one way to do it is to call the .after(delay, function) method from a tkinter Label widget.
So, after obtaining one frame, I want to call the same function to get a new frame and display it again. The code would look like this:
with Vimba.get_instance() as vimba:
cams = vimba.get_all_cameras()
with cams[0] as camera:
def show_frame():
frame = camera.get_frame()
frame = frame.as_opencv_image()
im = Image.fromarray(frame)
img = ImageTk.PhotoImage(im)
lblVideo.configure(image=img) # this is the Tkinter Label Widget
lblVideo.image = img
show_frame()
lblVideo.after(20, show_frame)
Then this shows the first frame and stops, throwing an error saying that Vimba needs to be initialized with a with statement. I don't know much about Python, but it looks like when I call the function with the .after() method it ends the with statement.
I would like to know if it is possible to keep executing this show_frame() function without exiting the with block. Also, I can't re-initialize the camera for every frame because that makes the program really slow.
Thank you
I know this question is pretty old, but I ran into a similar problem with the Allied Vision cameras and found the solution to be relatively robust. So I hope this helps someone, even if not the OP.
An alternative to using with statements is calling __enter__ and __exit__ directly (see the sample here). With this, I created a class for the Vimba camera, and in __init__ I call __enter__ twice: once to initialize the Vimba instance, and once to open the camera itself. For example...
vimba_handle = Vimba.get_instance().__enter__()
camera = vimba_handle.get_all_cameras()[0].__enter__()
I'll include a longer snippet as code as well, but please note that my purpose was slightly different from the OP's intent. Hopefully it is still useful.
# Imports assumed by this snippet (error_check is the author's own helper and is not shown):
import threading
import time
from inspect import currentframe
import cv2
import numpy as np
from vimba import (Vimba, VimbaFeatureError, intersect_pixel_formats,
                   OPENCV_PIXEL_FORMATS, MONO_PIXEL_FORMATS, COLOR_PIXEL_FORMATS)
class VimbaCam:
def __init__(self, device_id=0):
# Variables
self.current_frame = np.array([])
self.device = None
self.device_id = device_id
self.vimba_handle = Vimba.get_instance().__enter__()
self.is_streaming = False
self.scale_window = 4
self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)
# Default settings
self.auto_exposure = "Off"
self.auto_gain = "Off"
self.acquisition = "Continuous"
self.exposure_us = 200000
self.fps = 6.763
self.gain = 0
self.gamma = 1
self.open()
def close(self):
if self.device is not None:
if self.is_streaming:
self.stop_stream()
time.sleep(1)
self.device.__exit__(None, None, None)
self.vimba_handle.__exit__(None, None, None)
def open(self):
cams = self.vimba_handle.get_all_cameras()
if not cams:
error_check(151, currentframe())
else:
self.device = cams[self.device_id].__enter__()
self.set_defaults()
self.start_stream()
def start_stream(self):
if self.device is not None:
self.is_streaming = True
self.stream_thread.start()
time.sleep(1)
def thread_stream(self):
while self.is_streaming:
current_frame = self.device.get_frame().as_opencv_image()
h, w, _ = current_frame.shape
self.current_frame = current_frame.reshape((h, w))
self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)
def stop_stream(self):
if self.device is not None:
self.is_streaming = False
def live_video(self):
if self.device is not None:
window_name = "Allied Vision"
h, w = self.current_frame.shape
w = int(w / self.scale_window)
h = int(h / self.scale_window)
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(window_name, w, h)
while 1:
cv2.imshow(window_name, self.current_frame)
cv2.waitKey(1)
if cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1:
break
cv2.destroyAllWindows()
def set_defaults(self):
if self.device is not None:
# Exposure time settings
self.device.ExposureAuto.set(self.auto_exposure)
self.device.ExposureTimeAbs.set(self.exposure_us)
# Gain settings
self.device.GainAuto.set(self.auto_gain)
self.device.Gain.set(self.gain)
# Gamma settings
self.device.Gamma.set(self.gamma)
self.device.AcquisitionMode.set(self.acquisition)
self.device.AcquisitionFrameRateAbs.set(self.fps)
# Try to adjust GeV packet size (available for GigE only)
try:
self.device.GVSPAdjustPacketSize.run()
while not self.device.GVSPAdjustPacketSize.is_done():
pass
except (AttributeError, VimbaFeatureError):
pass
# Color formatting (tries mono first, then color)
cv_formats = intersect_pixel_formats(self.device.get_pixel_formats(), OPENCV_PIXEL_FORMATS)
mono_formats = intersect_pixel_formats(cv_formats, MONO_PIXEL_FORMATS)
color_formats = intersect_pixel_formats(cv_formats, COLOR_PIXEL_FORMATS)
if mono_formats:
self.device.set_pixel_format(mono_formats[0])
elif color_formats:
self.device.set_pixel_format(color_formats[0])
if __name__ == "__main__":
dev = VimbaCam()
dev.live_video()
dev.close()
You need to use a thread to run the capture code and pass the captured frames to the main thread via a queue. The main tkinter application then reads the queue and shows the frames periodically using .after().
Below is an example based on your posted code:
import threading
from queue import SimpleQueue
import tkinter as tk
from PIL import Image, ImageTk
from vimba import Vimba
def camera_streaming(queue):
global is_streaming
is_streaming = True
print("streaming started")
with Vimba.get_instance() as vimba:
with vimba.get_all_cameras()[0] as camera:
while is_streaming:
frame = camera.get_frame()
frame = frame.as_opencv_image()
im = Image.fromarray(frame)
img = ImageTk.PhotoImage(im)
queue.put(img) # put the capture image into queue
print("streaming stopped")
def start_streaming():
start_btn["state"] = "disabled" # disable start button to avoid running the threaded task more than once
stop_btn["state"] = "normal" # enable stop button to allow user to stop the threaded task
show_streaming()
threading.Thread(target=camera_streaming, args=(queue,), daemon=True).start()
def stop_streaming():
global is_streaming, after_id
is_streaming = False # terminate the streaming thread
if after_id:
lblVideo.after_cancel(after_id) # cancel the showing task
after_id = None
stop_btn["state"] = "disabled" # disable stop button
start_btn["state"] = "normal" # enable start button
# periodical task to show frames in queue
def show_streaming():
global after_id
if not queue.empty():
image = queue.get()
lblVideo.config(image=image)
lblVideo.image = image
after_id = lblVideo.after(20, show_streaming)
queue = SimpleQueue() # queue for video frames
after_id = None
root = tk.Tk()
lblVideo = tk.Label(root, image=tk.PhotoImage(), width=640, height=480)
lblVideo.grid(row=0, column=0, columnspan=2)
start_btn = tk.Button(root, text="Start", width=10, command=start_streaming)
start_btn.grid(row=1, column=0)
stop_btn = tk.Button(root, text="Stop", width=10, command=stop_streaming, state="disabled")
stop_btn.grid(row=1, column=1)
root.mainloop()
Note that I don't have the camera or the SDK installed, so the above code may not work for you as-is; it just demonstrates how to use a thread, a queue and .after().
Below is a test vimba module (saved as vimba.py) that I use to simulate the VimbaPython module using OpenCV and a webcam:
import cv2
class Frame:
def __init__(self, frame):
self.frame = frame
def as_opencv_image(self):
return self.frame
class Camera:
def __init__(self, cam_id=0):
self.cap = cv2.VideoCapture(cam_id, cv2.CAP_DSHOW)
def __enter__(self):
return self
def __exit__(self, *args):
self.cap.release()
return self
def get_frame(self):
ret, frame = self.cap.read()
if ret:
return Frame(frame)
class Vimba:
_instance = None
@classmethod
def get_instance(self):
if self._instance is None:
self._instance = Vimba()
return self._instance
def __enter__(self):
return self
def __exit__(self, *args):
return self
def get_all_cameras(self):
return (Camera(),)
I tried reading the frames with OpenCV and displaying them in a tkinter label. I was able to do so using the code below:
import tkinter as tk
import cv2
from PIL import ImageTk, Image
video_path = "SAMPLE/STORED_VIDEO/PATH"
root = tk.Tk()
base_img = Image.open("PATH/TO/DEFAULT/LABLE/IMAGE")
img_obj = ImageTk.PhotoImage(base_img)
lblVideo = tk.Label(root, image=img_obj)
lblVideo.pack()
cap = cv2.VideoCapture(video_path)
if cap.isOpened():
def show_frame():
_, frame = cap.read()
im = Image.fromarray(frame)
img = ImageTk.PhotoImage(im)
lblVideo.configure(image=img)
lblVideo.image = img
lblVideo.after(1, show_frame) # Need to create callback here
show_frame()
root.mainloop()
Although this does not contain the with statement, you can try placing the after() callback inside the show_frame function itself, as done here.
I am using tkinter and opencv for the first time and have successfully built a GUI for my project; however, I cannot figure out why my video stream is updating so slowly. I am grabbing frames very quickly, but the update on the screen seems to get exponentially slower. I see somewhere around 30 seconds of lag when I first launch the program, and it eventually slows to a halt. I am connecting to three cameras but only displaying one at a time. All the cameras display and the selection buttons work. My only issue is the display refresh rate.
This is running on Python 3.7 on a Raspberry Pi 4. I can connect to the camera via a web browser and it appears to have no lag.
I have been searching for answers but cannot seem to find anything that helps. Can anyone offer some help with this?
Here's my program (I have removed unrelated code):
#!/usr/bin/env python3
import time
from tkinter import *
import cv2
from PIL import Image, ImageTk
#GUI
class robotGUI:
def __init__(self):
self.selectedCam = "front"
self.window = Tk()
#Setup the window to fit the Raspberry Pi Touch Display (800x480) and align top left
self.window.geometry("800x480+0+0")
self.window.overrideredirect(True)
self.window.fullScreenState = False
#Create Frame for Video Window
self.videoFrame = Frame(self.window, relief=SUNKEN, bd=2)
self.videoFrame.place(x=0, y=0, height=457, width=650)
#Create the Video Window
self.video = Label(self.videoFrame, bd=0, relief=FLAT, width=644, height=451)
self.video.place(x=0, y=0)
self.vid = VideoCapture()
self.camUpdateFreq = 250
self.updateCams()
#Create the Button Frame
self.buttonFrame = Frame(self.window, relief=FLAT)
self.buttonFrame.place(x=651, y=0, height=457, width=149)
#Create Buttons
#Select Front Camera Button
self.frontCamButton = Button(self.buttonFrame, text="Front Camera", command=lambda: self.selectCam("front"))
self.frontCamButton.place(x=24, y=50, height=30, width=100)
#Select Boom Camera Button
self.boomCamButton = Button(self.buttonFrame, text="Boom Camera", command=lambda: self.selectCam("boom"))
self.boomCamButton.place(x=24, y=130, height=30, width=100)
#Select Rear Camera Button
self.rearCamButton = Button(self.buttonFrame, text="Rear Camera", command=lambda: self.selectCam("rear"))
self.rearCamButton.place(x=24, y=210, height=30, width=100)
#Close Button
self.exitButton = Button(self.buttonFrame, text="Close", command=self.window.destroy)
self.exitButton.place(x=24, y=400, height=30, width=100)
#Start the main loop for the gui
self.window.mainloop()
def selectCam(self, cam):
if (cam.lower() == "front"):
self.selectedCam = "front"
self.statusBarLeft['text'] = "Front Camera Selected"
elif (cam.lower() == "boom"):
self.selectedCam = "boom"
self.statusBarLeft['text'] = "Boom Camera Selected"
elif (cam.lower() == "rear"):
self.selectedCam = "rear"
self.statusBarLeft['text'] = "Rear Camera Selected"
def updateCams(self):
#Get a frame from the selected camera
ret, frame = self.vid.get_frame(self.selectedCam)
if ret:
imageCV2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
img = Image.fromarray(imageCV2)
imgPhoto = ImageTk.PhotoImage(image=img)
self.video.imgPhoto = imgPhoto
self.video.configure(image=imgPhoto)
self.window.after(self.camUpdateFreq, self.updateCams)
#Video Camera Class
class VideoCapture:
def __init__(self):
#Define Locals
FrontCameraAddress = "rtsp://admin:password@192.168.5.20:8554/12"
BoomCameraAddress = "rtsp://admin:password@192.168.5.21:8554/12"
RearCameraAddress = "rtsp://admin:password@192.168.5.22:8554/12"
#Open Front Video Camera Source
self.vidFront = cv2.VideoCapture(FrontCameraAddress)
self.vidBoom = cv2.VideoCapture(BoomCameraAddress)
self.vidRear = cv2.VideoCapture(RearCameraAddress)
#Verify that the Camera Streams Opened
if not self.vidFront.isOpened():
raise ValueError("Unable to open video source to Front Camera")
if not self.vidBoom.isOpened():
raise ValueError("Unable to open video source to Boom Camera")
if not self.vidRear.isOpened():
raise ValueError("Unable to open video source to Rear Camera")
#Get One Frame from the Selected Camera
def get_frame(self, camera="front"):
#Attempt to Get Front Camera Frame
if (camera.lower() == "front"):
#If Stream Still Open Return a Frame
if self.vidFront.isOpened():
ret, frame = self.vidFront.read()
if ret:
#Return a boolean success flag and the current frame converted to BGR
return (ret, frame)
else:
return (ret, None)
else:
return (False, None)
#Attempt to Get Boom Camera Frame
elif (camera.lower() == "boom"):
#If Stream Still Open Return a Frame
if self.vidBoom.isOpened():
ret, frame = self.vidBoom.read()
if ret:
#Return a boolean success flag and the current frame converted to BGR
return (ret, frame)
else:
return (ret, None)
else:
return (False, None)
#Attempt to Get Rear Camera Frame
elif (camera.lower() == "rear"):
#If Stream Still Open Return a Frame
if self.vidRear.isOpened():
ret, frame = self.vidRear.read()
if ret:
#Return a boolean success flag and the current frame converted to BGR
return (ret, frame)
else:
return (ret, None)
else:
return (False, None)
else:
return (False, None)
#Release the video sources when the object is destroyed
def __del__(self):
if self.vidFront.isOpened():
self.vidFront.release()
if self.vidBoom.isOpened():
self.vidBoom.release()
if self.vidRear.isOpened():
self.vidRear.release()
#Main Routine - Only run if called from main program instance
if __name__ == '__main__':
try:
#Create GUI Object
app = robotGUI()
except Exception as e:
print("Exception: " + str(e))
finally:
print("Cleaning Up")
NOTE: In this copy of the program I am updating every 250 ms. I have tried smaller values, down to around 3 ms, but the frames still seem to be behind. Is there a better way to do this?
NOTE2: After working with this more today, I realize that OpenCV is buffering frames for each camera, starting when cv2.VideoCapture() is called for that camera. The read() function pulls the next frame from that buffer, which explains why the display takes so long to update and why the image I see on the screen never catches up to reality. I changed my test code to connect to only one camera at a time and to call release() whenever I am not actively viewing a camera, which improved things quite a bit. I also set the update function to run every 1 ms and use grab() to grab a frame every cycle, but I only process and display every 10th cycle, which has also helped. I still have some lag that I would love to remove if anyone has any suggestions.
My RTSP stream shows with zero noticeable lag when viewed in a web browser. Does anyone know how I can get the same effect in tkinter? I am not married to openCV.
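For illustration, a minimal sketch of the grab()-and-skip approach described in NOTE2, written against the robotGUI/VideoCapture classes above (simplified to the front camera only; the 1 ms delay and the every-10th-frame skip are the values mentioned in the note, everything else is illustrative):
def updateCams(self):
    # grab() every cycle so OpenCV's internal buffer never runs ahead of us,
    # but only decode and display every 10th frame to keep the GUI responsive
    self.frameCounter = getattr(self, "frameCounter", 0) + 1
    cap = self.vid.vidFront              # currently selected capture object
    cap.grab()                           # pull a frame off the buffer cheaply
    if self.frameCounter % 10 == 0:
        ret, frame = cap.retrieve()      # decode only the frame we will show
        if ret:
            img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            imgPhoto = ImageTk.PhotoImage(image=img)
            self.video.imgPhoto = imgPhoto      # keep a reference alive
            self.video.configure(image=imgPhoto)
    self.window.after(1, self.updateCams)       # run again in ~1 ms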
I created a master window in tkinter. When I press a button on the master window it opens another top-level window, in which I created a canvas and a button.
Canvas for: reading the camera frame by frame and showing it in the tkinter window.
Button for: capturing the current frame. In the button callback I use face detection; when it detects a face I want to close the top-level window and go back to the master window.
In the master window I have another button to exit the GUI (meaning master.destroy when I press exit), but when I press exit the computer hangs and asks for a force close. I am posting my code below.
import tkinter
from tkinter import *
import cv2
import PIL.Image, PIL.ImageTk
import time
import os
import numpy as np
class App(object):
def __init__(self,master3):
self.window=Toplevel(master3)
self.window.title("capturing image")
self.window.geometry("{}x{}".format(self.window.winfo_screenwidth(),self.window.winfo_screenheight()))
self.vid = cv2.VideoCapture(0)
#self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
#self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.canvas = Canvas(self.window, width = 1000, height = 700)
self.path = os.path.dirname(os.path.abspath(__file__))
self.detector=cv2.CascadeClassifier(self.path+r'\HarrCascade\haarcascade_frontalface.xml')
#self.recognizer = cv2.face.LBPHFaceRecognizer_create()
#self.recognizer.read(self.path+r'\trained_data\trained_file.xml')
self.canvas.pack()
self.canvas.pack()
self.btn_snapshot=Button(self.window, text="Snapshot", width=50,command=self.snap)
self.btn_snapshot.pack(anchor=CENTER, expand=True)
if not self.vid.isOpened():
raise IOError("Unable to open video source")
else:
self.count=0
self.delay=15
self.update()
def g(self):
if self.vid.isOpened():
_,frame = self.vid.read()
self.frame2 = frame
self.count=1
self.vid.release()
cv2.waitKey(1)
cv2.destroyAllWindows()
for i in range(1,5):
cv2.waitKey(1)
master.deiconify()
def snap(self):
self.gray=cv2.cvtColor(self.frame2,cv2.COLOR_BGR2GRAY)
self.faces= self.detector.detectMultiScale(self.gray, scaleFactor=1.2, minNeighbors=5, minSize=(100, 100), flags=cv2.CASCADE_SCALE_IMAGE)
self.no_faces=self.faces.shape[0]
if self.no_faces >= 1:
for(x,y,w,h) in self.faces:
self.g()
cv2.imwrite("face-0.jpg",self.gray)
def update(self):
if self.vid.isOpened():
ret, frame = self.vid.read()
frame1=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if ret:
self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame1))
self.canvas.create_image(0, 0, image = self.photo, anchor = NW)
else:
pass
if self.count==0:
self.window.after(self.delay, self.update)
else:
pass
else:
pass
def fun():
App(master)
master.withdraw()
def fun1():
master.quit()
master=Tk(screenName=None,baseName=None,className='Tk',useTk=1)
master.title('face recognition system')
master.geometry("{}x{}".format(master.winfo_screenwidth(),master.winfo_screenheight()))
button2 = Button(master,text = 'Test Image',bg = 'black',fg = 'white',font = ('algerian',10),height = 3,width = 20,command=fun)
button3 = Button(master,text = 'exit',bg = 'black',fg = 'white',font = ('algerian',10),height = 3,width = 20,command=fun1)
button2.pack()
button3.pack()
master.mainloop()
thanks in advance for your kind help
The problem is that when I try to save a video file from frames I receive one at a time from another function (I checked that it is not always the same frame...), the video is created but contains only one frame. I run a loop outside the Video_utility class and send each frame to the save_and_display_video function.
import cv2
class Video_utility:
def __init__(self, name_video, format_video, display, fps, size):
self.name_video = name_video
self.format_video = format_video
self.display = display
self.fps = fps
self.size = size
self.stream_frame = None
self.flag_update = True
self.display = True
self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
self.out = cv2.VideoWriter(name_video, self.fourcc, fps, self.size)
self.i = 0
def save_and_display_video(self, frame):
frame = cv2.resize(frame,(self.size))
self.out.write(frame)
self.out.release()
cv2.destroyAllWindows()
Don't close the file after every frame.
You have self.out.release() in your save_and_display_video() function, so the writer is closed right after the first frame is written.
You should only release it after you have received the whole video; see the sketch below.
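A minimal sketch of that change, based on the Video_utility class above (the close() method name is just a suggestion):
def save_and_display_video(self, frame):
    # write the frame but keep the writer open for the next one
    frame = cv2.resize(frame, self.size)
    self.out.write(frame)
def close(self):
    # call this once, after the loop outside the class has sent the last frame
    self.out.release()
    cv2.destroyAllWindows()
The loop outside the class then calls save_and_display_video(frame) for every frame and close() once at the end.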
My task is to display a webcam stream and its black & white (grayscale) version in two different frames on a single GUI using Tkinter in Python. I have seen some examples on Google, but they deal with images rather than video, as in this link: Example of image display in 2 panes on a single GUI.
I need exactly the same thing, but for the real-time video from my webcam.
Initial question:
"I am having issues in displaying multiple (2) windows for displaying video frames in a GUI using Tkinter in python. Please help me with a
code for this task."
The initial question mentioned 2 windows, so here's a basic example of how to create multiple windows with tkinter:
#import tkinter as tk
import Tkinter as tk
class MainWindow(tk.Tk):
def __init__(self, *args, **kwargs):
#super().__init__()
tk.Tk.__init__(self)
self.title("This is the MainWindow")
self._is_hidden = False
self.window1 = OtherWindow(self, title="window 1")
self.window2 = OtherWindow(self, title="window 2")
def toggle_hide(self):
if self._is_hidden:
self.iconify()
self.deiconify()
else:
self.withdraw()
self._is_hidden = not self._is_hidden
class OtherWindow(tk.Toplevel):
def __init__(self, master, *args, **kwargs):
#super().__init__(master)
tk.Toplevel.__init__(self, master)
if 'title' in kwargs:
self.title(kwargs['title'])
self.hide_main_button = tk.Button(self, text="Hide/Show MainWindow")
self.hide_main_button['command'] = self.master.toggle_hide
self.hide_main_button.pack()
if __name__ == '__main__':
root = MainWindow()
root.mainloop()
from ttk import *
import Tkinter as tk
from Tkinter import *
import cv2
from PIL import Image, ImageTk
import os
import numpy as np
global last_frame #creating global variable
last_frame = np.zeros((480, 640, 3), dtype=np.uint8)
global last_frame2 #creating global variable
last_frame2 = np.zeros((480, 640, 3), dtype=np.uint8)
global cap
cap = cv2.VideoCapture(1)
def show_vid(): #creating a function
if not cap.isOpened(): #checks for the opening of camera
print("cant open the camera")
flag, frame = cap.read()
frame = cv2.resize(frame,(400,500))
if flag is None:
print "Major error!"
elif flag:
global last_frame
last_frame = frame.copy()
global last_frame2
last_frame2 = frame.copy()
pic = cv2.cvtColor(last_frame, cv2.COLOR_BGR2RGB) #we can change the display color of the frame gray,black&white here
img = Image.fromarray(pic)
imgtk = ImageTk.PhotoImage(image=img)
lmain.imgtk = imgtk
lmain.configure(image=imgtk)
lmain.after(10, show_vid)
def show_vid2():
pic2 = cv2.cvtColor(last_frame2, cv2.COLOR_BGR2GRAY)
img2 = Image.fromarray(pic2)
img2tk = ImageTk.PhotoImage(image=img2)
lmain2.img2tk = img2tk
lmain2.configure(image=img2tk)
lmain2.after(10, show_vid2)
if __name__ == '__main__':
root=tk.Tk() #assigning root variable for Tkinter as tk
lmain = tk.Label(master=root)
lmain2 = tk.Label(master=root)
#lmain.Frame= Frame(width=768, height=576)
#framex.grid(column=3,rowspan=2,padx=5, pady=5)
lmain.pack(side = LEFT)
lmain2.pack(side = RIGHT)
root.title("Fire Alarm Detector") #you can give any title
root.geometry("900x700+100+10") #size of window , x-axis, yaxis
exitbutton = Button(root, text='Quit',fg="red",command= root.destroy).pack(side = BOTTOM,)
show_vid()
show_vid2()
root.mainloop() #keeps the application in an infinite loop so it works continuously
cap.release()
import tkinter
import PIL.Image
import PIL.ImageTk
import cv2
class App:
def __init__(self, window, video_source1, video_source2):
self.window = window
self.window.title("KEC MEDIA PLAYER")
self.video_source1 = video_source1
self.video_source2 = video_source2
self.photo1 = ""
self.photo2 = ""
# open video source
self.vid1 = MyVideoCapture(self.video_source1, self.video_source2)
# Create a canvas that can fit the above video source size
self.canvas1 = tkinter.Canvas(window, width=500, height=500)
self.canvas2 = tkinter.Canvas(window, width=500, height=500)
self.canvas1.pack(padx=5, pady=10, side="left")
self.canvas2.pack(padx=5, pady=60, side="left")
# After it is called once, the update method will be automatically called every delay milliseconds
self.delay = 15
self.update()
self.window.mainloop()
def update(self):
# Get a frame from the video source
ret1, frame1, ret2, frame2 = self.vid1.get_frame
if ret1 and ret2:
self.photo1 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame1))
self.photo2 = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame2))
self.canvas1.create_image(0, 0, image=self.photo1, anchor=tkinter.NW)
self.canvas2.create_image(0, 0, image=self.photo2, anchor=tkinter.NW)
self.window.after(self.delay, self.update)
class MyVideoCapture:
def __init__(self, video_source1, video_source2):
# Open the video source
self.vid1 = cv2.VideoCapture(video_source1)
self.vid2 = cv2.VideoCapture(video_source2)
if not self.vid1.isOpened():
raise ValueError("Unable to open video source", video_source1)
@property
def get_frame(self):
ret1 = ""
ret2 = ""
if self.vid1.isOpened() and self.vid2.isOpened():
ret1, frame1 = self.vid1.read()
ret2, frame2 = self.vid2.read()
frame1 = cv2.resize(frame1, (500, 500))
frame2 = cv2.resize(frame2, (500, 500))
if ret1 and ret2:
# Return a boolean success flag and the current frame converted to BGR
return ret1, cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB), ret2, cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
else:
return ret1, None, ret2, None
else:
return ret1, None, ret2, None
# Release the video source when the object is destroyed
def __del__(self):
if self.vid1.isOpened():
self.vid1.release()
if self.vid2.isOpened():
self.vid2.release()
def callback():
global v1,v2
v1=E1.get()
v2=E2.get()
if v1 == "" or v2 == "":
L3.pack()
return
initial.destroy()
v1 = ""
v2 = ""
initial = tkinter.Tk()
initial.title("KEC MEDIA PLAYER")
L0 = tkinter.Label(initial, text="Enter the full path")
L0.pack()
L1 = tkinter.Label(initial, text="Video 1")
L1.pack()
E1 = tkinter.Entry(initial, bd =5)
E1.pack()
L2 = tkinter.Label(initial, text="Video 2")
L2.pack()
E2 = tkinter.Entry(initial, bd =5)
E2.pack()
B = tkinter.Button(initial, text ="Next", command = callback)
B.pack()
L3 = tkinter.Label(initial, text="Enter both the names")
initial.mainloop()
# Create a window and pass it to the Application object
App(tkinter.Tk(),v1, v2)
This code works under normal circumstances, but I haven't handled the situation where one video ends while the other is still playing. Audio has also not been handled in this code.
I created two canvases in the same window and run the videos as a series of images. I have resized the videos to fit a constant canvas size, but you can change the canvas size to fit the video if you want.
You can change the source to be your webcam, as shown in the sketch below.
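For example, a hedged sketch (not part of the original answer): cv2.VideoCapture accepts a numeric device index instead of a file path, so passing 0 opens the first webcam:
import cv2
vid = cv2.VideoCapture(0)   # 0 = first webcam, 1 = second, and so on
ret, frame = vid.read()     # same read() interface as for a video file
vid.release()
Note that the entry boxes in the GUI above return strings, so you would need to convert the value with int() before handing it to cv2.VideoCapture.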