I am trying to save images from three cameras at the same moment, using multiple (3) threads and the press of a button.
I picked up some code online and created three object instances. The threads are running fine, the windows are displayed, and I can save the images. But I am not sure if I am using the "Locks" (acquire & release) in the right sense. In total there are 4 locks.
Current Code:
#!/usr/bin/env python
from threading import Thread, Lock
import cv2
class WebcamVideoStream :
def __init__(self, src = 0, width = 1024, height = 768) :
# width = 320, height = 240
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
self.started = False
self.read_lock = Lock()
def start(self) :
if self.started :
print "already started!!"
return None
self.started = True
self.thread = Thread(target=self.update, args=())
self.thread.start()
return self
def update(self) :
while self.started :
#if self.stream.grab():
#(grabbed, frame) = self.stream.retrieve()
(grabbed, frame) = self.stream.read()
self.read_lock.acquire()
self.grabbed, self.frame = grabbed, frame
self.read_lock.release()
def read(self) :
self.read_lock.acquire()
frame = self.frame.copy()
self.read_lock.release()
return frame
def stop(self) :
self.started = False
self.thread.join()
def __exit__(self, exc_type, exc_value, traceback) :
self.stream.release()
if __name__ == "__main__" :
mainer_lock = Lock()
vs = WebcamVideoStream(src = 0).start()
vs2 = WebcamVideoStream(src = 1).start()
vs3 = WebcamVideoStream(src = 2).start()
while True :
frame = vs.read()
frame2 = vs2.read()
frame3 = vs3.read()
cv2.imshow('webcam', frame)
cv2.imshow('webcam2', frame2)
cv2.imshow('webcam3', frame3)
if cv2.waitKey(1) == 99:
mainer_lock.acquire()
cv2.imwrite('cam_pics/frame.jpg', frame)
cv2.imwrite('cam_pics/frame2.jpg', frame2)
cv2.imwrite('cam_pics/frame3.jpg', frame3)
mainer_lock.release()
vs.stop()
vs2.stop()
vs3.stop()
cv2.destroyAllWindows()
I think you do not need the locks at all.
First, mainer_lock is meaningless. I know you want to use it to protect frame[2, 3] from being changed by vs[2, 3], but that cannot happen: frame[2, 3] in the main thread is a copy of the original frame (I assume you are using copy() correctly). vs[2, 3] will only change the frame inside their own instances, which does not affect the frames held by the main thread.
Second, read_lock is also unnecessary. Do you really mind if the frame changes while it is being read? Since you are capturing real frames, consecutive frames are almost identical. Say you want to capture the frame at 1000 ms but, because of thread contention, you actually capture the frame at 1002 ms. Does it really matter? As far as I can tell, it doesn't.
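To illustrate, a lock-free version of the class could look roughly like this (just a sketch; it relies on attribute rebinding being atomic under CPython's GIL, and read() still returns a copy so the capture thread never overwrites the frame the caller is holding):

from threading import Thread
import cv2

class LockFreeVideoStream:
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.started = False

    def start(self):
        self.started = True
        self.thread = Thread(target=self.update, daemon=True)
        self.thread.start()
        return self

    def update(self):
        # Rebinding self.frame is a single atomic attribute assignment,
        # so readers never observe a half-written frame.
        while self.started:
            grabbed, frame = self.stream.read()
            if grabbed:
                self.grabbed, self.frame = grabbed, frame

    def read(self):
        # Copy so the caller's frame is decoupled from later captures.
        return self.frame.copy()

    def stop(self):
        self.started = False
        self.thread.join()
        self.stream.release()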
Related
For context: I'm trying to write a program that can record video from a varying number of webcams.
I got everything working, except that I am getting strange behavior in the video file itself.
After reading other questions, fixing all my codecs and so forth, I figured it's an issue related to threading. The strange part is that when I tried it with some code I found around here, I got the opposite problem!
The issue: my own code creates super-speedy videos (5 recorded seconds play back in 1 second), while the other code creates super-slow videos (5 recorded seconds play back in 25 seconds).
The question: can anyone explain this behavior to me?
My simplified code:
import cv2
import threading
import os
import time
class Cameras():
def __init__(self, parent=None):
super().__init__()
self.cam1 = cv2.VideoCapture(0)
self.cam1check = self.cam1.isOpened()
newfolder = "./Unnamed"
os.makedirs(newfolder, exist_ok=True)
full_path = os.path.abspath(newfolder)
self.path = os.path.join(full_path,"Speedy")
self.namecam = self.path + " Example 0.avi"
self.Format = cv2.VideoWriter_fourcc(*'H264')
self.FPS = 60
self.cam1.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
self.cam1.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
self.cam1.set(cv2.CAP_PROP_FPS, self.FPS)
self.width1 = int(self.cam1.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height1 = int(self.cam1.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.size1 = (self.width1,self.height1)
def capturecam1(self):
while self.cam1check is True:
rect1, frame1 = self.cam1.read(cv2.CAP_FFMPEG)
if rect1:
self.video_writer_cam1.write(frame1)
def start_rec(self):
self.video_writer_cam1 = cv2.VideoWriter(self.namecam, self.Format, self.FPS, self.size1, cv2.CAP_FFMPEG)
if self.cam1check is True:
thread = threading.Thread(target=self.capturecam1, daemon=True)
thread.start()
if __name__ == '__main__':
Launch = Cameras()
Launch.start_rec()
while True:
time.sleep(1)
The other code:
from threading import Thread
import cv2
import time
import os
class VideoWriterWidget(object):
def __init__(self, video_file_name, src=0):
# Create a VideoCapture object
print(src)
self.frame_name = src # if using webcams, else just use src as it is.
self.video_file = video_file_name
# self.video_file_name = video_file_name + '.avi'
# self.capture = cv2.VideoCapture(src)
newfolder = "./Unnamed"
os.makedirs(newfolder, exist_ok=True)
full_path = os.path.abspath(newfolder)
self.path = os.path.join(full_path,"Slowmo")
self.video_file_path = self.path + " Example {}.avi".format(self.video_file)
# Default resolutions of the frame are obtained (system dependent)
self.cam1 = cv2.VideoCapture(0)
self.cam1check = self.cam1.isOpened()
print(self.cam1check)
self.cam1.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
self.cam1.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
self.cam1.set(cv2.CAP_PROP_FPS, 60)
# Set up codec and output video settings
self.codec = cv2.VideoWriter_fourcc(*'H264')
self.output_video = cv2.VideoWriter(self.video_file_path, self.codec, 60, (1280, 720))
# Start the thread to read frames from the video stream
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
# Start another thread to show/save frames
self.start_recording()
print('initialized {}'.format(self.video_file))
def update(self):
# Read the next frame from the stream in a different thread
while True:
if self.cam1check is True:
(self.status, self.frame) = self.cam1.read()
def show_frame(self):
# Display frames in main program
print(self.status)
if self.status:
print("Showing!")
cv2.imshow(self.frame_name, self.frame)
# Press Q on keyboard to stop recording
key = cv2.waitKey(1)
if key == ord('q'):
self.cam1.release()
self.output_video.release()
cv2.destroyAllWindows()
exit(1)
def save_frame(self):
# Save obtained frame into video output file
self.output_video.write(self.frame)
def start_recording(self):
# Create another thread to show/save frames
def start_recording_thread():
while True:
try:
# self.show_frame()
self.save_frame()
except AttributeError:
pass
self.recording_thread = Thread(target=start_recording_thread, args=())
self.recording_thread.daemon = True
self.recording_thread.start()
if __name__ == '__main__':
src1 = '0'
video_writer_widget1 = VideoWriterWidget('Camera 1', src1)
# src2 = '1'
# video_writer_widget2 = VideoWriterWidget('Camera 2', src2)
# src3 = '2'
# video_writer_widget3 = VideoWriterWidget('Camera 3', src3)
# Since each video player is in its own thread, we need to keep the main thread alive.
# Keep spinning using time.sleep() so the background threads keep running
# Threads are set to daemon=True so they will automatically die
# when the main thread dies
while True:
time.sleep(1)
Credit for the other code here: https://stackoverflow.com/a/71624807/9599824
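For reference, a rough way to check what frame rate the camera actually delivers (plain OpenCV; a mismatch between this number and the fixed 60 FPS handed to the VideoWriter would typically make the resulting file play back too fast or too slow):

import time
import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
cap.set(cv2.CAP_PROP_FPS, 60)

n_frames = 0
start = time.time()
while n_frames < 300:  # sample roughly 300 frames
    ret, _ = cap.read()
    if ret:
        n_frames += 1
elapsed = time.time() - start
print("measured capture rate: %.1f FPS" % (n_frames / elapsed))
cap.release()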
I have a task to process a streaming video from a Hikvision IP cam using OpenCV.
I tried this:
RTSP
"""
cap = cv2.VideoCapture()
cap.open("rtsp://yourusername:yourpassword#172.16.30.248:555/Streaming/channels/1/")
and this, using the Hikvision API:
from hikvisionapi import Client  # assuming the hikvisionapi client package
import cv2

cam = Client('http://192.168.1.10', 'admin', 'password', timeout=30)
cam.count_events = 2
response = cam.Streaming.channels[101].picture(method='get', type='opaque_data')
# Write the snapshot to disk, then read it back with OpenCV
with open('screen.jpg', 'wb') as f:
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:
            f.write(chunk)
img = cv2.imread('screen.jpg')
cv2.imshow("show", img)
cv2.waitKey(1)
In the first case, I have a delay of about 9-20 seconds between real time and cap.read().
I tried to work around it with the following "hack", but with no results:
"""
class CameraBufferCleanerThread(threading.Thread):
def __init__(self, camera, name='camera-buffer-cleaner-thread'):
self.camera = camera
self.last_frame = None
super(CameraBufferCleanerThread, self).__init__(name=name)
self.start()
def run(self):
while True:
ret, self.last_frame = self.camera.read()
"""
The second case shows frames with a delay of 1-2 seconds, which is acceptable, but fps = 1, which is not very good.
Are there any options that can help get a stream with low latency and a normal fps?
I found a working solution in nathancys's post.
I used a simple modification of his code for my case, to get the frame in the main function.
from threading import Thread
import cv2, time
class ThreadedCamera(object):
def __init__(self, src=0):
self.capture = cv2.VideoCapture(src)
self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
self.FPS = 1/100
self.FPS_MS = int(self.FPS * 1000)
# First initialisation self.status and self.frame
(self.status, self.frame) = self.capture.read()
# Start frame retrieval thread
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
def update(self):
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
time.sleep(self.FPS)
if __name__ == '__main__':
src = 'rtsp://admin:password#192.168.7.100:554/ISAPI/Streaming/Channels/101'
threaded_camera = ThreadedCamera(src)
while True:
try:
cv2.imshow('frame', threaded_camera.frame)
cv2.waitKey(threaded_camera.FPS_MS)
except AttributeError:
pass
I want to display images from an Allied Vision camera inside a tkinter frame using OpenCV and the SDK for the camera, VimbaPython.
The only possible way to initialize the camera is with a Python with statement:
with Vimba.get_instance() as vimba:
cams = vimba.get_all_cameras()
with cams[0] as camera:
camera.get_frame()
# Convert frame to opencv image, then use Image.fromarray and ImageTk.PhotoImage to
# display it on the tkinter GUI
Everything works fine so far. But I don't only need a single frame. Instead, I need to continuously get frames and display them on the screen so that it is streaming.
I found that one way to do it is to call the .after(delay, function) method from a tkinter Label widget.
So, after obtaining one frame, I want to call the same function to get a new frame and display it again. The code would look like that:
with Vimba.get_instance() as vimba:
cams = vimba.get_all_cameras()
with cams[0] as camera:
def show_frame():
frame = camera.get_frame()
frame = frame.as_opencv_image()
im = Image.fromarray(frame)
img = ImageTk.PhotoImage(im)
lblVideo.configure(image=img) # this is the Tkinter Label Widget
lblVideo.image = img
show_frame()
lblVideo.after(20, show_frame)
Then this shows the first frame and stops, throwing an error saying that Vimba needs to be initialized with a with statement. I don't know much about Python, but it looks like by the time the function is called again through the .after() method, the with statement has already ended.
I would like to know if it is possible to execute this show_frame() function without ending the with block. Also, I can't initialize the camera every time, because then the program becomes really slow.
Thank you
I know this question is pretty old, but I ran into a similar problem with the Allied Vision cameras and found the solution to be relatively robust. So I hope this helps someone, even if not the OP.
An alternative to using with statements is using __enter__ and __exit__ (see sample here). With this, I created a class for the Vimba camera, and during __init__ I used these functions twice: once to initialize the Vimba instance, and once to open the camera itself. An example is as follows...
vimba_handle = Vimba.get_instance().__enter__()
camera = vimba_handle.get_all_cameras()[0].__enter__()
I'll include a longer snippet as code as well, but please note that my purpose was slightly different from the OP's intent. Hopefully, it is still useful.
import threading
import time
from inspect import currentframe

import cv2
import numpy as np
from vimba import (Vimba, VimbaFeatureError, intersect_pixel_formats,
                   OPENCV_PIXEL_FORMATS, MONO_PIXEL_FORMATS, COLOR_PIXEL_FORMATS)

# Note: error_check() is the author's own error handler and is not shown here.
class VimbaCam:
def __init__(self, device_id=0):
# Variables
self.current_frame = np.array([])
self.device = None
self.device_id = device_id
self.vimba_handle = Vimba.get_instance().__enter__()
self.is_streaming = False
self.scale_window = 4
self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)
# Default settings
self.auto_exposure = "Off"
self.auto_gain = "Off"
self.acquisition = "Continuous"
self.exposure_us = 200000
self.fps = 6.763
self.gain = 0
self.gamma = 1
self.open()
def close(self):
if self.device is not None:
if self.is_streaming:
self.stop_stream()
time.sleep(1)
self.device.__exit__(None, None, None)
self.vimba_handle.__exit__(None, None, None)
def open(self):
cams = self.vimba_handle.get_all_cameras()
if not cams:
error_check(151, currentframe())
else:
self.device = cams[self.device_id].__enter__()
self.set_defaults()
self.start_stream()
def start_stream(self):
if self.device is not None:
self.is_streaming = True
self.stream_thread.start()
time.sleep(1)
def thread_stream(self):
while self.is_streaming:
current_frame = self.device.get_frame().as_opencv_image()
h, w, _ = current_frame.shape
self.current_frame = current_frame.reshape((h, w))
self.stream_thread = threading.Thread(target=self.thread_stream, daemon=True)
def stop_stream(self):
if self.device is not None:
self.is_streaming = False
def live_video(self):
if self.device is not None:
window_name = "Allied Vision"
h, w = self.current_frame.shape
w = int(w / self.scale_window)
h = int(h / self.scale_window)
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(window_name, w, h)
while 1:
cv2.imshow(window_name, self.current_frame)
cv2.waitKey(1)
if cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1:
break
cv2.destroyAllWindows()
def set_defaults(self):
if self.device is not None:
# Exposure time settings
self.device.ExposureAuto.set(self.auto_exposure)
self.device.ExposureTimeAbs.set(self.exposure_us)
# Gain settings
self.device.GainAuto.set(self.auto_gain)
self.device.Gain.set(self.gain)
# Gamma settings
self.device.Gamma.set(self.gamma)
self.device.AcquisitionMode.set(self.acquisition)
self.device.AcquisitionFrameRateAbs.set(self.fps)
# Try to adjust GeV packet size (available for GigE only)
try:
self.device.GVSPAdjustPacketSize.run()
while not self.device.GVSPAdjustPacketSize.is_done():
pass
except (AttributeError, VimbaFeatureError):
pass
# Color formatting (tries mono first, then color)
cv_formats = intersect_pixel_formats(self.device.get_pixel_formats(), OPENCV_PIXEL_FORMATS)
mono_formats = intersect_pixel_formats(cv_formats, MONO_PIXEL_FORMATS)
color_formats = intersect_pixel_formats(cv_formats, COLOR_PIXEL_FORMATS)
if mono_formats:
self.device.set_pixel_format(mono_formats[0])
elif color_formats:
self.device.set_pixel_format(color_formats[0])
if __name__ == "__main__":
dev = VimbaCam()
dev.live_video()
dev.close()
You need to use a thread to run the capture code and pass the frames it reads via a queue. Then the main tkinter application reads the queue and shows the frames periodically using .after().
Below is an example based on your posted code:
import threading
from queue import SimpleQueue
import tkinter as tk
from PIL import Image, ImageTk
from vimba import Vimba
def camera_streaming(queue):
global is_streaming
is_streaming = True
print("streaming started")
with Vimba.get_instance() as vimba:
with vimba.get_all_cameras()[0] as camera:
while is_streaming:
frame = camera.get_frame()
frame = frame.as_opencv_image()
im = Image.fromarray(frame)
img = ImageTk.PhotoImage(im)
queue.put(img) # put the capture image into queue
print("streaming stopped")
def start_streaming():
start_btn["state"] = "disabled" # disable start button to avoid running the threaded task more than once
stop_btn["state"] = "normal" # enable stop button to allow user to stop the threaded task
show_streaming()
threading.Thread(target=camera_streaming, args=(queue,), daemon=True).start()
def stop_streaming():
global is_streaming, after_id
is_streaming = False # terminate the streaming thread
if after_id:
lblVideo.after_cancel(after_id) # cancel the showing task
after_id = None
stop_btn["state"] = "disabled" # disable stop button
start_btn["state"] = "normal" # enable start button
# periodical task to show frames in queue
def show_streaming():
global after_id
if not queue.empty():
image = queue.get()
lblVideo.config(image=image)
lblVideo.image = image
after_id = lblVideo.after(20, show_streaming)
queue = SimpleQueue() # queue for video frames
after_id = None
root = tk.Tk()
lblVideo = tk.Label(root, image=tk.PhotoImage(), width=640, height=480)
lblVideo.grid(row=0, column=0, columnspan=2)
start_btn = tk.Button(root, text="Start", width=10, command=start_streaming)
start_btn.grid(row=1, column=0)
stop_btn = tk.Button(root, text="Stop", width=10, command=stop_streaming, state="disabled")
stop_btn.grid(row=1, column=1)
root.mainloop()
Note that I don't have the camera and the SDK installed, so the above code may not work for you as-is. I am just demonstrating how to use a thread, a queue and .after().
Below is a test vimba module (saved as vimba.py) that I use to simulate the VimbaPython module using OpenCV and a webcam:
import cv2
class Frame:
def __init__(self, frame):
self.frame = frame
def as_opencv_image(self):
return self.frame
class Camera:
def __init__(self, cam_id=0):
self.cap = cv2.VideoCapture(cam_id, cv2.CAP_DSHOW)
def __enter__(self):
return self
def __exit__(self, *args):
self.cap.release()
return self
def get_frame(self):
ret, frame = self.cap.read()
if ret:
return Frame(frame)
class Vimba:
_instance = None
@classmethod
def get_instance(cls):
    if cls._instance is None:
        cls._instance = Vimba()
    return cls._instance
def __enter__(self):
return self
def __exit__(self, *args):
return self
def get_all_cameras(self):
return (Camera(),)
I tried to read the frames with OpenCV and display them in a tkinter label. I was able to do so using the code below:
import tkinter as tk
import cv2
from PIL import ImageTk, Image
video_path = "SAMPLE/STORED_VIDEO/PATH"
root = tk.Tk()
base_img = Image.open("PATH/TO/DEFAULT/LABLE/IMAGE")
img_obj = ImageTk.PhotoImage(base_img)
lblVideo = tk.Label(root, image=img_obj)
lblVideo.pack()
cap = cv2.VideoCapture(video_path)
if cap.isOpened():
def show_frame():
_, frame = cap.read()
im = Image.fromarray(frame)
img = ImageTk.PhotoImage(im)
lblVideo.configure(image=img)
lblVideo.image = img
lblVideo.after(1, show_frame) # Need to create callback here
show_frame()
root.mainloop()
Although this does not contain the with statement, you can try moving the .after() callback inside the show_frame function itself, as done above.
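If you do want to keep the with statement from the original question, one option (a sketch only, not tested against the real SDK) is to enter the Vimba context once and keep it open for the whole lifetime of the GUI by calling mainloop() inside it:

import tkinter as tk
from PIL import Image, ImageTk
from vimba import Vimba

root = tk.Tk()
lblVideo = tk.Label(root)
lblVideo.pack()

with Vimba.get_instance() as vimba:
    with vimba.get_all_cameras()[0] as camera:
        def show_frame():
            frame = camera.get_frame().as_opencv_image()
            im = Image.fromarray(frame)
            img = ImageTk.PhotoImage(im)
            lblVideo.configure(image=img)
            lblVideo.image = img
            lblVideo.after(20, show_frame)  # reschedule while the context is still open
        show_frame()
        root.mainloop()  # the with blocks stay open until the window is closed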
I am building an app that records frames from an IP camera over RTSP.
My engine is in charge of saving the video as mp4 with the OpenCV VideoWriter, and that part works well.
What I am looking for is to create startRecord and stopRecord class methods that will respectively start and stop recording according to a trigger (it could be an argument that I pass to the thread).
Does anyone know the best way to do that kind of thing?
Here is my class:
from threading import Thread
import cv2
import time
import multiprocessing
import threading
class RTSPVideoWriterObject(object):
def __init__(self, src=0):
# Create a VideoCapture object
self.capture = cv2.VideoCapture(src)
# Start the thread to read frames from the video stream
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
def update(self):
# Read the next frame from the stream in a different thread
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
def endRecord(self):
self.capture.release()
self.output_video.release()
exit(1)
def startRecord(self,endRec):
self.frame_width = int(self.capture.get(3))
self.frame_height = int(self.capture.get(4))
self.codec = cv2.VideoWriter_fourcc(*'mp4v')
self.output_video = cv2.VideoWriter('fileOutput.mp4', self.codec, 30, (self.frame_width, self.frame_height))
while True:
try:
self.output_video.write(self.frame)
if endRec:
self.endRecord()
except AttributeError:
pass
if __name__ == '__main__':
rtsp_stream_link = 'rtsp://foo:192.5545....'
video_stream_widget = RTSPVideoWriterObject(rtsp_stream_link)
stop_threads = False
t1 = threading.Thread(target = video_stream_widget.startRecord, args =[stop_threads])
t1.start()
time.sleep(15)
stop_threads = True
As you can see in the main block, I read frames and store them in a separate thread. Then I start recording (the record method has an infinite loop, so it is blocking), and after 15 seconds I try to pass a 'stop_record' argument to stop recording properly.
Part of the code comes from Storing RTSP stream as video file with OpenCV VideoWriter.
Does anyone have an idea?
I have read a lot that OpenCV can be very tricky with multithreading.
N.
Instead of passing arguments to the thread, use an internal flag in the class to determine when to start/stop recording. The trigger could be as simple as pressing the spacebar to start/stop recording. When the spacebar is pressed, it toggles an internal variable, say self.record, to True to start recording and False to stop recording. Specifically, to check when the spacebar is pressed, you can check if the key value returned from cv2.waitKey() is 32. If you want the trigger based on any other key, take a look at this post to determine the key code. Here's a quick example to start/stop recording a video using the spacebar:
from threading import Thread
import cv2
class RTSPVideoWriterObject(object):
def __init__(self, src=0):
# Create a VideoCapture object
self.capture = cv2.VideoCapture(src)
self.record = True
# Default resolutions of the frame are obtained (system dependent)
self.frame_width = int(self.capture.get(3))
self.frame_height = int(self.capture.get(4))
# Set up codec and output video settings
self.codec = cv2.VideoWriter_fourcc(*'mp4v')
self.output_video = cv2.VideoWriter('output.mp4', self.codec, 30, (self.frame_width, self.frame_height))
# Start the thread to read frames from the video stream
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
def update(self):
# Read the next frame from the stream in a different thread
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
def show_frame(self):
# Display frames in main program
if self.status:
cv2.imshow('frame', self.frame)
if self.record:
self.save_frame()
# Press Q on keyboard to stop recording
key = cv2.waitKey(1)
if key == ord('q'):
self.capture.release()
self.output_video.release()
cv2.destroyAllWindows()
exit(1)
# Press spacebar to start/stop recording
elif key == 32:
if self.record:
self.record = False
print('Stop recording')
else:
self.record = True
print('Start recording')
def save_frame(self):
# Save obtained frame into video output file
self.output_video.write(self.frame)
if __name__ == '__main__':
rtsp_stream_link = 'Your stream link!'
video_stream_widget = RTSPVideoWriterObject(rtsp_stream_link)
while True:
try:
video_stream_widget.show_frame()
except AttributeError:
pass
I solved the problem by creating a shared flag variable inside the class, in your case endRec, instead of passing it as an argument to the thread.
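A minimal sketch of that idea, based on the class from the question (method bodies simplified; the flag lives on the instance, so stopRecord() just flips it, and this has not been tested against a real RTSP stream):

from threading import Thread
import cv2
import time

class RTSPVideoWriterObject(object):
    def __init__(self, src=0):
        self.capture = cv2.VideoCapture(src)
        self.endRec = False  # instance flag instead of a thread argument
        self.thread = Thread(target=self.update, daemon=True)
        self.thread.start()

    def update(self):
        # Keep the latest frame available for the recording loop
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()

    def startRecord(self):
        w = int(self.capture.get(3))
        h = int(self.capture.get(4))
        codec = cv2.VideoWriter_fourcc(*'mp4v')
        self.output_video = cv2.VideoWriter('fileOutput.mp4', codec, 30, (w, h))
        while not self.endRec:
            try:
                self.output_video.write(self.frame)
            except AttributeError:
                pass  # no frame captured yet
        self.output_video.release()

    def stopRecord(self):
        self.endRec = True  # flip the flag from the main thread

if __name__ == '__main__':
    writer = RTSPVideoWriterObject(0)  # or an rtsp:// stream link
    rec_thread = Thread(target=writer.startRecord, daemon=True)
    rec_thread.start()
    time.sleep(15)
    writer.stopRecord()
    rec_thread.join()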
I'm working on a small project for personal use where I want to load the CCTV streams I have at home into OpenCV. I've done a lot of research and understand that I need to use multithreading to get them to work correctly, and using the following code I've got it working on one camera perfectly!
from threading import Thread
import imutils
import cv2, time
camlink1 = "rtsp://xx.xx.xx.xx.:xxx/user=xxx&password=xxx&channel=1&stream=0./"
class VideoStreamWidget(object):
def __init__(self, link, camname, src=0):
self.capture = cv2.VideoCapture(link)
# Start the thread to read frames from the video stream
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
self.camname = camname
self.link = link
print(camname)
print(link)
def update(self):
# Read the next frame from the stream in a different thread
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
time.sleep(.01)
def show_frame(self):
# Display frames in main program
frame = imutils.resize(self.frame, width=400)
cv2.imshow('Frame ' + self.camname, frame)
key = cv2.waitKey(1)
if key == ord('q'):
self.capture.release()
cv2.destroyAllWindows()
exit(1)
if __name__ == '__main__':
video_stream_widget = VideoStreamWidget(camlink1,"Cam1")
while True:
try:
video_stream_widget.show_frame()
except AttributeError:
pass
But now I want to run another camera alongside it (in parallel), and I'm not sure what I need to change in the code to start another thread and run it side by side. I thought I had done so with the following:
from threading import Thread
import imutils
import cv2, time
camlink1 = "rtsp://xx.xx.xx.xx.:xxx/user=xxx&password=xxx&channel=1&stream=0./"
camlink2 = "rtsp://xx.xx.xx.xx.:xxx/user=xxx&password=xxx&channel=2&stream=0./"
class VideoStreamWidget(object):
def __init__(self, link, camname, src=0):
self.capture = cv2.VideoCapture(link)
# Start the thread to read frames from the video stream
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
self.camname = camname
self.link = link
print(camname)
print(link)
def update(self):
# Read the next frame from the stream in a different thread
while True:
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
time.sleep(.01)
def show_frame(self):
# Display frames in main program
frame = imutils.resize(self.frame, width=400)
cv2.imshow('Frame ' + self.camname, frame)
key = cv2.waitKey(1)
if key == ord('q'):
self.capture.release()
cv2.destroyAllWindows()
exit(1)
if __name__ == '__main__':
video_stream_widget = VideoStreamWidget(camlink1,"Cam1")
video_stream_widget = VideoStreamWidget(camlink2,"Cam2")
while True:
try:
video_stream_widget.show_frame()
except AttributeError:
pass
But this just shows the second camera, overriding the first. I know I'm missing something simple somewhere, but after looking at this for hours I can't figure it out. Any help is appreciated.
Cheers
Chris
The problem is in these lines of code:
if __name__ == '__main__':
video_stream_widget = VideoStreamWidget(camlink1,"Cam1")
video_stream_widget = VideoStreamWidget(camlink2,"Cam2")
while True:
try:
video_stream_widget.show_frame()
except AttributeError:
pass
To be precise, you are overwriting the variable video_stream_widget and then calling video_stream_widget.show_frame() on it. show_frame() is therefore only called on the last widget (even though the thread capturing images for the first one is still working), so only the last one is shown, i.e. "Cam2".
Solution
Add different names for each and call the show_frame() function on both, like this:
if __name__ == '__main__':
video_stream_widget = VideoStreamWidget(camlink1,"Cam1")
video_stream_widget2 = VideoStreamWidget(camlink2,"Cam2")
while True:
try:
video_stream_widget.show_frame()
video_stream_widget2.show_frame()
except AttributeError:
pass
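If the number of cameras keeps growing, a natural extension of the same idea (just a sketch) is to keep the widgets in a list and call show_frame() on each one inside the loop:

if __name__ == '__main__':
    links = [(camlink1, "Cam1"), (camlink2, "Cam2")]  # add more (link, name) pairs as needed
    widgets = [VideoStreamWidget(link, name) for link, name in links]
    while True:
        for widget in widgets:
            try:
                widget.show_frame()
            except AttributeError:
                pass  # frame not ready yet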