How can I merge/combine 2 windows into one in Python?

I am writing a program which displays a security console for all the RTSP streams from my IP cameras. Each stream currently plays in a different window; I want them all merged into one window.
Here is my code:
import cv2
import threading

class camThread(threading.Thread):
    def __init__(self, previewName, camID):
        threading.Thread.__init__(self)
        self.previewName = previewName
        self.camID = camID

    def run(self):
        camPreview(self.previewName, self.camID)

def camPreview(previewName, camID):
    # global frame
    cv2.namedWindow(previewName)
    cam = cv2.VideoCapture(camID)
    if cam.isOpened():  # try to get the first frame
        rval, frame = cam.read()
    else:
        rval = False
    while rval:
        frame = cv2.resize(frame, (620, 400))
        cv2.imshow(previewName, frame)
        rval, frame = cam.read()
        cv2.waitKey(20)

thread1 = camThread("Camera 1", 'my-rtsp-link')
thread2 = camThread("Camera 2", "my-second-rtsp-link")
thread1.start()
thread2.start()
I've tried multiprocessing and it doesn't work since I have a while loop in my function. Any help would be greatly appreciated.
I used np.hstack to combine my RTSP streams, but that makes the streams freeze. So I got both streams working by multithreading, but now each stream plays in its own window, which is very messy if I add multiple cameras. How can I merge it all into one big window?
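A minimal sketch of one way to do this, assuming all streams are resized to the same dimensions: let each thread only store its latest frame in a shared dict, then build the mosaic with np.hstack and display it in a single window from the main thread (the stream URLs below are the same placeholders as above):

import cv2
import threading
import numpy as np

latest = {}                # camID -> most recent frame
lock = threading.Lock()

def camReader(camID):
    cam = cv2.VideoCapture(camID)
    while True:
        rval, frame = cam.read()
        if not rval:
            break
        frame = cv2.resize(frame, (620, 400))
        with lock:
            latest[camID] = frame    # overwrite: only the newest frame is kept

cams = ['my-rtsp-link', 'my-second-rtsp-link']   # placeholders
for c in cams:
    threading.Thread(target=camReader, args=(c,), daemon=True).start()

while True:
    with lock:
        frames = [latest[c] for c in cams if c in latest]
    if frames:
        # side-by-side mosaic; use np.vstack instead for a vertical layout
        cv2.imshow("Security Console", np.hstack(frames))
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()

Because only the reader threads touch the captures and only the main thread calls imshow, the GUI never blocks on a slow stream; the hstack freeze in the original attempt came from reading both cameras sequentially in one loop.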

Related

tkinter + OpenCV WebCam Very Slow Video Stream

I am using tkinter and OpenCV for the first time and have successfully built a GUI for my project; however, I cannot figure out why my video stream is updating so extremely slowly. I am grabbing frames very quickly, but the update on the screen gets progressively slower. I see somewhere around 30 seconds of lag when I first launch the program, and it eventually slows to a halt. I am connecting to three cameras but only displaying one at a time. The cameras all display and the selection buttons work. My only issue is the display refresh rate.
This is running in Python 3.7 on a Raspberry Pi 4. I can connect to the cameras via a web browser and there appears to be no lag.
I have been searching for answers but cannot seem to find anything that helps. Can anyone offer some help with this?
Here's my program (I have removed unrelated code):
#!/usr/bin/env python3
import time
from tkinter import *
import cv2
from PIL import Image, ImageTk

#GUI
class robotGUI:
    def __init__(self):
        self.selectedCam = "front"
        self.window = Tk()

        #Setup the window to fit the Raspberry Pi Touch Display = 800x480 and align top left
        self.window.geometry("800x480+0+0")
        self.window.overrideredirect(True)
        self.window.fullScreenState = False

        #Create Frame for Video Window
        self.videoFrame = Frame(self.window, relief=SUNKEN, bd=2)
        self.videoFrame.place(x=0, y=0, height=457, width=650)

        #Create the Video Window
        self.video = Label(self.videoFrame, bd=0, relief=FLAT, width=644, height=451)
        self.video.place(x=0, y=0)
        self.vid = VideoCapture()
        self.camUpdateFreq = 250
        self.updateCams()

        #Create the Button Frame
        self.buttonFrame = Frame(self.window, relief=FLAT)
        self.buttonFrame.place(x=651, y=0, height=457, width=149)

        #Create Buttons
        #Select Front Camera Button
        self.frontCamButton = Button(self.buttonFrame, text="Front Camera", command=lambda: self.selectCam("front"))
        self.frontCamButton.place(x=24, y=50, height=30, width=100)

        #Select Boom Camera Button
        self.boomCamButton = Button(self.buttonFrame, text="Boom Camera", command=lambda: self.selectCam("boom"))
        self.boomCamButton.place(x=24, y=130, height=30, width=100)

        #Select Rear Camera Button
        self.rearCamButton = Button(self.buttonFrame, text="Rear Camera", command=lambda: self.selectCam("rear"))
        self.rearCamButton.place(x=24, y=210, height=30, width=100)

        #Close Button
        self.exitButton = Button(self.buttonFrame, text="Close", command=self.window.destroy)
        self.exitButton.place(x=24, y=400, height=30, width=100)

        #Start the main loop for the gui
        self.window.mainloop()

    def selectCam(self, cam):
        #statusBarLeft is created in code removed for brevity
        if cam.lower() == "front":
            self.selectedCam = "front"
            self.statusBarLeft['text'] = "Front Camera Selected"
        elif cam.lower() == "boom":
            self.selectedCam = "boom"
            self.statusBarLeft['text'] = "Boom Camera Selected"
        elif cam.lower() == "rear":
            self.selectedCam = "rear"
            self.statusBarLeft['text'] = "Rear Camera Selected"

    def updateCams(self):
        #Get a frame from the selected camera
        ret, frame = self.vid.get_frame(self.selectedCam)
        if ret:
            imageCV2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imageCV2)
            imgPhoto = ImageTk.PhotoImage(image=img)
            self.video.imgPhoto = imgPhoto  # keep a reference so the image is not garbage collected
            self.video.configure(image=imgPhoto)
        self.window.after(self.camUpdateFreq, self.updateCams)

#Video Camera Class
class VideoCapture:
    def __init__(self):
        #Define Locals
        FrontCameraAddress = "rtsp://admin:password#192.168.5.20:8554/12"
        BoomCameraAddress = "rtsp://admin:password#192.168.5.21:8554/12"
        RearCameraAddress = "rtsp://admin:password#192.168.5.22:8554/12"

        #Open Video Camera Sources
        self.vidFront = cv2.VideoCapture(FrontCameraAddress)
        self.vidBoom = cv2.VideoCapture(BoomCameraAddress)
        self.vidRear = cv2.VideoCapture(RearCameraAddress)

        #Verify that the Camera Streams Opened
        if not self.vidFront.isOpened():
            raise ValueError("Unable to open video source to Front Camera")
        if not self.vidBoom.isOpened():
            raise ValueError("Unable to open video source to Boom Camera")
        if not self.vidRear.isOpened():
            raise ValueError("Unable to open video source to Rear Camera")

    #Get One Frame from the Selected Camera
    def get_frame(self, camera="front"):
        #Attempt to Get Front Camera Frame
        if camera.lower() == "front":
            #If Stream Still Open Return a Frame
            if self.vidFront.isOpened():
                ret, frame = self.vidFront.read()
                if ret:
                    #Return a boolean success flag and the current frame
                    return (ret, frame)
                else:
                    return (ret, None)
            else:
                return (False, None)
        #Attempt to Get Boom Camera Frame
        elif camera.lower() == "boom":
            #If Stream Still Open Return a Frame
            if self.vidBoom.isOpened():
                ret, frame = self.vidBoom.read()
                if ret:
                    #Return a boolean success flag and the current frame
                    return (ret, frame)
                else:
                    return (ret, None)
            else:
                return (False, None)
        #Attempt to Get Rear Camera Frame
        elif camera.lower() == "rear":
            #If Stream Still Open Return a Frame
            if self.vidRear.isOpened():
                ret, frame = self.vidRear.read()
                if ret:
                    #Return a boolean success flag and the current frame
                    return (ret, frame)
                else:
                    return (ret, None)
            else:
                return (False, None)
        else:
            return (False, None)

    #Release the video sources when the object is destroyed
    def __del__(self):
        if self.vidFront.isOpened():
            self.vidFront.release()
        if self.vidBoom.isOpened():
            self.vidBoom.release()
        if self.vidRear.isOpened():
            self.vidRear.release()

#Main Routine - Only run if called from main program instance
if __name__ == '__main__':
    try:
        #Create GUI Object
        app = robotGUI()
    except Exception as e:
        print("Exception: " + str(e))
    finally:
        print("Cleaning Up")
NOTE: In this copy of the program I am updating every 250 ms, but I have tried values as low as about 3 ms and the frames still lag behind. Is there a better way to do this?
NOTE2: After working with this more today, I realize that OpenCV buffers frames for each camera from the moment cv2.VideoCapture() is called for that camera. The read() function pulls the next frame from the buffer, which explains why the update takes so long and why the image I see on screen never catches up to reality. I changed my test code to connect to only one camera at a time and to call release() any time I am not actively viewing a camera, which improved things quite a bit. I also set the update function to run every 1 ms and now call grab() every cycle while only retrieving, processing, and displaying every 10th frame, which also helped some. I still have some lag that I would love to remove if anyone has any suggestions.
My RTSP stream shows zero noticeable lag when viewed in a web browser. Does anyone know how I can get the same effect in tkinter? I am not married to OpenCV.
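One common cure for the buffering described in NOTE2 is a reader thread per camera that drains the stream continuously and keeps only the newest frame, so read latency never accumulates. A minimal sketch under that assumption (the class and attribute names here are illustrative, not from the program above):

import threading
import cv2

class LatestFrameCapture:
    """Continuously drains a stream, keeping only the newest frame."""
    def __init__(self, address):
        self.cap = cv2.VideoCapture(address)
        self.frame = None
        self.lock = threading.Lock()
        threading.Thread(target=self._reader, daemon=True).start()

    def _reader(self):
        while self.cap.isOpened():
            ret, frame = self.cap.read()   # blocks until the next frame arrives
            if not ret:
                break
            with self.lock:
                self.frame = frame          # overwrite: stale frames are dropped

    def get_frame(self):
        with self.lock:
            if self.frame is None:
                return (False, None)
            return (True, self.frame.copy())

The tkinter after() callback then just calls get_frame() and always receives the most recent image, no matter how slowly the GUI itself updates.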

Opencv face detection on live video feed using Qt Gui performing slow

I have a project where I need to design a GUI in Qt. The design contains a widget where a live video feed is displayed from a USB webcam using OpenCV. The project will detect faces and will also recognize them, which means a lot of processing happens on each frame.
For this, I created a thread which initializes the camera and takes frames from it using OpenCV. It puts all the frames into a queue, and this queue is read by a function update_frame which basically displays the frame on the Qt widget. This works fine with no delay.
Inside the update_frame function I added the face detection, which made it perform very slowly. So I created another thread start_inferencing which reads frames from the queue and, after detecting a face, puts the frame into another queue q2, which update_frame then reads and displays. But it still responds very slowly. Below is the code:
import queue
import sys
import threading

import cv2
from PyQt5 import QtCore, QtGui, QtWidgets  # assuming PyQt5; PySide2 works the same way

# Ui_MainWindow, OwnImageWidget, face, set_initial_alert_temp and
# curr_temp_cel come from project code removed for brevity.

q = queue.Queue()
q2 = queue.Queue()
running = True  # flag consumed by grab(); defined here so the snippet runs

def grab(cam, qu, width, height):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    while running:
        frame = {}
        capture.grab()
        ret_val, img = capture.retrieve(0)
        frame["img"] = img
        if qu.qsize() < 100:
            qu.put(frame)
        else:
            print(qu.qsize())

class Logic(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        set_initial_alert_temp()
        self.window_width = self.ImgWidget.frameSize().width()
        self.window_height = self.ImgWidget.frameSize().height()
        self.ImgWidget = OwnImageWidget(self.ImgWidget)
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(1)
        self.outside_temp_text_box.setText(str(curr_temp_cel))

    def update_frame(self):
        if not q2.empty():
            frame1 = q2.get()
            img = frame1["img"]
            img_height, img_width, img_colors = img.shape
            scale_w = float(self.window_width) / float(img_width)
            scale_h = float(self.window_height) / float(img_height)
            scale = min([scale_w, scale_h])
            if scale == 0:
                scale = 1
            img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            height, width, bpc = img.shape
            bpl = bpc * width
            image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
            self.ImgWidget.setImage(image)

def start_inferencing():
    while True:
        if not q.empty():
            frame = q.get()
            img = frame["img"]
            face_bbox = face.detect_face(img)
            if face_bbox is not None:
                (f_startX, f_startY, f_endX, f_endY) = face_bbox.astype("int")
                f_startX = f_startX + 10
                f_startY = f_startY + 10
                f_endX = f_endX - 10
                f_endY = f_endY - 10
                cv2.rectangle(img, (f_startX, f_startY), (f_endX, f_endY), (0, 255, 0), 2)
                frame1 = {"img": img}
                if q2.qsize() < 100:
                    q2.put(frame1)
                else:
                    print(q2.qsize())

def main():
    capture_thread = threading.Thread(target=grab, args=(0, q, 640, 480))
    capture_thread.start()
    infer_thread = threading.Thread(target=start_inferencing)
    infer_thread.start()
    app = QtWidgets.QApplication(sys.argv)
    w = Logic(None)
    w.setWindowTitle('Test')
    w.show()
    app.exec_()

main()
Below is a summary of what's happening in the code:
camera -> frame -> queue.put # (reading frame from camera and putting it in queue)
queue.get -> frame -> detect face -> queue2.put # (getting frame from queue, detecting face in it and putting the updated frames in queue2)
queue2.get -> frame -> display it on qt widget # (getting frame from queue2 and display it on qt widget)
The main reason the live video feed is slow is that the frames read in the grab function cannot be processed fast enough, so the queue size keeps growing and everything becomes very slow. Is there a good approach that can detect the face and still show the feed without any delay? Please help. Thanks
The queue accumulates the frames that the thread does not manage to process, so there is no chance of ever catching up. That's why the queue is useless here. The working clock should be defined by arriving frames: each frame generates an event, which can be handled in its own processing thread; after the frame is processed, the processing thread generates another event, which is handled in yet another thread, say the GUI thread, and shows the result to the user.
If you absolutely need some buffer, look at a ring buffer: it has finite length.
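For reference, a minimal sketch of such a ring buffer in Python, assuming it is acceptable to silently drop the oldest frame when full (collections.deque with maxlen behaves exactly this way):

import collections
import threading

class FrameRingBuffer:
    """Fixed-size buffer: when full, the oldest frame is silently dropped."""
    def __init__(self, size=5):
        self.frames = collections.deque(maxlen=size)
        self.lock = threading.Lock()

    def put(self, frame):
        with self.lock:
            self.frames.append(frame)   # evicts the oldest frame if full

    def get_latest(self):
        with self.lock:
            return self.frames[-1] if self.frames else None

Unlike an unbounded queue.Queue, this keeps the display pinned to recent frames instead of accumulating a backlog.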
You have a producer/consumer sequence:
1. grab frame and push onto queue1
2. dequeue frame from queue1, process, and enqueue results on queue2
3. dequeue results from queue2 and display
From what you've stated, stage 2 is the bottleneck. That being the case, you could try assigning more resources (i.e. threads) to that stage, so that stage 2 has multiple threads reading from queue1, processing, and pushing results onto queue2. You just need to ensure that the processed data popped from queue2 is sequenced correctly, presumably by tagging each initial frame with a sequence number or id.
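A minimal sketch of that multi-worker idea, under the assumption that process() and show() are placeholders standing in for the face detection and the Qt display update (they are not functions from the question):

import heapq
import queue
import threading

q1 = queue.Queue(maxsize=100)   # producer puts (seq, frame) tuples here
q2 = queue.Queue(maxsize=100)

def process(frame):
    return frame                 # placeholder for the real face detection

def show(result):
    print("displaying frame")    # placeholder for the GUI update

def worker():
    while True:
        seq, frame = q1.get()
        q2.put((seq, process(frame)))

for _ in range(3):               # stage 2 gets several threads
    threading.Thread(target=worker, daemon=True).start()

def display_in_order():
    expected, pending = 0, []
    while True:
        heapq.heappush(pending, q2.get())
        # emit results strictly in sequence-number order
        while pending and pending[0][0] == expected:
            seq, result = heapq.heappop(pending)
            show(result)
            expected += 1

The heap holds any result that arrives out of order until its predecessors have been displayed, so the parallel workers cannot scramble the video.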

How to start and stop saving video frames according to a trigger with OpenCV VideoWriter

I am building an app that records frames from an IP camera through RTSP.
My engine is in charge of saving a video in mp4 with OpenCV's VideoWriter, and that part works well.
What I am looking for is to create startRecord and stopRecord class methods that will respectively start and stop recording according to a trigger (it could be an argument that I pass to the thread).
Does anyone know the best way to do that kind of thing?
Here is my class:
from threading import Thread
import cv2
import time
import multiprocessing
import threading

class RTSPVideoWriterObject(object):
    def __init__(self, src=0):
        # Create a VideoCapture object
        self.capture = cv2.VideoCapture(src)
        # Start the thread to read frames from the video stream
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # Read the next frame from the stream in a different thread
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()

    def endRecord(self):
        self.capture.release()
        self.output_video.release()
        exit(1)

    def startRecord(self, endRec):
        self.frame_width = int(self.capture.get(3))
        self.frame_height = int(self.capture.get(4))
        self.codec = cv2.VideoWriter_fourcc(*'mp4v')
        self.output_video = cv2.VideoWriter('fileOutput.mp4', self.codec, 30, (self.frame_width, self.frame_height))
        while True:
            try:
                self.output_video.write(self.frame)
                if endRec:
                    self.endRecord()
            except AttributeError:
                pass

if __name__ == '__main__':
    rtsp_stream_link = 'rtsp://foo:192.5545....'
    video_stream_widget = RTSPVideoWriterObject(rtsp_stream_link)
    stop_threads = False
    t1 = threading.Thread(target=video_stream_widget.startRecord, args=[stop_threads])
    t1.start()
    time.sleep(15)
    stop_threads = True
As you can see, in the main block I read frames and store them in a separate thread. Then I start recording (the record method runs an infinite loop, so it blocks), and after 15 seconds I try to pass a 'stop_record' argument to stop the recording properly.
A part of the code comes from Storing RTSP stream as video file with OpenCV VideoWriter
Does anyone have an idea?
I have read a lot about OpenCV being very tricky for multithreading.
N.
Instead of passing arguments to the thread, use an internal flag in the class to determine when to start/stop recording. The trigger could be as simple as pressing the spacebar to start/stop recording. When the spacebar is pressed, it switches an internal variable, say self.record, to True to start recording and False to stop recording. Specifically, to check when the spacebar is pressed, you can check if the key value returned from cv2.waitKey() is 32. If you want the trigger based on any other key, take a look at this post to determine the key code. Here's a quick example to start/stop recording a video using the spacebar:
from threading import Thread
import cv2

class RTSPVideoWriterObject(object):
    def __init__(self, src=0):
        # Create a VideoCapture object
        self.capture = cv2.VideoCapture(src)
        self.record = True

        # Default resolutions of the frame are obtained (system dependent)
        self.frame_width = int(self.capture.get(3))
        self.frame_height = int(self.capture.get(4))

        # Set up codec and output video settings
        self.codec = cv2.VideoWriter_fourcc(*'mp4v')
        self.output_video = cv2.VideoWriter('output.mp4', self.codec, 30, (self.frame_width, self.frame_height))

        # Start the thread to read frames from the video stream
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # Read the next frame from the stream in a different thread
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()

    def show_frame(self):
        # Display frames in main program
        if self.status:
            cv2.imshow('frame', self.frame)
            if self.record:
                self.save_frame()

        # Press Q on keyboard to stop recording
        key = cv2.waitKey(1)
        if key == ord('q'):
            self.capture.release()
            self.output_video.release()
            cv2.destroyAllWindows()
            exit(1)
        # Press spacebar to start/stop recording
        elif key == 32:
            if self.record:
                self.record = False
                print('Stop recording')
            else:
                self.record = True
                print('Start recording')

    def save_frame(self):
        # Save obtained frame into video output file
        self.output_video.write(self.frame)

if __name__ == '__main__':
    rtsp_stream_link = 'Your stream link!'
    video_stream_widget = RTSPVideoWriterObject(rtsp_stream_link)
    while True:
        try:
            video_stream_widget.show_frame()
        except AttributeError:
            pass
I solved the problem by making the flag a variable internal to the class, endRec in your case, instead of passing it as a thread argument.
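As one possible shape for that fix (a sketch, not the poster's exact code), a threading.Event can serve as the shared flag; the plain boolean in the question is copied by value when passed to the thread, so the thread never sees the update:

import threading
import time

class Recorder:
    def __init__(self):
        self.stop_event = threading.Event()   # shared, mutable flag

    def startRecord(self):
        while not self.stop_event.is_set():
            # write the current frame here
            time.sleep(1 / 30)                # stand-in for the write loop

    def stopRecord(self):
        self.stop_event.set()                 # the thread sees this immediately

recorder = Recorder()
t = threading.Thread(target=recorder.startRecord)
t.start()
time.sleep(15)
recorder.stopRecord()                         # recording stops cleanly
t.join()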

I try to get 2 frames from picamera. Why it works very slowly?

From a picamera.PiCamera() thread I try to get cls.frameCV2 = snap.array for processing in OpenCV, and another cls.frame = stream.read() for web streaming. Both work, but very slowly.
What is the problem? If I use only frameCV2 it works quite fast, and if I use only stream.read() it is also fine.
Here is my code snippet:
import time
import io
import threading
import cv2
import picamera
from picamera.array import PiRGBArray

class Camera(object):
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    frameCV2 = None  # same, but as a numpy array
    last_access = 0  # time of last client access to the camera

    def initialize(self):
        if Camera.thread is None:
            # start background frame thread
            Camera.thread = threading.Thread(target=self._thread)
            Camera.thread.start()
            # wait until frames start to be available
            while self.frame is None:
                time.sleep(0)

    def get_frame(self):
        Camera.last_access = time.time()
        self.initialize()
        return self.frame

    def get_frame_for_internal_proc(self):  # store frame for cv2 (I added this)
        Camera.last_access = time.time()
        self.initialize()
        return self.frameCV2

    @classmethod
    def _thread(cls):
        with picamera.PiCamera() as camera:
            # camera setup
            camera.resolution = (800, 600)
            camera.hflip = True
            camera.vflip = True

            # let camera warm up
            camera.start_preview()
            time.sleep(2)

            rawCapture = PiRGBArray(camera)  # I added this
            stream = io.BytesIO()
            for snap in camera.capture_continuous(rawCapture, 'bgr',
                                                  use_video_port=True):
                cls.frameCV2 = snap.array  # frame for cv2

                # store frame for web
                camera.capture(stream, format='jpeg')
                stream.seek(0)
                cls.frame = stream.read()

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()
                rawCapture.truncate(0)

                # if there hasn't been any clients asking for frames in
                # the last 10 seconds stop the thread
                if time.time() - cls.last_access > 10:
                    break
        cls.thread = None
So, I didn't find a way to make the code above work faster as written. The main reason for the low speed is the doubled capturing inside the camera.capture_continuous loop.
Anyhow, I solved the problem by using one capture for both frame and frameCV2, just converting the second into the first. I put self.frame = cv2.imencode('.jpg', self.frameCV2)[1].tobytes() in the get_frame() method, which converts the numpy matrix into a format fit for web streaming.
Now it works well.
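A minimal sketch of that change, assuming the rest of the class stays as above; the second camera.capture() call in _thread goes away and the JPEG is produced from the numpy frame on demand:

def get_frame(self):
    Camera.last_access = time.time()
    self.initialize()
    # convert the latest numpy frame to JPEG bytes for web streaming;
    # the per-loop camera.capture(stream, format='jpeg') is removed
    self.frame = cv2.imencode('.jpg', self.frameCV2)[1].tobytes()
    return self.frame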

Auto-capture an image from a video in OpenCV using python

I am trying to develop a program which functions as a self-timer camera. The video is shown in a window, the person's face and eyes are continuously detected, and once the user selects a specific time, the frame at that point in time is captured. I am able to capture the frame after a certain time using the sleep function in the time module, but then the video frame freezes. Is there any solution such that I can continue to see the video while the capture takes place automatically after some delay?
I am using this code:
import numpy as np
import cv2
import time
import cv2.cv as cv

cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame', frame)
    #time.sleep(01)
    Capture = cv.CaptureFromCAM(0)
    time.sleep(5)
    ret, frame = cap.read()
    image = cv.QueryFrame(Capture)  # here you have an IplImage
    imgarray = np.asarray(image[:,:])  # this is the way I use to convert it to numpy array
    cv2.imshow('capImage', imgarray)
    cv2.waitKey(0)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Can someone make a suggestion? Any kind of help would be appreciated.
In order to continuously view the video, you need to put the part of the code which displays the video in a while loop, making sure that the handle to the window is not lost. You can make the capture a mouse-click event and use tick counts: take one before the start of the while loop and one inside the loop. Once the difference between the two tick counts equals some pre-defined number of seconds, capture that frame, then use break to come out of the while loop; a sketch follows below.
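A minimal sketch of that tick-count approach, assuming a fixed 5 second delay rather than a mouse-click trigger; cv2.getTickCount() and cv2.getTickFrequency() are standard OpenCV timing calls:

import cv2

cap = cv2.VideoCapture(0)
delay_seconds = 5
start = cv2.getTickCount()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('frame', frame)   # video keeps playing, no sleep() freeze

    elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    if elapsed >= delay_seconds:
        cv2.imwrite('capture.jpg', frame)   # grab the live frame
        break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()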
You need to add another 'cap.read()' line when the delay ends, as this is the code that actually captures the image.
Use threading and run the cv2.imshow() loop separately from your processing function:
import threading
import cv2

def getFrame():
    global frame
    while True:
        frame = video_capture.read()[1]

def face_analyse():
    while True:
        # do whatever operations you want here
        pass

def realtime():
    while True:
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            video_capture.release()
            cv2.destroyAllWindows()
            break

if __name__ == "__main__":
    cam = 0  # camera index; this was left undefined in the original answer
    video_capture = cv2.VideoCapture(cam)
    frame = video_capture.read()[1]

    gfthread = threading.Thread(target=getFrame, args=())
    gfthread.daemon = True
    gfthread.start()

    rtthread = threading.Thread(target=realtime, args=())
    rtthread.daemon = True
    rtthread.start()

    fathread = threading.Thread(target=face_analyse, args=())
    fathread.daemon = True
    fathread.start()

    while True:  # keep the main thread alive while the daemon threads run
        pass
