The task is to recognize faces in a video stream, draw bounding boxes on the video frames, and show each person's name. I need to stream both the video frames and the metadata (the names) from the API. The API calls a GPU-intensive machine learning subroutine that can be made to return a frame and name pair as a Python tuple; to reduce computation, we tried to make a single function call per frame processed. The tuple contains a bytes-type frame and a string-type name.
How do I stream the video frames and the metadata (names) from the API?
def get_frame():
    recog = VideoFaceRecog(target="/video/m.mp4")
    while True:
        ret, frame = recog.cap.read()
        if not ret:
            print('end of the video file...')
            break
        frame = cv2.resize(frame, (640, 480))  # resize returns a new array, so the result must be kept
        frame, names, bounding_boxes = recog.frame_recog(frame)
        camera_frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + camera_frame + b'\r\n')
@app.route('/camera_feed', methods=['GET'])
def video_feed():
    return Response(stream_with_context(get_frame()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
recog = VideoFaceRecog()

def get_frame(recog):
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            print('end of the video file...')
            break
        frame, names, bounding_boxes = recog.frame_recog(frame)
        recog.add_name(names)
        camera_frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + camera_frame + b'\r\n')
Create the recognizer outside the generator, as above, and serve the metadata (the names) from a different endpoint.
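A minimal sketch of that second endpoint, assuming the shared recog object accumulates the recognized names in a recog.names list via add_name (that attribute name is an assumption; it is not shown in the original class):

from flask import Flask, Response, jsonify, stream_with_context

app = Flask(__name__)
recog = VideoFaceRecog()  # shared recognizer, created once at module level

@app.route('/camera_feed', methods=['GET'])
def video_feed():
    # Stream only the JPEG frames here.
    return Response(stream_with_context(get_frame(recog)),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/names', methods=['GET'])
def names_feed():
    # The browser polls this endpoint for the latest recognized names
    # while the <img> tag displays /camera_feed.
    return jsonify(names=recog.names)  # recog.names: assumed list filled by add_name()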
from flask import Flask, Response, request, send_file
from moviepy.editor import VideoFileClip
import socket
import cv2

app = Flask(__name__)

video_path = "videos/video.avi"

@app.route('/video_feed/')
def video_feed():
    start_frame = int(request.args.get("start_frame"))
    end_frame = int(request.args.get("end_frame"))
    return Response(gen(start_frame, end_frame), mimetype='multipart/x-mixed-replace; boundary=frame')

def gen(start_frame, end_frame):
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)  # property 1 is CAP_PROP_POS_FRAMES
    while True:
        success, img = cap.read()
        current_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        if current_frame > end_frame:
            break
        imgencode = cv2.imencode('.jpg', img)[1]
        stringData = imgencode.tobytes()  # can also use tostring()
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n' + stringData + b'\r\n')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000, debug=True)
So, this is the Flask server I am running. I am able to view the correct frames being yielded by the Flask server at this URL (it doesn't matter that the video playback is too fast):
http://127.0.0.1:8000/video_feed/?start_frame=0&end_frame=5000
But I can't seem to figure out how to use this URL from a development environment like PyCharm, i.e. how to read the frames from this URL into a Python script, for example:
url="http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001"
while True:
resp = urllib.request.urlopen(url)
response = resp.read()
data = resp.split(b'\r\n\r\n', 1)[1]
i = np.frombuffer(data, dtype=np.uint8)
img = cv2.imdecode(i, cv2.IMREAD_UNCHANGED)
cv2.imshow("frame", img)
if cv2.waitKey(16) & 0xFF==ord('q'):
break
cv2.destroyAllWindows()
So this is what I have tried for reading the frames into PyCharm, but it only reads the first frame. I want it to ingest all of the frames from the URL. I know there is something I am not understanding about URLs or generator functions, so any refactoring or help is greatly appreciated!
On the Flask server side the adjustment is made to the generator function:
yield (b'--frame\r\n'
       b'Content-Type: image/jpeg\r\n\r\n' + stringData + b'\r\n')
And then the stream is read in an IDE like PyCharm as follows:
import cv2

vcap = cv2.VideoCapture('http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001')
while True:
    ret, frame = vcap.read()
    if frame is not None:
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        print("Frame is None")
        break

vcap.release()
cv2.destroyAllWindows()
print("Video stop")
How can I make a QR code scanner in Django, like this web app, so that I can see the result as text rather than only the video image?
I have already written views.py like this:
def camera_feed(request):
    stream = CameraStream()
    frames = stream.get_frames()
    return StreamingHttpResponse(frames, content_type='multipart/x-mixed-replace; boundary=frame')

def detect(request):
    stream = CameraStream()
    success, frame = stream.camera.read()
    if success:
        status = True
    else:
        status = False
    return render(request, 'detect_barcodes/detect.html', context={'cam_status': status})
My camera_stream.py:
import cv2
import numpy as np
from pyzbar.pyzbar import decode  # assuming pyzbar, whose decode() matches the calls below

class CameraStream:
    def __init__(self):
        self.camera = cv2.VideoCapture(0)

    def get_frames(self):
        while True:
            # Capture frame-by-frame
            success, frame = self.camera.read()
            if not success:
                break
            else:
                ret, buffer = cv2.imencode('.jpg', frame)
                color_image = np.asanyarray(frame)
                if decode(color_image):
                    for barcode in decode(color_image):
                        barcode_data = barcode.data.decode('utf-8')
                else:
                    frame = buffer.tobytes()
                    # hasil2 = b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + barcode_frame + b'\r\n\r\n'
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
This is my urls.py:
path('camera_feed', views.camera_feed, name='camera_feed'),
path('detect_barcodes', views.detect, name='detect_barcodes'),
And I use the HTML like this:
<img src="{% url 'qrcode' request.path %}" width="120px" height="120px;">
How can I pass the decoded result to the HTML?
You should use a JavaScript library such as Instascan on the client side rather than OpenCV; there is no need to decode on the backend, because it increases server load, and on the client you can easily pass the decoded value to the page.
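If you do want to keep the OpenCV/pyzbar backend instead, one possible approach is a shared holder for the most recent decoded text plus a small JSON view that the template polls. This is only a sketch; latest_result and barcode_result are hypothetical names, not part of the original code.

# camera_stream.py -- update a shared holder whenever a code is decoded
latest_result = {"text": ""}   # hypothetical module-level holder
# inside CameraStream.get_frames(), after barcode_data is decoded:
#     latest_result["text"] = barcode_data

# views.py -- a small JSON endpoint the template can poll with fetch()
from django.http import JsonResponse
from .camera_stream import latest_result

def barcode_result(request):
    return JsonResponse(latest_result)

# urls.py
#     path('barcode_result', views.barcode_result, name='barcode_result'),

The template can then call this endpoint on a timer and write the returned text into a plain HTML element next to the <img> stream.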
I am trying to stream live video from a Raspberry Pi Zero using an Arducam B0112.
I have written a function to stream video, but the frame rate is low. Can you please suggest an alternative function to stream the video and display it on localhost with a better frame rate?
The video streaming function I have is as follows:
import cv2
import numpy as np
from picamera import PiCamera

# camera is assumed to be a picamera.PiCamera instance matching the 320x240 buffer below
camera = PiCamera(resolution=(320, 240))

def gen():
    """Video streaming generator function."""
    output = np.empty((240, 320, 3), dtype=np.uint8)
    while True:
        # Capture a single RGB frame into the preallocated numpy array
        camera.capture(output, 'rgb')
        ret, buffer = cv2.imencode('.jpg', output, [int(cv2.IMWRITE_JPEG_QUALITY), 85])
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
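One alternative that usually gives a better frame rate on a Pi Zero is to capture through the camera's video port and let the GPU produce the JPEG, which removes the per-frame cv2.imencode call. A minimal sketch, assuming the picamera library; the resolution and framerate values are only illustrative:

import io
from picamera import PiCamera

camera = PiCamera(resolution=(320, 240), framerate=24)

def gen():
    """Stream JPEG frames captured through the camera's video port."""
    stream = io.BytesIO()
    # use_video_port=True trades a little image quality for much faster capture,
    # and format='jpeg' makes the camera hand back already-encoded frames.
    for _ in camera.capture_continuous(stream, format='jpeg', use_video_port=True):
        stream.seek(0)
        frame = stream.read()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        stream.seek(0)      # reset the in-memory buffer for the next frame
        stream.truncate()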
I have a problem with my IP camera when trying to stream it in a Flask web app using OpenCV. My HTML page shows the camera streaming window, but it captures a frame only every 2-3 minutes, and I get this error: [h264 @ 0x130e6e0] error while decoding MB 14 2, bytestream -15
import cv2
from flask import Flask, Response

app = Flask(__name__)

camera = cv2.VideoCapture('rtsp://admin:12345@192.168.1.105:554/user=admin_password=12345_channe0_stream=0.sdp')

def gen_frames():  # generate frames one by one from the camera
    while True:
        # Capture frame-by-frame
        success, frame = camera.read()  # read the camera frame
        if not success:
            break
        else:
            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')  # concatenate the frames one by one

@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
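The long gaps are usually caused by RTSP frames piling up in OpenCV's buffer faster than the Flask generator consumes them, and dropped packets produce the h264 decode errors. A common workaround, sketched below under the assumption that your camera URL is the one shown above, is a background thread that keeps only the newest frame; forcing TCP transport for RTSP is also worth trying.

import os
import threading
import time
import cv2

# Ask OpenCV's FFmpeg backend to use TCP for RTSP; dropped UDP packets are a
# frequent cause of "error while decoding MB" messages. Must be set before
# the VideoCapture is created.
os.environ.setdefault('OPENCV_FFMPEG_CAPTURE_OPTIONS', 'rtsp_transport;tcp')

class LatestFrameReader:
    """Read from the camera in a background thread and keep only the newest frame."""

    def __init__(self, source):
        self.capture = cv2.VideoCapture(source)
        self.frame = None
        self.lock = threading.Lock()
        threading.Thread(target=self._update, daemon=True).start()

    def _update(self):
        while True:
            success, frame = self.capture.read()
            if success:
                with self.lock:
                    self.frame = frame

    def read(self):
        with self.lock:
            return self.frame

reader = LatestFrameReader('rtsp://admin:12345@192.168.1.105:554/user=admin_password=12345_channe0_stream=0.sdp')

def gen_frames():
    while True:
        frame = reader.read()
        if frame is None:
            time.sleep(0.01)   # camera not ready yet
            continue
        ret, buffer = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')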
I have a class that gets frames from a camera using a get_frame method. In a web context, I need to add some data around each frame before streaming it to the browser. When I try to add the extra information (some bytes) to the frame, I get TypeError: can't concat bytes to generator. How do I concatenate this data?
def gen():
    camera = VideoCamera()
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

class VideoCamera:
    def __init__(self):
        self.video = cv2.VideoCapture(0)

    def get_frame(self):
        while True:
            ret, frame = self.video.read()
            # face is the list of all faces detected in the frame, using the dlib library
            face = detector(gray, 0)
            for (J, rect) in enumerate(face):
                ret, jpeg = cv2.imencode('.jpg', frame)
                yield jpeg.tobytes()
As written, calling get_frame returns a generator, not an individual frame. You need to iterate over that generator to get individual frames, which you can then yield along with the other data.
def gen():
    camera = VideoCamera()
    for frame in camera.get_frame():
        yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
        yield frame
        yield b'\r\n\r\n'
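Alternatively, you can make get_frame return a single encoded frame per call (return instead of yield), so the original gen() that concatenates bytes works unchanged. A minimal sketch, with the dlib detection step omitted for brevity:

class VideoCamera:
    def __init__(self):
        self.video = cv2.VideoCapture(0)

    def get_frame(self):
        # Read until a frame is captured and encoded, then return its bytes.
        while True:
            ret, frame = self.video.read()
            if not ret:
                continue
            ret, jpeg = cv2.imencode('.jpg', frame)
            if ret:
                return jpeg.tobytes()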