I have a Flask web application which reads one image and displays it in my web browser.
app.py
from flask import Response
from flask import Flask
from flask import render_template
import cv2

app = Flask(__name__)

@app.route("/")
def index():
    return render_template("index.html")

def GetImage():
    global img
    while True:
        yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(img) + b'\r\n')

@app.route("/stream")
def stream():
    return Response(GetImage(), mimetype = "multipart/x-mixed-replace; boundary=frame")

if(__name__ == "__main__"):
    img = cv2.imread("Cat.jpg", 0)
    app.run(debug = True, threaded = True, use_reloader = False)
index.html
<html>
<body>
<img src="{{ url_for('stream') }}">
</body>
</html>
This example doesn't work: the image isn't displayed in the browser.
But when I change GetImage in the following way, the image is displayed in the browser:
def GetImage():
    global img
    (flag, encodedImage) = cv2.imencode(".jpg", img)
    while True:
        yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) + b'\r\n')
So why do I need this imencode? The image is already stored as a JPG on my hard disk, so why do I have to encode it again as JPG?
If you have a JPEG file then you can use the standard open() and read() to read it as raw bytes, without decompressing it into an array of pixels - so later you don't have to compress it back to JPEG data using imencode():
img = open("Cat.jpg", "rb").read()
and then you can display it
b'Content-Type: image/jpeg\r\n\r\n'+ img + b'\r\n'
I read it directly in bytes mode - open(..., 'rb') - so I don't have to use bytearray() to convert a string to bytes. Besides, reading in text mode could convert some characters (like newlines) and create incorrect data.
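A quick way to see the difference (a minimal sketch, assuming Cat.jpg sits in the working directory): cv2.imread() decodes the file into a NumPy array of pixels, while open(..., 'rb').read() keeps the already-compressed JPEG bytes which can be sent to the browser as they are.

import cv2

arr = cv2.imread("Cat.jpg", 0)        # decoded pixel array (numpy.ndarray) - no longer JPEG data
raw = open("Cat.jpg", "rb").read()    # compressed JPEG bytes - can be sent as-is

print(type(arr), arr.shape)           # <class 'numpy.ndarray'> (height, width)
print(raw[:2])                        # b'\xff\xd8' - the JPEG start-of-image marker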
But to send a single file you can use send_file():
@app.route("/image")
def image():
    return send_file('Cat.jpg')
Working example.
Opening http://127.0.0.1:5000/stream in Chrome shows the image. My Firefox had a problem displaying the image - it kept reading data all the time - until I added time.sleep().
I also added a version which adds the Content-Length header:
from flask import Flask, Response, send_file
import time

app = Flask(__name__)

@app.route("/")
def index():
    return "Hello World"

def get_image():
    while True:
        yield(b'--frame\r\n'
              b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n')
        time.sleep(0.01)  # my Firefox needs some time to display image / Chrome displays image without it

def get_image_with_size():
    length = str(len(img)).encode()  # convert to bytes
    while True:
        yield(b'--frame\r\n'
              b'Content-Type: image/jpeg\r\n'
              b'Content-Length: ' + length + b'\r\n'
              b'\r\n' + img + b'\r\n')
        time.sleep(0.01)  # my Firefox needs some time to display image / Chrome displays image without it

@app.route("/stream")
def stream():
    return Response(get_image(), mimetype="multipart/x-mixed-replace; boundary=frame")

@app.route("/image")
def image():
    return send_file('Cat.jpg')

if(__name__ == "__main__"):
    img = open('Cat.jpg', 'rb').read()
    app.run()
from flask import Flask, Response, request, send_file
from moviepy.editor import VideoFileClip
import socket
import cv2 as cv2

app = Flask(__name__)

video_path = "videos/video.avi"

@app.route('/video_feed/')
def video_feed():
    start_frame = int(request.args.get("start_frame"))
    end_frame = int(request.args.get("end_frame"))
    return Response(gen(start_frame, end_frame), mimetype='multipart/x-mixed-replace; boundary=frame')

def gen(start_frame, end_frame):
    cap = cv2.VideoCapture(video_path)
    cap.set(1, start_frame)
    while True:
        success, img = cap.read()
        current_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        if current_frame > end_frame:
            break
        imgencode = cv2.imencode('.jpg', img)[1]
        stringData = imgencode.tobytes()
        # can also use tostring()
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n' + stringData + b'\r\n')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000, debug=True)
So, this is the Flask server I am running. I am able to view the correct frames being yielded by the Flask server at this URL (it doesn't matter that the video playback is too fast):
http://127.0.0.1:8000/video_feed/?start_frame=0&end_frame=5000
But I can't figure out how to use this URL in a development environment like PyCharm, i.e. how to read the frames from this URL into a Python script, for example.
url="http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001"
while True:
resp = urllib.request.urlopen(url)
response = resp.read()
data = resp.split(b'\r\n\r\n', 1)[1]
i = np.frombuffer(data, dtype=np.uint8)
img = cv2.imdecode(i, cv2.IMREAD_UNCHANGED)
cv2.imshow("frame", img)
if cv2.waitKey(16) & 0xFF==ord('q'):
break
cv2.destroyAllWindows()
So this is what I have tried for reading the frames into PyCharm, but it only reads the first frame. I want it to ingest all of the frames from the URL. I know there is something I am not understanding about URLs or generator functions, so any refactoring or help is greatly appreciated!
On the Flask server side the adjustment is made to the generator function:
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + stringData + b'\r\n')
And then the stream can be read in an IDE like PyCharm like this:
import cv2

vcap = cv2.VideoCapture('http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001')

while True:
    ret, frame = vcap.read()
    if frame is not None:
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        print("Frame is None")
        break

vcap.release()
cv2.destroyAllWindows()
print("Video stop")
I am using the following code to display a camera feed in a Flask web app.
App.py
from flask import Flask, render_template, Response
import cv2

app = Flask(__name__)

camera = cv2.VideoCapture(0)

def gen_frames():
    while True:
        success, frame = camera.read()  # read the camera frame
        if not success:
            break
        else:
            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')  # concat frame one by one and show result

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == "__main__":
    app.run(debug=True)
templates/index.html
<body>
<div>
<img src="{{ url_for('video_feed') }}" width="50%">
</div>
</body>
I want to dynamically print the frame[0][0][0] value, separated by commas, at the bottom of the video, like below.
Video Feed
51, 37, 222, 67, ...
Could you please help me with that?
Thanks in advance
The simplest method is to run JavaScript code with a loop which periodically gets this value from the server and puts it into the HTML.
But frame has to be a global variable so it can be accessed in another function in Flask. And after converting the frame to JPG you have to use a different variable for this value.
This method may sometimes have a problem with synchronization: it may get the value from a new frame while the browser still displays the old frame.
Minimal working code
It uses setInterval(function, 40) to run the function every 40 ms (which gives 25 executions per second - like 25 frames per second). This function uses fetch() to get the value (JSON) from the URL /get_value and display it in <div id="value">.
EDIT: I use frame[0,0,0].tolist() instead of frame[0,0].tolist() to get only BLUE (cv2 keeps pixels as B,G,R instead of R,G,B). I also append all values to innerText instead of replacing the previous value.
from flask import Flask, render_template_string, jsonify, Response
import cv2
import time

app = Flask(__name__)

camera = cv2.VideoCapture(0)

#success, frame = camera.read() # default value at start
success = False  # default value at start
frame = None     # default value at start

def gen_frames():
    global success
    global frame

    while True:
        success, frame = camera.read()  # read the camera frame
        if not success:
            break
        else:
            ret, buffer = cv2.imencode('.jpg', frame)
            image = buffer.tobytes()  # use other variable instead of `frame`
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n'
                   b'\r\n' + image + b'\r\n')  # concat frame one by one and show result
            time.sleep(0.04)  # my `Firefox` needs this to have time to display image.
                              # And this gives stream with 25 FPS (Frames Per Second) (1s/0.04s = 25)

@app.route('/get_value')
def get_value():
    #if frame is not None:
    if success:
        value = frame[0,0,2].tolist()
    else:
        value = ['?']
    #print(value)
    return jsonify(value)

@app.route('/')
def index():
    return render_template_string('''
<body>
<div>
<img src="{{ url_for('video_feed') }}" width="50%">
</div>

<div id="value"></div>

<script>
place = document.querySelector("#value");

setInterval(function(){
    console.log("run function");

    fetch("/get_value")
    .then(response => response.json())
    .then(data => {
        if(place.innerText.length > 0){
            place.innerText += ",";
        }
        place.innerText += data;
    })
}, 40);
</script>
</body>
''')

@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == "__main__":
    try:
        app.run(debug=True)  #, use_reloader=False)
    except KeyboardInterrupt:
        print("Stopped by `Ctrl+C`")
    finally:
        camera.release()
I want to get the signal from a webcam or CSI cam on my Jetson Nano, display it in a web browser, and press buttons to take a snapshot; depending on the button, the picture should be placed in a different folder.
I made the following code, which seems to work, but after a few button presses the browser starts to load indefinitely. After a while the browser no longer loads indefinitely: it loads until I press one of the buttons, and in that case I am no longer able to see the live signal from the camera, I only see the snapshot taken when I pressed the button.
from flask import Flask, render_template, Response, request
import cv2
import datetime, time
import os, sys
import numpy as np
from threading import Thread

global rec_frame, Polluted, Clear, Treated, OutOfService
Polluted=0
Clear=0
Treated=0
OutOfService=0

#instatiate flask app
app = Flask(__name__, template_folder='./templates')

#select webcam or CSI
#camera = cv2.VideoCapture("nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)960,format=(string)NV12, framerate=(fraction)20/1 ! nvvidconv flip-method=0 ! video/x-raw,format=(string)BGRx ! videoconvert ! video/x-raw,width=(int)1280, height=(int)960, format=(string)BGR ! appsink"
#, cv2.CAP_GSTREAMER)
camera = cv2.VideoCapture(1)

def snapshot(frame,folder):
    cropped = frame[100:400,200:500]
    resized = cv2.resize(cropped,(100,100))
    cv2.imwrite("/home/ava/Documents/AVA/Get pictures/" + folder + "/frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg",cv2.cvtColor(resized,cv2.COLOR_RGB2BGR))

def gen_frames():  # generate frame by frame from camera
    global rec_frame,Polluted,Clear,Treated,OutOfService
    while True:
        success,frame = camera.read()
        if success:
            #get snapshot if button pressed
            if(Polluted):
                snapshot(frame,"Polluted Water")
                Polluted = 0
            if(Clear):
                snapshot(frame,"Cleared Water")
                Clear = 0
            if(Treated):
                snapshot(frame,"Treated Water")
                Treated = 0
            if(OutOfService):
                snapshot(frame,"Out of Service")
                OutOfService = 0

            try:
                ret, buffer = cv2.imencode('.jpg', cv2.flip(frame,1))
                frame = buffer.tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            except Exception as e:
                print('pass')
                pass
        else:
            pass

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/requests',methods=['POST','GET'])
def tasks():
    global switch,camera
    if request.method == 'POST':
        if request.form.get('Polluted') == 'Polluted':
            global Polluted,rec_frame
            Polluted=1
            print('in polluted')
        elif request.form.get('Clear') == 'Clear':
            global Clear
            Clear = 1
        elif request.form.get('Treated') == 'Treated':
            global Treated
            Treated = 1
        elif request.form.get('OutOfService') == 'OutOfService':
            global OutOfService
            OutOfService = 1
    elif request.method=='GET':
        return render_template('index.html')
    return render_template('index.html')

if __name__ == '__main__':
    app.run()

camera.release()
cv2.destroyAllWindows()
How can I make a QR code scanner in Django (like this web app) so I can see the result as text instead of just the image/video stream?
I already made the views.py like this:
def camera_feed(request):
    stream = CameraStream()
    frames = stream.get_frames()
    return StreamingHttpResponse(frames, content_type='multipart/x-mixed-replace; boundary=frame')

def detect(request):
    stream = CameraStream()
    success, frame = stream.camera.read()
    if success:
        status = True
    else:
        status = False
    return render(request, 'detect_barcodes/detect.html', context={'cam_status': status})
my camera_stream.py
class CameraStream(str):
    def __init__(self):
        self.camera = cv2.VideoCapture(0)

    def get_frames(self):
        while True:
            # Capture frame-by-frame
            success, frame = self.camera.read()
            if not success:
                break
            else:
                ret, buffer = cv2.imencode('.jpg', frame)
                color_image = np.asanyarray(frame)
                if decode(color_image):
                    for barcode in decode(color_image):
                        barcode_data = (barcode.data).decode('utf-8')
                else:
                    frame = buffer.tobytes()
                    #hasil2 = b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + barcode_frame + b'\r\n\r\n'
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
This is my urls.py:
path('camera_feed', views.camera_feed, name='camera_feed'),
path('detect_barcodes', views.detect, name='detect_barcodes'),
and I use the HTML like this:
<img src="{% url 'qrcode' request.path %}" width="120px" height="120px;">
How can I pass the result to the HTML?
You should use the JavaScript library Instascan rather than OpenCV - there is no need to involve the backend (which would increase server load), and you can easily pass the decoded value.
My project uses socket.io to send/receive data.
I added aiohttp to help display the results on the browser.
import asyncio
import socketio
from aiohttp import web

sio = socketio.AsyncServer(async_mode='aiohttp')
app = web.Application()
sio.attach(app)
I followed
https://us-pycon-2019-tutorial.readthedocs.io/aiohttp_file_uploading.html
to upload an image but I cannot upload a video.
import io
import cv2
import PIL.Image

def gen1():
    # while True:
    #     if len(pm.list_image_display) > 1 :
    image = cv2.imread("/home/duong/Pictures/Chess_Board.svg")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # img = PIL.Image.new("RGB", (64, 64), color=(255,255,0))
    image_pil = PIL.Image.fromarray(image)
    fp = io.BytesIO()
    image_pil.save(fp, format="JPEG")
    content = fp.getvalue()
    return content

async def send1():
    print("11")
    return web.Response(body=gen1(), content_type='image/jpeg')
How can I display video via aiohttp in browsers?
To stream a video in aiohttp you may open a StreamResponse in response to the fetching of an <img> HTML node:
@routes.get('/video')
async def video_feed(request):
    response = web.StreamResponse()
    response.content_type = 'multipart/x-mixed-replace; boundary=frame'
    await response.prepare(request)

    for frame in frames('/dev/video0'):
        await response.write(frame)

    return response
and send your frames in the form of bytes:
def frames(path):
    camera = cv2.VideoCapture(path)
    if not camera.isOpened():
        raise RuntimeError('Cannot open camera')

    while True:
        _, img = camera.read()
        img = cv2.resize(img, (480, 320))
        frame = cv2.imencode('.jpg', img)[1].tobytes()
        yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
This may however be network-demanding, as the bitrate required to send each frame individually as a JPEG is high. For real-time streaming with better compression you may want to use a WebRTC implementation like aiortc.
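For a rough sense of scale (my own back-of-the-envelope estimate, assuming roughly 30 KB per 480x320 JPEG frame - the real size depends heavily on content and quality): at 25 frames per second that is about 25 × 30 KB ≈ 750 KB/s, i.e. roughly 6 Mbit/s per viewer, whereas a proper video codec can deliver the same resolution at a small fraction of that bitrate.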