I'm using OpenCV to capture video from my webcam. Every 5 seconds I process a single frame, which can take a few seconds. So far everything works, but while a frame is being processed the entire video freezes for a couple of seconds (until the processing finishes). I'm trying to get rid of the freeze using threading. Here is what I have so far:
Inside the while loop that captures the video:
while True:
    ret, image = cap.read()

    if next_time <= datetime.now():
        content_type = 'image/jpeg'
        headers = {'content-type': content_type}
        _, img_encoded = cv2.imencode('.jpg', image)

        loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(self.async_faces(img_encoded, headers))
        loop.run_until_complete(future)

        next_time += period
    ...
    cv2.imshow('img', image)
Here are the methods:
async def async_faces(self, img, headers):
    with ThreadPoolExecutor(max_workers=10) as executor:
        loop = asyncio.get_event_loop()
        tasks = [
            loop.run_in_executor(
                executor,
                self.face_detection,
                *(img, headers)  # Allows us to pass multiple arguments to face_detection
            )
        ]
        for response in await asyncio.gather(*tasks):
            pass
def face_detection(self, img, headers):
    try:
        response = requests.post(self.url, data=img.tostring(), headers=headers)
        ...
    except Exception as e:
        ...
    ...
But unfortunately it's not working.
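For context, asyncio alone won't help here: loop.run_until_complete(future) blocks the capture loop until the executor task finishes, so the request still stalls the video. A minimal sketch of offloading the request to a plain fire-and-forget daemon thread instead (this assumes self.face_detection is safe to run off the main thread):

import threading

# Inside the capture loop: start the request in the background and
# keep reading/rendering frames while it runs.
if next_time <= datetime.now():
    headers = {'content-type': 'image/jpeg'}
    _, img_encoded = cv2.imencode('.jpg', image)
    threading.Thread(
        target=self.face_detection,   # existing method that does the POST
        args=(img_encoded, headers),
        daemon=True,
    ).start()
    next_time += period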
EDIT 1
Below I describe what the whole thing is supposed to do. Originally, the function looked like this:
import requests
import cv2
from datetime import datetime, timedelta

def face_recognition(self):
    # Start camera
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    emotional_states = []
    font = cv2.FONT_HERSHEY_SIMPLEX

    period = timedelta(seconds=self.time_period)
    next_time = datetime.now() + period

    cv2.namedWindow('img', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    while True:
        ret, image = cap.read()

        if next_time <= datetime.now():
            # Prepare headers for the HTTP request
            content_type = 'image/jpeg'
            headers = {'content-type': content_type}
            _, img_encoded = cv2.imencode('.jpg', image)

            try:
                # Send the HTTP request with the image and receive the response
                response = requests.post(self.url, data=img_encoded.tostring(), headers=headers)
                emotional_states = response.json().get("emotions")
                face_locations = response.json().get("locations")
            except Exception as e:
                emotional_states = []
                face_locations = []
                print(e)

            next_time += period

        for i in range(0, len(emotional_states)):
            emotion = emotional_states[i]
            face_location = face_locations[i]
            cv2.putText(image, emotion, (int(face_location[0]), int(face_location[1])),
                        font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)

        cv2.imshow('img', image)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            cv2.destroyAllWindows()
            cap.release()
            break
        if k == ord('a'):
            cv2.resizeWindow('img', 700, 700)
I use the above method to film myself. The film is shown live on my screen. Further, every 5 seconds one frame is sent to an API, where the image is processed such that the emotion of the person in the image is returned. This emotion is then displayed on my screen, next to the live video. The problem is that the live video freezes for a couple of seconds until the emotion is returned from the API.
My OS is Ubuntu.
EDIT 2
The API is running locally. I created a Flask app, and the following method receives the request:
from flask import Flask, request, Response
import numpy as np
import cv2
import json

app = Flask(__name__)

@app.route('/api', methods=['POST'])
def facial_emotion_recognition():
    # Convert the string of image data to uint8
    # (np.fromstring is deprecated; np.frombuffer does the same without copying)
    nparr = np.frombuffer(request.data, np.uint8)
    # Decode the image
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Analyse the image (emotionDetection is defined elsewhere)
    emotional_state, face_locations = emotionDetection.analyze_facial_emotions(img)

    json_dump = json.dumps({'emotions': emotional_state, 'locations': face_locations}, cls=NumpyEncoder)
    return Response(json_dump, mimetype='application/json')
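The NumpyEncoder used above isn't shown; a minimal sketch of what such an encoder typically looks like (an assumption, the original may differ):

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    # Hypothetical helper: converts NumPy types into plain Python
    # types so json.dumps can serialise them.
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        return super().default(obj)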
Related
from flask import Flask, Response, request, send_file
from moviepy.editor import VideoFileClip
import socket
import cv2

app = Flask(__name__)

video_path = "videos/video.avi"

@app.route('/video_feed/')
def video_feed():
    start_frame = int(request.args.get("start_frame"))
    end_frame = int(request.args.get("end_frame"))
    return Response(gen(start_frame, end_frame),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

def gen(start_frame, end_frame):
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

    while True:
        success, img = cap.read()
        current_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        if current_frame > end_frame:
            break
        imgencode = cv2.imencode('.jpg', img)[1]
        stringData = imgencode.tobytes()  # can also use tostring()
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n' + stringData + b'\r\n')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000, debug=True)
So, this is the Flask server I am running. I can view the correct frames being yielded by the server at this URL (it doesn't matter that the video playback is too fast):
http://127.0.0.1:8000/video_feed/?start_frame=0&end_frame=5000
But I can't figure out how to use this URL from a development environment like PyCharm, i.e. how to read the frames from this URL into a Python script. For example:
url="http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001"
while True:
resp = urllib.request.urlopen(url)
response = resp.read()
data = resp.split(b'\r\n\r\n', 1)[1]
i = np.frombuffer(data, dtype=np.uint8)
img = cv2.imdecode(i, cv2.IMREAD_UNCHANGED)
cv2.imshow("frame", img)
if cv2.waitKey(16) & 0xFF==ord('q'):
break
cv2.destroyAllWindows()
So this is what I have tried for reading the frames into PyCharm, but it only reads the first frame. I want it to ingest all of the frames from the URL. I know there is something I am not understanding about URLs or generator functions, so any refactoring or help is greatly appreciated!
On the Flask server side, the adjustment is made to the generator function:

yield (b'--frame\r\n'
       b'Content-Type: image/jpeg\r\n\r\n' + stringData + b'\r\n')
The stream can then be read in an IDE like PyCharm as follows (with the part Content-Type set to image/jpeg, OpenCV can consume the multipart stream as MJPEG through its FFmpeg backend):
import cv2

vcap = cv2.VideoCapture('http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001')

while True:
    ret, frame = vcap.read()
    if frame is not None:
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        print("Frame is None")
        break

vcap.release()
cv2.destroyAllWindows()
print("Video stop")
I am new to async in Python and OpenCV.
I am working on sending an image from a webcam to a server and getting a value back.
However, the following code does not work asynchronously: it only sends one frame, receives one result, and then renders the next frame.
I want the webcam rendering to continue, and I want to send the next frame once the result for the previous one comes back from the server.
What am I missing?
import cv2
import base64
import asyncio
import aiohttp

async def faceanlyzer(image):
    async with aiohttp.ClientSession() as session:
        string = base64.b64encode(cv2.imencode('.jpg', image)[1]).decode()
        data = {"image": string}
        url = "http://localhost:5000/streamdata"
        async with session.post(url, data=data) as resp:
            result = await resp.json()
            print(result)

async def main():
    cv2.ocl.setUseOpenCL(False)
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        image = cv2.resize(frame, dsize=(640, 480), interpolation=cv2.INTER_AREA)
        await faceanlyzer(image)
        cv2.imshow('Video', cv2.resize(image, (1300, 1000), interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    asyncio.run(main())
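For what it's worth, the await faceanlyzer(image) suspends main until the server replies, which is why rendering pauses on every frame. A sketch of one way around it (an assumption about the intent, not the original solution): launch the request as a background task, start a new one only when the previous one is done, and yield to the event loop once per frame so the task can make progress while the video keeps rendering.

import asyncio
import cv2

async def main():
    cap = cv2.VideoCapture(0)
    task = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # Start a new request only when the previous one has finished,
        # so results come back one at a time without blocking the loop.
        if task is None or task.done():
            task = asyncio.create_task(faceanlyzer(frame.copy()))
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        await asyncio.sleep(0)  # give the pending task a chance to run
    cap.release()
    cv2.destroyAllWindows()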
I am trying to send frames of a video to a remote server using requests. The code I am using is:
def send_request(frame_path="frame_on_disk_1.jpeg"):
    with open(frame_path, "rb") as f:
        files = {'upload': f}
        r = requests.post(URL, files=files)
    return r
So I am writing the frames to disk and then reading them back as bytes when sending them to the server, which is not the best way to do it.
However, I am not sure how to convert the array represented by the variable frame in the code below directly into a file-like bytes object, without touching the disk.
import cv2

cap = cv2.VideoCapture("video.MOV")
count = 0
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    cv2.imwrite(f"all_frames/frame_num_{count}.png", frame)
    count += 1
You can use io.BytesIO with cv2.imencode to encode an image into an in-memory buffer.
I've also used a queue, so frames are enqueued and the HTTP requests are done in separate threads.
import traceback

import cv2
from io import BytesIO
from queue import Queue
from threading import Thread
from requests import Session

URL = "http://example.com"
THREADS = 5
SAMPLE = "sample.mov"

class UploaderThread(Thread):
    def __init__(self, q, s):
        super().__init__()
        self.q = q
        self.s = s

    def run(self):
        # Consume (count, file) tuples until the "STOP" sentinel arrives
        for count, file in iter(self.q.get, "STOP"):
            try:
                r = self.s.post(URL, files={"upload": file})
            except Exception:
                traceback.print_exc()
            else:
                print(f"Frame ({count}): {r}")

def main():
    cap = cv2.VideoCapture(SAMPLE)
    q = Queue()
    s = Session()
    count = 0
    threads = []

    for _ in range(THREADS):
        t = UploaderThread(q, s)
        t.start()
        threads.append(t)

    while True:
        ret, frame = cap.read()
        count += 1
        if not ret:
            break
        _, img = cv2.imencode(".png", frame)
        q.put_nowait((count, BytesIO(img)))

    # One sentinel per worker so every thread exits its loop
    for _ in range(THREADS):
        q.put("STOP")
    for t in threads:
        t.join()

if __name__ == "__main__":
    main()
My project uses socket.io to send/receive data.
I added aiohttp to help display the results in the browser.
import asyncio
import socketio
from aiohttp import web

sio = socketio.AsyncServer(async_mode='aiohttp')
app = web.Application()
sio.attach(app)
I followed https://us-pycon-2019-tutorial.readthedocs.io/aiohttp_file_uploading.html to upload an image, but I cannot upload a video.
import io
import cv2
import PIL.Image

def gen1():
    # while True:
    #     if len(pm.list_image_display) > 1:
    image = cv2.imread("/home/duong/Pictures/Chess_Board.svg")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # img = PIL.Image.new("RGB", (64, 64), color=(255, 255, 0))
    image_pil = PIL.Image.fromarray(image)
    fp = io.BytesIO()
    image_pil.save(fp, format="JPEG")
    content = fp.getvalue()
    return content

async def send1():
    print("11")
    return web.Response(body=gen1(), content_type='image/jpeg')
How can I display video in the browser via aiohttp?
To stream a video in aiohttp you may open a StreamResponse in response to the fetching of an img HTML node:
@routes.get('/video')
async def video_feed(request):
    response = web.StreamResponse()
    response.content_type = 'multipart/x-mixed-replace; boundary=frame'
    await response.prepare(request)

    for frame in frames('/dev/video0'):
        await response.write(frame)

    return response
and send your frames in the form of bytes:
def frames(path):
    camera = cv2.VideoCapture(path)
    if not camera.isOpened():
        raise RuntimeError('Cannot open camera')

    while True:
        _, img = camera.read()
        img = cv2.resize(img, (480, 320))
        frame = cv2.imencode('.jpg', img)[1].tobytes()
        yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
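On the browser side the stream is consumed simply by pointing an img tag at the route. A minimal sketch of an index page serving such a tag (the route name and HTML here are assumptions):

@routes.get('/')
async def index(request):
    # Hypothetical page: the img node fetches /video, and the browser
    # swaps in each JPEG part of the multipart stream as it arrives.
    return web.Response(
        text='<html><body><img src="/video"></body></html>',
        content_type='text/html',
    )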
This may however be demanding on the network, as the bitrate required to send each frame individually is high. For real-time streaming with better compression you may want to use a WebRTC implementation like aiortc.
import numpy as np
import cv2
from hikvisionapi import Client

cap = cv2.VideoCapture()
# cap.open("rtsp://admin:DocoutBolivia@192.168.1.64:554/h264/ch0/sub")
cap.open("rtsp://admin:DocoutBolivia@192.168.1.64:554/Streaming/Channels/102/")

# cam = Client('http://192.168.1.64', 'admin', 'DocoutBolivia')
# rtsp://admin:password@192.168.1.64/h264/ch1/sub/
# response = cam.System.deviceInfo(method='get')

ret, frame = cap.read()
cv2.imwrite("holo.jpg", frame)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here

    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
I have this code and it's connecting and displaying well, but it's really slow. Is there another way of doing this with a bit less delay? I want to do face recognition with my HikVision IP camera.
Trying to load the stream directly with Python will not get you anywhere.
The only way to get extremely low latency is to use the .dll or .so files from the SDK provided by HikVision, and use ctypes to call the internal functions.
Below is a simple example I made before to access NET_DVR_PTZControl_Other. It is a lot of work if you want to develop your own application with their SDK, so I'd suggest requesting a sample Python application from your vendor.
For example,
import os, ctypes
import cv2

def add_dll(path, dll_list):
    # Recursively collect .dll files under the SDK directory
    for file in os.listdir(path):
        if not os.path.isdir(path + file):
            if file.endswith(".dll"):
                dll_list.append(path + file)
        else:
            add_dll(path + file + "/", dll_list)

def callCpp(func_name, *args):
    # Try each shared library until one exposes the requested function
    for so_lib in so_list:
        try:
            lib = ctypes.cdll.LoadLibrary(so_lib)
            try:
                value = eval("lib.%s" % func_name)(*args)
                print("Success: " + str(value))
                return value
            except:
                continue
        except:
            print("Fail: " + so_lib)
            continue
    return False
def NET_DVR_PTZControl_Other(lUserID, lChannel, dwPTZCommand, dwStop):
    res = callCpp("NET_DVR_PTZControl_Other", lUserID, lChannel, dwPTZCommand, dwStop)
    if res:
        print("Control Success")
    else:
        print("Control Fail: " + str(callCpp("NET_DVR_GetLastError")))
Get Stream Example
class NET_DVR_JPEGPARA(ctypes.Structure):
    _fields_ = [
        ("wPicSize", ctypes.c_ushort),     # WORD
        ("wPicQuality", ctypes.c_ushort)]  # WORD

def NET_DVR_CaptureJPEGPicture():
    # lUserID and lChannel come from an earlier SDK login call (not shown)
    sJpegPicFileName = bytes("pytest.jpg", "ascii")
    lpJpegPara = NET_DVR_JPEGPARA()
    lpJpegPara.wPicSize = 2
    lpJpegPara.wPicQuality = 1
    res = callCpp("NET_DVR_CaptureJPEGPicture", lUserID, lChannel, ctypes.byref(lpJpegPara), sJpegPicFileName)
    if res == False:
        error_info = callCpp("NET_DVR_GetLastError")
        print("Grab stream fail: " + str(error_info))
    else:
        print("Success")