I am new to async in Python and OpenCV.
I am working on sending an image from a webcam to a server and getting a value back.
However, the following code does not behave asynchronously.
It sends one frame, receives one result, and only then renders the next frame.
I want the webcam rendering to continue uninterrupted.
And I want to send the next frame as soon as the result for the previous one arrives from the server.
What am I missing?
import cv2
import base64
import asyncio
import aiohttp

async def faceanlyzer(image):
    async with aiohttp.ClientSession() as session:
        string = base64.b64encode(cv2.imencode('.jpg', image)[1]).decode()
        data = {
            "image": string
        }
        url = "http://localhost:5000/streamdata"
        async with session.post(url, data=data) as resp:
            result = await resp.json()
            print(result)

async def main():
    cv2.ocl.setUseOpenCL(False)
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        image = cv2.resize(frame, dsize=(640, 480), interpolation=cv2.INTER_AREA)
        await faceanlyzer(image)
        cv2.imshow('Video', cv2.resize(image, (1300, 1000), interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    asyncio.run(main())
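For reference, a minimal sketch of one way to get the behavior described above: launch the request with asyncio.create_task instead of awaiting it inline (the inline await is what blocks the loop), and only start a new request once the previous task has finished. The function and URL come from the code above; using asyncio.sleep(0) to hand control back to the event loop each frame is an assumption about the fix, not a verified solution.

async def main():
    cap = cv2.VideoCapture(0)
    task = None  # the request currently in flight, if any
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        image = cv2.resize(frame, dsize=(640, 480), interpolation=cv2.INTER_AREA)
        if task is None or task.done():
            # previous result is in; start the next request without blocking
            task = asyncio.create_task(faceanlyzer(image))
        cv2.imshow('Video', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        await asyncio.sleep(0)  # yield so the background task can make progress
    cap.release()
    cv2.destroyAllWindows()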
from flask import Flask, Response, request
import cv2

app = Flask(__name__)
video_path = "videos/video.avi"

@app.route('/video_feed/')
def video_feed():
    start_frame = int(request.args.get("start_frame"))
    end_frame = int(request.args.get("end_frame"))
    return Response(gen(start_frame, end_frame), mimetype='multipart/x-mixed-replace; boundary=frame')

def gen(start_frame, end_frame):
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    while True:
        success, img = cap.read()
        current_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        if current_frame > end_frame:
            break
        imgencode = cv2.imencode('.jpg', img)[1]
        stringData = imgencode.tobytes()  # can also use tostring()
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n' + stringData + b'\r\n')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000, debug=True)
So, this is the Flask server I am running. I am able to view the correct frames being yielded by the server at this URL (it doesn't matter that the video playback is too fast):
http://127.0.0.1:8000/video_feed/?start_frame=0&end_frame=5000
But I can't figure out how to consume this URL from a development environment like PyCharm, i.e. how to read the frames from it in a Python script. For example:
import urllib.request
import numpy as np
import cv2

url = "http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001"
while True:
    resp = urllib.request.urlopen(url)
    response = resp.read()
    data = response.split(b'\r\n\r\n', 1)[1]
    i = np.frombuffer(data, dtype=np.uint8)
    img = cv2.imdecode(i, cv2.IMREAD_UNCHANGED)
    cv2.imshow("frame", img)
    if cv2.waitKey(16) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
So this is what I have tried for reading the frames into PyCharm, but it only reads the first frame. I want it to ingest all of the frames from the URL. I know there is something I am not understanding about URLs or generator functions, so any refactoring or help is greatly appreciated!
On the Flask server side, the adjustment is made to the generator function:

yield (b'--frame\r\n'
       b'Content-Type: image/jpeg\r\n\r\n' + stringData + b'\r\n')
And then read in an IDE like PyCharm as follows:

import cv2

vcap = cv2.VideoCapture('http://127.0.0.1:8000/video_feed/?start_frame=4000&end_frame=5001')
while True:
    ret, frame = vcap.read()
    if frame is not None:
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        print("Frame is None")
        break
vcap.release()
cv2.destroyAllWindows()
print("Video stop")
I am reading an RTSP stream (a local RTSP link) from my CCTV camera connected over LAN.
My main goal: I want to perform some processing on the frames and display them via m3u8 in real time (or nearly real time), so that I can show the stream in the frontend using hls.js.
Currently I am trying to create a video file in real time, so that I can then build the m3u8 from it using ffmpeg.
Sharing my code below.
import cv2
from moviepy.editor import ImageClip, concatenate_videoclips

url = "rtsp://username:password@192.168.1.100:10554/Streaming/channels/401"
cap = cv2.VideoCapture(url)

def make_video_file(clips):
    try:
        print(f"clips = {clips}")
        video_clip = concatenate_videoclips(clips, method='compose')
        video_clip.write_videofile("video-output.mp4", fps=30)
    except Exception as e:
        print(e)

FRAME_COUNTER = 0
NUMBER_OF_FRAMES = 30
CLIPS = [0 for i in range(NUMBER_OF_FRAMES)]

while True:
    ret, frame = cap.read()
    # print(frame)
    if not ret:
        continue
    CLIPS.pop(0)
    # note: OpenCV frames are BGR while moviepy expects RGB, so a
    # cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) may be needed here
    CLIPS.append(ImageClip(frame).set_duration(1))
    if FRAME_COUNTER == NUMBER_OF_FRAMES:
        try:
            FRAME_COUNTER = 0
            make_video_file(CLIPS)
        except:
            pass
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    FRAME_COUNTER += 1  # count frames since the last file was written
cap.release()
cv2.destroyAllWindows()
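Since the goal is an m3u8 playlist in near real time, a minimal sketch of a different approach may help: skip moviepy entirely and pipe raw frames straight into an ffmpeg process that writes HLS segments. The resolution, frame rate, segment settings, and output path below are assumptions to illustrate the idea; adjust them to your stream.

import subprocess
import cv2

cap = cv2.VideoCapture("rtsp://username:password@192.168.1.100:10554/Streaming/channels/401")
width, height, fps = 1280, 720, 25  # assumptions; match your camera

ffmpeg = subprocess.Popen([
    'ffmpeg', '-y',
    '-f', 'rawvideo', '-pix_fmt', 'bgr24',   # raw BGR frames from OpenCV
    '-s', f'{width}x{height}', '-r', str(fps),
    '-i', '-',                               # read frames from stdin
    '-c:v', 'libx264', '-preset', 'veryfast',
    '-f', 'hls', '-hls_time', '2',
    '-hls_list_size', '5', '-hls_flags', 'delete_segments',
    'stream.m3u8',
], stdin=subprocess.PIPE)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (width, height))
    # ... do your per-frame processing here ...
    ffmpeg.stdin.write(frame.tobytes())

ffmpeg.stdin.close()
ffmpeg.wait()
cap.release()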
I am trying to interface my OpenCV program with the Raspberry Pi PiCamera. Every time I use OpenCV to capture video, the FPS drops drastically. When I capture video using the PiCamera library, everything is fine and smooth.
Why is this happening?
Is there a way to fix it?
This is my code:
import time
import RPi.GPIO as GPIO
from PCA9685 import PCA9685
import numpy as np
import cv2

try:
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FPS, 90)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 700)
    while True:
        ret, frame = cap.read()
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
except:
    pwm.exit_PCA9685()  # note: pwm is never created in this snippet, so this line would itself raise a NameError
    print("\nProgram end")
    exit()
cap.release()
cv2.destroyAllWindows()
This is the error I'm getting:
First of all, those are warnings, not errors.
Reduce the video dimensions by specifying them explicitly, for example as in the snippet below.
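A small sketch (640x480 is an assumption; use a size your camera supports):

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)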
cv2.VideoCapture also has a problem: it buffers frames. Incoming frames are queued, so if your per-frame processing is slower than the capture bandwidth, the video falls further and further behind.
So, here is a bufferless VideoCapture.
video_capture_Q_buf.py
import cv2, queue as Queue, threading, time

is_frame = True

# bufferless VideoCapture
class VideoCaptureQ:
    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = Queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()

    # read frames as soon as they are available, keeping only the most recent one
    def _reader(self):
        while True:
            ret, frame = self.cap.read()
            if not ret:
                global is_frame
                is_frame = False
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()  # discard previous (unprocessed) frame
                except Queue.Empty:
                    pass
            self.q.put(frame)

    def read(self):
        return self.q.get()
Using it:
test.py
import video_capture_Q_buf as vid_cap_q  # import as alias
from video_capture_Q_buf import VideoCaptureQ  # class import
import time

vid_path = 0  # placeholder: a video file path or camera index
cap = VideoCaptureQ(vid_path)

while True:
    t1 = time.time()
    if vid_cap_q.is_frame == False:
        print('no more frames left')
        break
    try:
        ori_frame = cap.read()
        # do your stuff
    except Exception as e:
        print(e)
        break
    t2 = time.time()
    print(f'FPS: {1/(t2-t1)}')
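A note on the design: the reader runs in a daemon thread and the queue acts as a one-slot mailbox. Each time a new frame arrives, any unread frame is thrown away, so read() always returns the freshest frame instead of a stale, buffered one.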
I used this tutorial to install and configure the memcached service on the server. Then I tried to interact with memcached through the pymemcache package.
I used this code as process1.py:
from time import sleep
from pymemcache.client.base import Client
from pymemcache import serde
import cv2
# import matplotlib.pyplot as plt

client = Client(('127.0.0.1', 11211),
                serializer=serde.python_memcache_serializer,
                deserializer=serde.python_memcache_deserializer)
# result = client.get('some_key')

cap = cv2.VideoCapture('video/vdo.mp4')
while True:
    ret, frame = cap.read()
    if not ret:  # check before caching so a None frame is never stored
        break
    client.set("image", frame)
    print("cached")
    # cv2.imshow('Frame', frame)
    # Press Q on keyboard to exit
    # if cv2.waitKey(25) & 0xFF == ord('q'):
    #     break
    sleep(0.05)
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
And this one as process2.py:
import subprocess
import numpy as np
import cv2
from flask import Flask, Response
# from process1 import client
from pymemcache.client.base import Client
from pymemcache import serde

app = Flask(__name__)
client = Client(('127.0.0.1', 11211),
                serializer=serde.python_memcache_serializer,
                deserializer=serde.python_memcache_deserializer)

def gen():
    while True:
        # Capture frame-by-frame
        img = np.asarray(client.get('image'), np.uint8)
        if img is not None:
            # img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
            frame = cv2.imencode('.jpg', img)[1].tobytes()
            yield b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'

@app.route('/')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

py_dir = "/home/masoud/anaconda3/envs/py37/bin/python"
process_dir = "/home/masoud/Desktop/PycharmProjects/share_memory_test"

if __name__ == '__main__':
    cmd = [py_dir, process_dir]
    subprocess.Popen(cmd)
    app.run(host='127.0.0.1', port=8888, debug=True)
The weird thing is that this code ran with no problem on my own laptop under Ubuntu 18. But when I ran it on a server in my office (Ubuntu 16.04), I got this runtime error:
File "codes/MSD/process1.py", line 139, in run
client.set("stream_image_cached", a)
File "anaconda3/envs/msd_ped_det/lib/python3.7/site-packages/pymemcache/client/base.py",
line 341, in set flags=flags)[key]
File "anaconda3/envs/msd_ped_det/lib/python3.7/site-packages/pymemcache/client/base.py",
line 933, in _store_cmd self.sock.sendall(b''.join(cmds))
ConnectionResetError: [Errno 104] Connection reset by peer
I wonder what else I have to check to make it work!
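One thing worth checking (an assumption based on the traceback, not a confirmed diagnosis): memcached rejects items larger than its item size limit, which defaults to 1 MB, and a raw pickled video frame can easily exceed that. A different -I (max item size) setting between the two machines' memcached servers would explain why the same code works on one and resets the connection on the other. A minimal sketch of shrinking the payload by caching a compressed JPEG instead of the raw array:

import cv2
import numpy as np

# process1.py side: cache a compressed JPEG instead of the raw ndarray
ok, jpg = cv2.imencode('.jpg', frame)
if ok:
    client.set("image", jpg.tobytes())

# process2.py side: decode it back before use
data = client.get('image')
img = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)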
I'm using OpenCV to capture video from my webcam. Every 5 seconds I process a single frame/image, which can take some seconds. So far everything works, but whenever a frame is being processed, the entire video freezes for a couple of seconds (until the processing is finished). I'm trying to get rid of the freeze using threading. Here is what I have done so far:
Inside the while loop which is capturing the video:
while True:
    ret, image = cap.read()
    if next_time <= datetime.now():
        content_type = 'image/jpeg'
        headers = {'content-type': content_type}
        _, img_encoded = cv2.imencode('.jpg', image)
        loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(self.async_faces(img_encoded, headers))
        loop.run_until_complete(future)
        next_time += period
    ...
    cv2.imshow('img', image)
Here are the methods:
async def async_faces(self, img, headers):
    with ThreadPoolExecutor(max_workers=10) as executor:
        loop = asyncio.get_event_loop()
        tasks = [
            loop.run_in_executor(
                executor,
                self.face_detection,
                *(img, headers)  # Allows us to pass in multiple arguments to `fetch`
            )
        ]
        for response in await asyncio.gather(*tasks):
            pass

def face_detection(self, img, headers):
    try:
        response = requests.post(self.url, data=img.tostring(), headers=headers)
        ...
    except Exception as e:
        ...
...
But unfortunately it's not working.
EDIT 1
In the following I describe what the whole thing is supposed to do.
Originally, the function looked like this:
import requests
import cv2
from datetime import datetime, timedelta

def face_recognition(self):
    # Start camera
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    emotional_states = []
    font = cv2.FONT_HERSHEY_SIMPLEX
    period = timedelta(seconds=self.time_period)
    next_time = datetime.now() + period
    cv2.namedWindow('img', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    while True:
        ret, image = cap.read()
        if next_time <= datetime.now():
            # Prepare headers for http request
            content_type = 'image/jpeg'
            headers = {'content-type': content_type}
            _, img_encoded = cv2.imencode('.jpg', image)
            try:
                # Send http request with image and receive response
                response = requests.post(self.url, data=img_encoded.tostring(), headers=headers)
                emotional_states = response.json().get("emotions")
                face_locations = response.json().get("locations")
            except Exception as e:
                emotional_states = []
                face_locations = []
                print(e)
            next_time += period
        for i in range(0, len(emotional_states)):
            emotion = emotional_states[i]
            face_location = face_locations[i]
            cv2.putText(image, emotion, (int(face_location[0]), int(face_location[1])),
                        font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow('img', image)
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            cv2.destroyAllWindows()
            cap.release()
            break
        if k == ord('a'):
            cv2.resizeWindow('img', 700, 700)
I use the above method to film myself, and the film is shown live on my screen. Furthermore, every 5 seconds one frame is sent to an API where the image is processed and the emotion of the person in the image is returned. This emotion is displayed on my screen next to me. The problem is that the live video freezes for a couple of seconds until the emotion comes back from the API.
My OS is Ubuntu.
EDIT 2
The API is running locally. I created a Flask app, and the following method receives the request:
from flask import Flask, request, Response
import numpy as np
import cv2
import json

app = Flask(__name__)

@app.route('/api', methods=['POST'])
def facial_emotion_recognition():
    # Convert string of image data to uint8
    nparr = np.fromstring(request.data, np.uint8)
    # Decode image
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Analyse the image (emotionDetection and NumpyEncoder are defined elsewhere)
    emotional_state, face_locations = emotionDetection.analyze_facial_emotions(img)
    json_dump = json.dumps({'emotions': emotional_state, 'locations': face_locations}, cls=NumpyEncoder)
    return Response(json_dump, mimetype='application/json')
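For what it's worth, a minimal sketch of one way to avoid the freeze without asyncio: submit the request to a long-lived ThreadPoolExecutor and keep the capture loop running, polling the future instead of waiting on it. The endpoint URL, the 5-second period, and the drawing step below are assumptions standing in for the code above.

from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
import cv2
import requests

executor = ThreadPoolExecutor(max_workers=1)
future = None
period = timedelta(seconds=5)
next_time = datetime.now() + period

cap = cv2.VideoCapture(0)
while True:
    ret, image = cap.read()
    if not ret:
        break
    if next_time <= datetime.now() and future is None:
        _, img_encoded = cv2.imencode('.jpg', image)
        # the POST runs in a worker thread, so this loop never blocks on the network
        future = executor.submit(requests.post, 'http://localhost:5000/api',
                                 data=img_encoded.tobytes(),
                                 headers={'content-type': 'image/jpeg'})
        next_time += period
    if future is not None and future.done():
        try:
            result = future.result().json()  # emotions/locations, as in the code above
            # ... draw result onto the frame here ...
        except Exception as e:
            print(e)
        future = None
    cv2.imshow('img', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()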