I want to send and receive images from cv2.VideoCapture over a WebSocket. The JSON arrives, but the image field cannot be decoded. I need a result that can be displayed with cv2.imshow(). Can somebody help?
This is the client:
ret, image_np = cap.read()
IMAGE_SHAPE = image_np.shape
encoded_image = base64.b64encode(image_np)
print(type(encoded_image))
payload = {
    'from': 'rasp',
    'image': str(encoded_image),
    'shape': IMAGE_SHAPE,
}
data = json.dumps(payload)
try:
    # Send encoded image data.
    await websocket.send(data)
    # Receive server message.
    received_data = await websocket.recv()
    print('< {}'.format(received_data))
    # image = base64.b64decode(received_data)
    # np_image = np.fromstring(image, dtype=np.uint8)
    # source = np_image.reshape(IMAGE_SHAPE)
    return websocket
except Exception:
    print('WebSocket send or receive error.')
    exit(1)
This is the server:
async def server_on(websocket, path):
    payload = {
        'from': 'image-server',
        # 'result': {
        #     # Default is null.
        #     'isPerson': <bool>,
        #     'centerPoint': <(x, y)>,
        # },
    }
    data = json.dumps(payload)
    try:
        payload = await websocket.recv()
        receive_data = json.loads(payload)
        # This line doesn't work...
        decoded_image = base64.b64decode(receive_data['image'])
        image_np = np.fromstring(decoded_image, dtype=np.uint8)
        source = image_np.reshape(receive_data['shape'])
        await websocket.send(data)
    except Exception:
        websocket.close()
        return
In your client you have an extra operation that isn't needed. Based on your latest comment, you should not need str(encoded_image): calling str() on a bytes object produces the literal "b'...'" representation, which corrupts the base64 payload. You could use base64.encodestring(image_np) instead, which returns an encoded bytes object you can decode to a string. (Note that encodestring has been deprecated since Python 3.1 and was removed in 3.9; base64.encodebytes is the modern equivalent.)
ret, image_np = cap.read()
IMAGE_SHAPE = image_np.shape
encoded_image = base64.encodestring(image_np)
print(type(encoded_image))
payload = {
    'from': 'rasp',
    'image': encoded_image.decode('utf-8'),
    'shape': IMAGE_SHAPE,
}
Related
I have a POST request that simply takes camera addresses from clients. Now I want to pass the response from this POST request to another function that does the streaming via a WebSocket.
How can I pass the result of the POST-request function as input to get_stream?
POST request function to take the camera address:
from typing import Union

from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
from pydantic import BaseModel
import cv2
import uvicorn

app = FastAPI()

class Address(BaseModel):
    camera_id: Union[str, int] = 0

@app.post("/camera_id")
async def address(address: Address):
    print(type(address.camera_id))
    print('----------------------')
    webcam = address.camera_id.isnumeric() or address.camera_id.endswith('.txt') or address.camera_id.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://', 'https://'))
    if webcam:
        return address
    else:
        return {
            'message': "Incorrect Camera Address",
            'status': address.camera_id,
            "expect_input": "The camera address should be (0, 1) or start with ('rtsp://', 'rtmp://', 'http://', 'https://')"
        }
Function that processes frames via the WebSocket:
@app.websocket("/ws")
async def get_stream(websocket: WebSocket):
    await websocket.accept()
    camera_id = await address(parameter)
    camera = cv2.VideoCapture(camera_id)
    try:
        while True:
            ret, frame = camera.read()
            if frame is not None:
                ret, buffer = cv2.imencode('.jpg', frame)
                await websocket.send_bytes(buffer.tobytes())
                del frame
                gc.collect()
                torch.cuda.empty_cache()
            else:
                print('No frame is rendered')
                break
    except WebSocketDisconnect:
        print("Client disconnected")
This is how I am calling the function inside get_stream: camera_id = await address(parameter). One way to share the address between the two endpoints is sketched below.
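A minimal sketch of that idea, storing the validated address on app.state (the app.state usage and the getattr default are assumptions for illustration, not part of the original code):

@app.post("/camera_id")
async def address(address: Address):
    ...  # validation as above
    # Remember the last valid address for the websocket endpoint.
    app.state.camera_id = address.camera_id
    return address

@app.websocket("/ws")
async def get_stream(websocket: WebSocket):
    await websocket.accept()
    # Read what the POST endpoint stored instead of calling it directly.
    camera_id = getattr(app.state, 'camera_id', 0)
    camera = cv2.VideoCapture(camera_id)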
I want to get the size of an image from a buffer.
import asyncio
import io

from PIL import Image
from playwright.async_api import async_playwright

class Scraper:
    async def _save_image(self, res):
        buffer = await res.body()
        img = Image.open(io.BytesIO(buffer))
        img_w, img_h = img.size

    async def scrape(self):
        playwright = await async_playwright().start()
        browser = await playwright.chromium.launch(headless=True, devtools=False)
        page = await browser.new_page()
        page.on('response', self._save_image)
        await page.goto('https://www.example.com')

scraper = Scraper(key, id)
asyncio.run(scraper.scrape())
The call img = Image.open(io.BytesIO(buffer)) is not asynchronous. I want to know the size of the images from the buffer asynchronously. How do I do this?
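PIL's decoding is synchronous, so one common pattern is to push it into a worker thread and await the result. A minimal sketch, assuming Python 3.9+ for asyncio.to_thread (the _image_size helper is introduced here for illustration):

import asyncio
import io

from PIL import Image

def _image_size(buffer: bytes):
    # Blocking PIL work; runs in a worker thread.
    return Image.open(io.BytesIO(buffer)).size

class Scraper:
    async def _save_image(self, res):
        buffer = await res.body()
        # to_thread keeps the event loop free while the image decodes.
        img_w, img_h = await asyncio.to_thread(_image_size, buffer)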
My project uses socket.io to send and receive data.
I added aiohttp to help display the results in the browser.
import asyncio

import socketio
from aiohttp import web

sio = socketio.AsyncServer(async_mode='aiohttp')
app = web.Application()
sio.attach(app)
I followed https://us-pycon-2019-tutorial.readthedocs.io/aiohttp_file_uploading.html to upload an image, but I cannot upload a video.
def gen1():
    # while True:
    #     if len(pm.list_image_display) > 1:
    image = cv2.imread("/home/duong/Pictures/Chess_Board.svg")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # img = PIL.Image.new("RGB", (64, 64), color=(255,255,0))
    image_pil = PIL.Image.fromarray(image)
    fp = io.BytesIO()
    image_pil.save(fp, format="JPEG")
    content = fp.getvalue()
    return content

async def send1():
    print("11")
    return web.Response(body=gen1(), content_type='image/jpeg')
How can I display video via aiohttp in the browser?
To stream a video in aiohttp you may open a StreamResponse in response to the fetching of an img HTML node:
@routes.get('/video')
async def video_feed(request):
    response = web.StreamResponse()
    response.content_type = 'multipart/x-mixed-replace; boundary=frame'
    await response.prepare(request)
    for frame in frames('/dev/video0'):
        await response.write(frame)
    return response
and send your frames in the form of bytes:
def frames(path):
    camera = cv2.VideoCapture(path)
    if not camera.isOpened():
        raise RuntimeError('Cannot open camera')
    while True:
        _, img = camera.read()
        img = cv2.resize(img, (480, 320))
        frame = cv2.imencode('.jpg', img)[1].tobytes()
        yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
This can be demanding on the network, however, as the bitrate required to send each frame individually is high. For real-time streaming with better compression you may want to use a WebRTC implementation such as aiortc.
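For completeness, a minimal sketch of wiring the route into an app (the index page and the run_app call are assumptions, not part of the original answer); on the browser side, an img node pointed at the endpoint is enough:

routes = web.RouteTableDef()  # must exist before the decorated handlers above

@routes.get('/')
async def index(request):
    # The browser renders the multipart stream inside a plain <img>.
    return web.Response(text='<img src="/video">', content_type='text/html')

app = web.Application()
app.add_routes(routes)
web.run_app(app)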
I'm using OpenCV to capture video from my webcam. Every 5 seconds I process a single frame/image, which can take a few seconds. So far everything works, but whenever a frame is processed the entire video freezes for a couple of seconds (until the processing is finished). I'm trying to get rid of this by using threading. Here is what I did so far.
Inside the while loop that captures the video:
while True:
    ret, image = cap.read()
    if next_time <= datetime.now():
        content_type = 'image/jpeg'
        headers = {'content-type': content_type}
        _, img_encoded = cv2.imencode('.jpg', image)
        loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(self.async_faces(img_encoded, headers))
        loop.run_until_complete(future)
        next_time += period
    ...
    cv2.imshow('img', image)
Here are the methods:
async def async_faces(self, img, headers):
    with ThreadPoolExecutor(max_workers=10) as executor:
        loop = asyncio.get_event_loop()
        tasks = [
            loop.run_in_executor(
                executor,
                self.face_detection,
                *(img, headers)  # Allows us to pass multiple arguments to `face_detection`.
            )
        ]
        for response in await asyncio.gather(*tasks):
            pass

def face_detection(self, img, headers):
    try:
        response = requests.post(self.url, data=img.tostring(), headers=headers)
        ...
    except Exception as e:
        ...
...
But unfortunately it's not working.
EDIT 1
Below I describe what the whole thing is supposed to do.
Originally, the function looked like this:
import requests
import cv2
from datetime import datetime, timedelta

def face_recognition(self):
    # Start camera
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    emotional_states = []
    font = cv2.FONT_HERSHEY_SIMPLEX
    period = timedelta(seconds=self.time_period)
    next_time = datetime.now() + period
    cv2.namedWindow('img', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    while True:
        ret, image = cap.read()
        if next_time <= datetime.now():
            # Prepare headers for the HTTP request.
            content_type = 'image/jpeg'
            headers = {'content-type': content_type}
            _, img_encoded = cv2.imencode('.jpg', image)
            try:
                # Send the HTTP request with the image and receive the response.
                response = requests.post(self.url, data=img_encoded.tostring(), headers=headers)
                emotional_states = response.json().get("emotions")
                face_locations = response.json().get("locations")
            except Exception as e:
                emotional_states = []
                face_locations = []
                print(e)
            next_time += period
        for i in range(0, len(emotional_states)):
            emotion = emotional_states[i]
            face_location = face_locations[i]
            cv2.putText(image, emotion, (int(face_location[0]), int(face_location[1])),
                        font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow('img', image)
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            cv2.destroyAllWindows()
            cap.release()
            break
        if k == ord('a'):
            cv2.resizeWindow('img', 700, 700)
I use the above method to film myself. The film is shown live on my screen. Furthermore, every 5 seconds one frame is sent to an API, where the image is processed so that the emotion of the person in the image is returned. This emotion is displayed on my screen next to myself. The problem is that the live video freezes for a couple of seconds until the emotion is returned from the API.
My OS is Ubuntu.
EDIT 2
The API runs locally. I created a Flask app, and the following method receives the request:
from flask import Flask, request, Response
import numpy as np
import cv2
import json

app = Flask(__name__)

@app.route('/api', methods=['POST'])
def facial_emotion_recognition():
    # Convert the string of image data to uint8
    # (np.frombuffer replaces the deprecated np.fromstring).
    nparr = np.frombuffer(request.data, np.uint8)
    # Decode the image.
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Analyse the image.
    emotional_state, face_locations = emotionDetection.analyze_facial_emotions(img)
    json_dump = json.dumps({'emotions': emotional_state, 'locations': face_locations}, cls=NumpyEncoder)
    return Response(json_dump, mimetype='application/json')
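For reference, the freeze comes from loop.run_until_complete(), which blocks the capture loop until the HTTP request finishes. A minimal sketch of a non-blocking alternative, submitting the request to a worker thread and polling the future from the display loop (executor, pending, and the tuple returned by face_detection are assumptions for illustration):

from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=2)
pending = None  # in-flight API call, if any
headers = {'content-type': 'image/jpeg'}

while True:
    ret, image = cap.read()
    if next_time <= datetime.now() and pending is None:
        _, img_encoded = cv2.imencode('.jpg', image)
        # Submit without waiting; the loop keeps drawing frames.
        pending = executor.submit(face_detection, img_encoded, headers)
        next_time += period
    if pending is not None and pending.done():
        # Assumed: face_detection returns (emotions, locations).
        emotional_states, face_locations = pending.result()
        pending = None
    cv2.imshow('img', image)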
I've written the following code to transfer a video over gRPC, using client-side streaming. The client sends the video frame by frame over the stream; each frame is converted to bytes and transferred.
But the video plays slower than its actual speed. How do I ensure that the video plays at the actual speed?
imageTest.proto file
syntax = "proto3";

option java_multiple_files = true;
option objc_class_prefix = "HLW";

// The greeting service definition.
service ImageTest {
  // Sends a greeting
  rpc Analyse (stream MsgRequest) returns (MsgReply) {}
}

// The request message containing the image.
message MsgRequest {
  bytes img = 1;
}

// The response message containing the reply.
message MsgReply {
  int32 reply = 1;
}
imageTest_server.py
import time
from concurrent import futures

import cv2
import grpc
import numpy as np

import imageTest_pb2
import imageTest_pb2_grpc

_ONE_DAY_IN_SECONDS = 60 * 60 * 24

class Greeter(imageTest_pb2_grpc.ImageTestServicer):
    def Analyse(self, request_iterator, context):
        cnt = 0  # number of frames received
        for req in request_iterator:
            frame = np.array(list(req.img))
            frame = frame.reshape((576, 704))
            frame = np.array(frame, dtype=np.uint8)
            cnt += 1
            cv2.imshow('Processed Image', frame)
            cv2.waitKey(1)
        return imageTest_pb2.MsgReply(reply=cnt)

def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    imageTest_pb2_grpc.add_ImageTestServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)

if __name__ == '__main__':
    serve()
imageTest_client.py
import cv2
import grpc
import skvideo.io

import imageTest_pb2
import imageTest_pb2_grpc

def run():
    channel = grpc.insecure_channel('localhost:50051')
    stub = imageTest_pb2_grpc.ImageTestStub(channel)
    response = stub.Analyse(generateRequests())

def generateRequests():
    videogen = skvideo.io.vreader(URL)
    for frame in videogen:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = bytes(frame)
        yield imageTest_pb2.MsgRequest(img=frame)

if __name__ == '__main__':
    run()
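Two things likely contribute to the slow playback on the display side: np.array(list(req.img)) converts the buffer one byte at a time (np.frombuffer is far faster), and frames are shown as fast as they arrive rather than at the source frame rate. A minimal pacing sketch for the servicer (the 25 fps value is an assumption; use the real frame rate of your video):

import time

import cv2
import numpy as np

FPS = 25.0  # assumed source frame rate
FRAME_INTERVAL = 1.0 / FPS

def Analyse(self, request_iterator, context):
    cnt = 0
    next_deadline = time.monotonic()
    for req in request_iterator:
        # frombuffer avoids the slow per-byte list() conversion.
        frame = np.frombuffer(req.img, dtype=np.uint8).reshape((576, 704))
        cnt += 1
        cv2.imshow('Processed Image', frame)
        cv2.waitKey(1)
        # Sleep off whatever time remains in this frame's slot.
        next_deadline += FRAME_INTERVAL
        delay = next_deadline - time.monotonic()
        if delay > 0:
            time.sleep(delay)
    return imageTest_pb2.MsgReply(reply=cnt)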