Create an MJPEG stream from JPEG images in Python

I need to serve real-time graphs and I would like to deliver an MJPEG stream over HTTP (so that it is easy to include the graphs in a web page by using a plain <img> tag).
Is it possible to create an MJPEG stream from multiple JPEG images, in real time?
My strategy is:
Output the correct HTTP headers:
Cache-Control:no-store, no-cache, must-revalidate, pre-check=0, post-check=0, max-age=0
Connection:close
Content-Type:multipart/x-mixed-replace;boundary=boundarydonotcross
Expires:Mon, 3 Jan 2000 12:34:56 GMT
Pragma:no-cache
Server:MJPG-Streamer/0.2
(obtained from running curl -I against an mjpeg-streamer instance; some of these headers look strange, but that is what it returns)
Then yield the successive JPEG images as binary data, taking care to:
prepend the correct part headers before each image (as mjpeg-streamer does):
Content-Type: image/jpeg
Content-Length: 5427
X-Timestamp: 3927662.086099
append the boundary string after each JPEG:
--boundarydonotcross
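For reference, a minimal sketch of what one part of such a stream looks like on the wire (boundary and header names as above; frame.jpg is a placeholder file):

# One multipart part of the stream, built by hand (sketch)
jpeg_bytes = open('frame.jpg', 'rb').read()
part = (b'--boundarydonotcross\r\n'
        b'Content-Type: image/jpeg\r\n' +
        ('Content-Length: %d\r\n\r\n' % len(jpeg_bytes)).encode() +
        jpeg_bytes + b'\r\n')   # image data, then a trailing CRLF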
Questions:
Have you done this,
do you know of a Python module that does this,
do you think it would work,
have you got any advice?

I got it working as a proof-of-concept: https://github.com/damiencorpataux/pymjpeg
For the record:
import os, time
from glob import glob
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler  # Python 2

boundary = '--boundarydonotcross'

def request_headers():
    return {
        'Cache-Control': 'no-store, no-cache, must-revalidate, pre-check=0, post-check=0, max-age=0',
        'Connection': 'close',
        'Content-Type': 'multipart/x-mixed-replace;boundary=%s' % boundary,
        'Expires': 'Mon, 3 Jan 2000 12:34:56 GMT',
        'Pragma': 'no-cache',
    }

def image_headers(filename):
    return {
        'X-Timestamp': time.time(),
        'Content-Length': os.path.getsize(filename),
        # FIXME: mime-type must be set according to file content
        'Content-Type': 'image/jpeg',
    }

# FIXME: should take a binary stream
def image(filename):
    with open(filename, "rb") as f:
        # yield the file one byte at a time (inefficient, but simple)
        byte = f.read(1)
        while byte:
            yield byte
            byte = f.read(1)

# Basic HTTP server
class MyHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        # Response headers (multipart)
        for k, v in request_headers().items():
            self.send_header(k, v)
        # Multipart content
        for filename in glob('img/*'):
            # Part boundary string
            self.end_headers()
            self.wfile.write(boundary)
            self.end_headers()
            # Part headers
            for k, v in image_headers(filename).items():
                self.send_header(k, v)
            self.end_headers()
            # Part binary
            for chunk in image(filename):
                self.wfile.write(chunk)

    def log_message(self, format, *args):
        return

httpd = HTTPServer(('', 8001), MyHandler)
httpd.serve_forever()
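For anyone on Python 3, a minimal sketch of the same idea (http.server replaces BaseHTTPServer, and everything written to wfile must be bytes; assumes the images live in img/* as above):

from glob import glob
from http.server import HTTPServer, BaseHTTPRequestHandler

BOUNDARY = 'boundarydonotcross'

class MJPEGHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-Type',
                         'multipart/x-mixed-replace;boundary=%s' % BOUNDARY)
        self.end_headers()
        for filename in glob('img/*'):
            with open(filename, 'rb') as f:
                data = f.read()
            # one multipart part per image; note everything is bytes
            self.wfile.write(b'--' + BOUNDARY.encode() + b'\r\n')
            self.wfile.write(b'Content-Type: image/jpeg\r\n')
            self.wfile.write(b'Content-Length: %d\r\n\r\n' % len(data))
            self.wfile.write(data + b'\r\n')

httpd = HTTPServer(('', 8001), MJPEGHandler)
httpd.serve_forever()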

You can use the Flask framework to do this.
It is not only for MJPEG.
I adapted some code from here: https://blog.miguelgrinberg.com/post/video-streaming-with-flask
APP.py
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response

# import camera driver
if os.environ.get('CAMERA'):
    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
    from camera import Camera

# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera

app = Flask(__name__)

@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html')

def gen(camera):
    """Video streaming generator function."""
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', threaded=True)
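With the camera modules below in place, you can sanity-check the multipart framing from a quick client script (a sketch; assumes the app runs locally on Flask's default port 5000):

import requests

r = requests.get('http://127.0.0.1:5000/video_feed', stream=True)
print(r.headers['Content-Type'])  # multipart/x-mixed-replace; boundary=frame
for chunk in r.iter_content(chunk_size=1024):
    if b'--frame' in chunk:
        print('received a part boundary')
        break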
base_camera.py
import time
import threading
try:
    from greenlet import getcurrent as get_ident
except ImportError:
    try:
        from thread import get_ident
    except ImportError:
        from _thread import get_ident

class CameraEvent(object):
    """An Event-like class that signals all active clients when a new frame is
    available.
    """
    def __init__(self):
        self.events = {}

    def wait(self):
        """Invoked from each client's thread to wait for the next frame."""
        ident = get_ident()
        if ident not in self.events:
            # this is a new client
            # add an entry for it in the self.events dict
            # each entry has two elements, a threading.Event() and a timestamp
            self.events[ident] = [threading.Event(), time.time()]
        return self.events[ident][0].wait()

    def set(self):
        """Invoked by the camera thread when a new frame is available."""
        now = time.time()
        remove = None
        for ident, event in self.events.items():
            if not event[0].isSet():
                # if this client's event is not set, then set it
                # also update the last set timestamp to now
                event[0].set()
                event[1] = now
            else:
                # if the client's event is already set, it means the client
                # did not process a previous frame
                # if the event stays set for more than 5 seconds, then assume
                # the client is gone and remove it
                if now - event[1] > 5:
                    remove = ident
        if remove:
            del self.events[remove]

    def clear(self):
        """Invoked from each client's thread after a frame was processed."""
        self.events[get_ident()][0].clear()

class BaseCamera(object):
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera
    event = CameraEvent()

    def __init__(self):
        """Start the background camera thread if it isn't running yet."""
        if BaseCamera.thread is None:
            BaseCamera.last_access = time.time()
            # start background frame thread
            BaseCamera.thread = threading.Thread(target=self._thread)
            BaseCamera.thread.start()
            # wait until frames are available
            while self.get_frame() is None:
                time.sleep(0)

    def get_frame(self):
        """Return the current camera frame."""
        BaseCamera.last_access = time.time()
        # wait for a signal from the camera thread
        BaseCamera.event.wait()
        BaseCamera.event.clear()
        return BaseCamera.frame

    @staticmethod
    def frames():
        """Generator that returns frames from the camera."""
        raise RuntimeError('Must be implemented by subclasses.')

    @classmethod
    def _thread(cls):
        """Camera background thread."""
        print('Starting camera thread.')
        frames_iterator = cls.frames()
        for frame in frames_iterator:
            BaseCamera.frame = frame
            BaseCamera.event.set()  # send signal to clients
            time.sleep(0)
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds then stop the thread
            if time.time() - BaseCamera.last_access > 10:
                frames_iterator.close()
                print('Stopping camera thread due to inactivity.')
                break
        BaseCamera.thread = None
camera.py
# D:\gstreamer\1.0\x86\bin>gst-launch-1.0.exe multifilesrc loop=true start-index=0 stop-index=0 location=d:/python/temp.png ! decodebin ! identity sleep-time=1000000 ! videoconvert ! autovideosink
import shutil
import time
import os, sys
from PIL import Image, ImageFont, ImageDraw, ImageFile
from io import BytesIO
from base_camera import BaseCamera

im = Image.new("RGB", (300, 30), (220, 180, 180))
dr = ImageDraw.Draw(im)
font = ImageFont.truetype(os.path.join("fonts", "msyh.ttf"), 16)
# the string "这是一段测试文本。" means "This is a piece of test text."
text = time.strftime("%m/%d %H:%M:%S") + u"这是一段测试文本。"
dr.text((10, 5), text, font=font, fill="#000000")
im.save("d://python/temp.jpg")

dr.rectangle((0, 0, 300, 500), fill="#FFFFFF")
text = time.strftime("%m/%d %H:%M:%S") + u"这是一段测试文本。"
dr.text((10, 5), text, font=font, fill="#000000")
f = BytesIO()
f.name = "sdf.jpg"
im.save(f, "JPEG")
f.seek(0)
f.close()

class Camera(BaseCamera):
    """An emulated camera implementation that streams a repeated sequence of
    files 1.jpg, 2.jpg and 3.jpg at a rate of one frame per second."""
    imgs = [open(f + '.jpg', 'rb').read() for f in ['1', '2', '3']]

    @staticmethod
    def frames():
        while True:
            text = time.strftime("%m/%d %H:%M:%S") + u"这是一段测试文本。"
            dr.rectangle((0, 0, 300, 500), fill="#FFFFFF")
            dr.text((10, 5), text, font=font, fill="#000000")
            f = BytesIO()
            im.save(f, 'JPEG')
            try:
                im.save("d:/python/temp.jpg")
            except Exception:
                print("Unexpected error:", sys.exc_info()[0])
            # shutil.copy("d:/python/temp2.png", "d:/python/temp.png")
            f.seek(0)
            time.sleep(1)
            yield f.read()  # Camera.imgs[int(time.time()) % 3]
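Since the original goal was real-time generated images rather than camera frames, a cleaner frames() can render everything in memory and skip the temp files entirely; a sketch using PIL (already imported by camera.py above, default font, no disk writes):

import time
from io import BytesIO
from PIL import Image, ImageDraw
from base_camera import BaseCamera

class Camera(BaseCamera):
    @staticmethod
    def frames():
        while True:
            # draw a fresh timestamped frame entirely in memory
            im = Image.new("RGB", (300, 30), (220, 180, 180))
            ImageDraw.Draw(im).text((10, 5), time.strftime("%m/%d %H:%M:%S"),
                                    fill="#000000")
            buf = BytesIO()
            im.save(buf, "JPEG")
            time.sleep(1)
            yield buf.getvalue()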

Related

How to stream MP3 music from the web using Python PyQt5?

I want to stream an mp3 file from the web using Python PyQt5. I have researched a lot and only found code for streaming wav files.
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtMultimedia import *
import urllib.request
import threading
import time

class Streamer:
    def __init__(self, url):
        self.url = url
        self.fancy = urllib.request.URLopener()
        self.web = self.fancy.open(self.url)
        self.content_len = int(self.web.headers["Content-Length"])
        self.data = self.web.read(1024*1024)
        self.buffer = QBuffer()
        self.buffer.writeData(self.data[250:])
        self.buffer.open(QBuffer.ReadWrite)
        threading.Thread(target=self.stream).start()
        self.format = QAudioFormat()
        self.format.setSampleRate(48000)
        self.format.setChannelCount(2)
        self.format.setSampleSize(16)
        self.format.setCodec("audio/pcm")
        self.format.setByteOrder(QAudioFormat.LittleEndian)
        self.format.setSampleType(QAudioFormat.SignedInt)
        self.audio = QAudioOutput(self.format)
        self.audio.start(self.buffer)

    def stream(self):
        # while True:
        #     self.sound_data = self.web.read(1024*1024)
        #     if not self.sound_data:
        #         break
        #     self.buffer.buffer().append(self.sound_data)
        #     time.sleep(2)
        while len(self.data) < self.content_len:
            self.sound_data = self.web.read(1024*1024)
            self.buffer.buffer().append(self.sound_data)
            self.data += self.sound_data
            time.sleep(2)
        self.buffer.buffer().clear()
        del self.data

if __name__ == "__main__":
    app = QApplication([])
    streamer = Streamer("https://raw.githubusercontent.com/PremKumarMishra/Stream-Songs/main/Audio.wav")
    app.exec_()
I checked, but I can't add an MPEG-3 (mp3) codec to QAudioFormat, so this current code does not work for mp3.
The basic behavior of QMediaPlayer should be enough to manage buffering of simple audio streams, as the backend will consider a 100% buffer as enough to guarantee playing.
In case you want more control over the buffer state, you need to implement a custom QIODevice subclass to act as a middle layer between QMediaPlayer and the download process.
In the following example I'm using QNetworkAccessManager to download the stream; the readyRead signal of the QNetworkReply is then connected to a function that reads the raw bytes and emits a buffer status, based on the currently available read data and the minimum size set for the buffer.
The first time the received data reaches the minimum size, it begins to emit the readyRead signal, and if the player has not been started yet (no media set), it sets the media using the Buffer instance and is then ready to play.
from PyQt5 import QtCore, QtWidgets, QtNetwork, QtMultimedia

url = 'https://url.to/stream'

Errors = {}
for k, v in QtMultimedia.QMediaPlayer.__dict__.items():
    if isinstance(v, QtMultimedia.QMediaPlayer.Error):
        Errors[v] = k

class Buffer(QtCore.QIODevice):
    buffering = QtCore.pyqtSignal(object, object)
    fullBufferEmitted = False

    def __init__(self, reply, minBufferSize=250000):
        super().__init__()
        self.minBufferSize = max(200000, minBufferSize)
        self.reply = reply
        self.data = bytes()
        # the network reply is on another thread, use a mutex to ensure that
        # no simultaneous access is done in the meantime
        self.mutex = QtCore.QMutex()
        # this is important!
        self.setOpenMode(self.ReadOnly|self.Unbuffered)
        self.reply.readyRead.connect(self.dataReceived)

    def dataReceived(self):
        self.mutex.lock()
        self.data += self.reply.readAll().data()
        dataLen = len(self.data)
        self.mutex.unlock()
        self.buffering.emit(dataLen, self.minBufferSize)
        if not self.fullBufferEmitted:
            if dataLen < self.minBufferSize:
                return
            self.fullBufferEmitted = True
            self.readyRead.emit()

    def isSequential(self):
        return True

    def readData(self, size):
        self.mutex.lock()
        data = self.data[:size]
        self.data = self.data[size:]
        self.mutex.unlock()
        return data

    def bytesAvailable(self):
        return len(self.data) + super().bytesAvailable()

class Player(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        layout = QtWidgets.QVBoxLayout(self)
        self.playButton = QtWidgets.QPushButton('Play', enabled=False)
        layout.addWidget(self.playButton)
        self.volumeSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        layout.addWidget(self.volumeSlider)
        self.statusLabel = QtWidgets.QLabel('Waiting')
        self.statusLabel.setFrameShape(
            self.statusLabel.StyledPanel|self.statusLabel.Sunken)
        layout.addWidget(self.statusLabel)

        self.player = QtMultimedia.QMediaPlayer(volume=16)
        self.volumeSlider.setValue(self.player.volume())

        self.networkManager = QtNetwork.QNetworkAccessManager()
        self.url = QtCore.QUrl(url)
        self.media = QtMultimedia.QMediaContent(self.url)
        reply = self.networkManager.get(QtNetwork.QNetworkRequest(self.url))
        self.buffer = Buffer(reply)

        self.playButton.clicked.connect(self.play)
        self.volumeSlider.valueChanged.connect(self.player.setVolume)
        self.player.error.connect(self.error)
        self.buffer.buffering.connect(self.buffering)

    def error(self, error):
        errorStr = 'Error: {} ({})'.format(
            Errors.get(error, 'Unknown error'), int(error))
        self.statusLabel.setText(errorStr)
        print(errorStr)

    def buffering(self, loaded, minBufferSize):
        self.statusLabel.setText('Buffer: {}%'.format(int(loaded / minBufferSize * 100)))
        if self.player.media().isNull() and loaded >= minBufferSize:
            self.player.setMedia(self.media, self.buffer)
            self.playButton.setEnabled(True)
            self.playButton.setFocus()
            self.statusLabel.setText('Ready to play')

    def play(self):
        if self.player.state() == self.player.PlayingState:
            self.player.pause()
            self.playButton.setText('Play')
        else:
            self.player.play()
            self.playButton.setText('Pause')

app = QtWidgets.QApplication([])
w = Player()
w.show()
app.exec_()
Note that:
as soon as QMediaPlayer begins to read the stream, the buffer length will obviously become smaller, as there's no way to know or control how the backend accesses the stream: when the player is reading (which doesn't mean it's playing), it will read the stream anyway;
due to the reason above, the shown buffer size is only "guessed" as soon as the media is set, based on the data read and the data received from the network reply;
you might want to control the media player status in case the buffer goes too low (but you must consider what is explained above), and eventually pause it;
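As a sketch of that last point, the buffering() slot above could also pause and resume the player based on the unread buffer size (the 10% threshold is an arbitrary assumption):

def buffering(self, loaded, minBufferSize):
    self.statusLabel.setText('Buffer: {}%'.format(int(loaded / minBufferSize * 100)))
    if self.player.media().isNull() and loaded >= minBufferSize:
        self.player.setMedia(self.media, self.buffer)
        self.playButton.setEnabled(True)
    elif self.player.state() == self.player.PlayingState:
        # pause when the unread buffer runs very low...
        if loaded < minBufferSize * 0.1:
            self.player.pause()
    elif self.player.state() == self.player.PausedState and loaded >= minBufferSize:
        # ...and resume once it has refilled
        self.player.play()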

Turn image array into an io bytes object to simulate open with 'rb'

I am trying to send frames of a video to a remote server using requests. The code I am using for this is:
def send_request(frame_path="frame_on_disk_1.jpeg"):
    with open(frame_path, "rb") as f:
        r = requests.post(URL, files={'upload': f})
    return r
So I am writing the frames to disk and then reading them back as bytes when sending them to the server, which is not the best way to do it.
However, I am not sure how I can convert the array, represented by the variable frame in the code below, directly into a readable bytes object without touching the disk.
import cv2

cap = cv2.VideoCapture("video.MOV")
count = 0
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    cv2.imwrite(f"all_frames/frame_num_{count}.png", frame)
You can use io.BytesIO and cv2.imencode to encode an image into a memory buffer.
I've also used a queue so the frames are enqueued, and the HTTP requests are then done in separate threads.
import traceback
import cv2
from io import BytesIO
from queue import Queue
from threading import Thread
from requests import Session

URL = "http://example.com"
THREADS = 5
SAMPLE = "sample.mov"

class UploaderThread(Thread):
    def __init__(self, q, s):
        super().__init__()
        self.q = q
        self.s = s

    def run(self):
        for count, file in iter(self.q.get, "STOP"):
            try:
                r = self.s.post(URL, files={"upload": file})
            except Exception:
                traceback.print_exc()
            else:
                print(f"Frame ({count}): {r}")

def main():
    cap = cv2.VideoCapture(SAMPLE)
    q = Queue()
    s = Session()
    count = 0
    threads = []
    for _ in range(THREADS):
        t = UploaderThread(q, s)
        t.start()
        threads.append(t)
    while True:
        ret, frame = cap.read()
        count += 1
        if not ret:
            break
        _, img = cv2.imencode(".png", frame)
        q.put_nowait((count, BytesIO(img)))
    for _ in range(THREADS):
        q.put("STOP")

if __name__ == "__main__":
    main()
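For a single frame without the queue and thread machinery, the core conversion is just this (a sketch; the tuple's filename and content type are optional hints for the server, and URL is the same placeholder as above):

import cv2
import requests
from io import BytesIO

URL = "http://example.com"  # placeholder, as above

ret, frame = cv2.VideoCapture("sample.mov").read()
ok, img = cv2.imencode(".jpg", frame)  # encode the frame in memory
buf = BytesIO(img.tobytes())           # file-like object, no disk involved
r = requests.post(URL, files={"upload": ("frame.jpg", buf, "image/jpeg")})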

Trying to display multiple streams with OpenCV and Flask

I'm trying to capture two rtsp streams with OpenCV and then present them with a simple Flask server. I can show the two streams together when just using OpenCV, but when I try to display them through Flask it just picks one of the streams and shows it twice.
Here's the original creator's blog
Here is my flask code:
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response

# import camera driver
'''
if os.environ.get('CAMERA'):
    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
    from camera import Camera
'''
from camera_opencv import Camera1, Camera2

app = Flask(__name__)

@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html')

def gen(camera):
    """Video streaming generator function."""
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed1')
def video_feed1():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(Camera1()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/video_feed2')
def video_feed2():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(Camera2()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', threaded=True, port=8888)
Here's the camera_opencv file
import cv2
from base_camera import BaseCamera

class Camera1(BaseCamera):
    video_source = 0

    @staticmethod
    def set_video_source(source):
        Camera1.video_source = source

    @staticmethod
    def frames():
        camera = cv2.VideoCapture(Camera1.video_source)
        if not camera.isOpened():
            raise RuntimeError('Could not start camera.')
        while True:
            # read current frame
            _, img = camera.read()
            # encode as a jpeg image and return it
            yield cv2.imencode('.jpg', img)[1].tobytes()

class Camera2(BaseCamera):
    video_source = 1

    @staticmethod
    def set_video_source(source):
        Camera2.video_source = source

    @staticmethod
    def frames():
        camera = cv2.VideoCapture(Camera2.video_source)
        if not camera.isOpened():
            raise RuntimeError('Could not start camera.')
        while True:
            # read current frame
            _, img = camera.read()
            # encode as a jpeg image and return it
            yield cv2.imencode('.jpg', img)[1].tobytes()
Base camera file
import time
import threading
try:
    from greenlet import getcurrent as get_ident
except ImportError:
    try:
        from thread import get_ident
    except ImportError:
        from _thread import get_ident

class CameraEvent(object):
    """An Event-like class that signals all active clients when a new frame is
    available.
    """
    def __init__(self):
        self.events = {}

    def wait(self):
        """Invoked from each client's thread to wait for the next frame."""
        ident = get_ident()
        if ident not in self.events:
            # this is a new client
            # add an entry for it in the self.events dict
            # each entry has two elements, a threading.Event() and a timestamp
            self.events[ident] = [threading.Event(), time.time()]
        return self.events[ident][0].wait()

    def set(self):
        """Invoked by the camera thread when a new frame is available."""
        now = time.time()
        remove = None
        for ident, event in self.events.items():
            if not event[0].isSet():
                # if this client's event is not set, then set it
                # also update the last set timestamp to now
                event[0].set()
                event[1] = now
            else:
                # if the client's event is already set, it means the client
                # did not process a previous frame
                # if the event stays set for more than 5 seconds, then assume
                # the client is gone and remove it
                if now - event[1] > 5:
                    remove = ident
        if remove:
            del self.events[remove]

    def clear(self):
        """Invoked from each client's thread after a frame was processed."""
        self.events[get_ident()][0].clear()

class BaseCamera(object):
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera
    event = CameraEvent()

    def __init__(self):
        """Start the background camera thread if it isn't running yet."""
        if BaseCamera.thread is None:
            BaseCamera.last_access = time.time()
            # start background frame thread
            BaseCamera.thread = threading.Thread(target=self._thread)
            BaseCamera.thread.start()
            # wait until frames are available
            while self.get_frame() is None:
                time.sleep(0)

    def get_frame(self):
        """Return the current camera frame."""
        BaseCamera.last_access = time.time()
        # wait for a signal from the camera thread
        BaseCamera.event.wait()
        BaseCamera.event.clear()
        return BaseCamera.frame

    @staticmethod
    def frames():
        """Generator that returns frames from the camera."""
        raise RuntimeError('Must be implemented by subclasses.')

    @classmethod
    def _thread(cls):
        """Camera background thread."""
        print('Starting camera thread.')
        frames_iterator = cls.frames()
        for frame in frames_iterator:
            BaseCamera.frame = frame
            BaseCamera.event.set()  # send signal to clients
            time.sleep(0)
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds then stop the thread
            if time.time() - BaseCamera.last_access > 10:
                frames_iterator.close()
                print('Stopping camera thread due to inactivity.')
                break
        BaseCamera.thread = None
Index.html
<html>
  <head>
    <title>Video Streaming Demonstration</title>
  </head>
  <body>
    <h1>Video Streaming Demonstration</h1>
    <img src="{{ url_for('video_feed1') }}">
    <img src="{{ url_for('video_feed2') }}">
  </body>
</html>
So I kind of managed to make a hacky workaround. For whatever reason I could not resolve, the app just couldn't handle multiple streams individually.
So I changed the camera class, added multiple sources to it, and used numpy.hstack(()) to merge both frames together, thus returning one single combined stream.
I will be very grateful if someone could help out here, as my method is not at all scalable.
import cv2
from base_camera import BaseCamera
import numpy as np

class Camera(BaseCamera):
    video_source1 = 0
    video_source2 = 1

    @staticmethod
    def set_video_source(sources):
        Camera.video_source1 = sources[0]
        Camera.video_source2 = sources[1]

    @staticmethod
    def frames():
        camera1 = cv2.VideoCapture(Camera.video_source1)
        camera2 = cv2.VideoCapture(Camera.video_source2)
        # raise if either camera failed to open (the original used `or`,
        # which only triggered when both had failed)
        if not (camera1.isOpened() and camera2.isOpened()):
            raise RuntimeError('Could not start camera.')
        while True:
            # read current frame
            _, img1 = camera1.read()
            _, img2 = camera2.read()
            img1 = cv2.resize(img1, (704, 396))
            img2 = cv2.resize(img2, (704, 396))
            img = np.hstack((img1, img2))
            # encode as a jpeg image and return it
            yield cv2.imencode('.jpg', img)[1].tobytes()
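For what it's worth, the duplicated-stream behaviour likely comes from base_camera.py itself: thread, frame and event are class attributes referenced as BaseCamera.* inside the methods, so Camera1 and Camera2 share one background thread and one frame, and whichever subclass is instantiated first wins. A sketch of one possible fix (an untested assumption, Python 3.6+; CameraEvent is the class from base_camera.py):

# Sketch: give each subclass its own state instead of sharing BaseCamera's
# class attributes (the methods would then also need to read and write these
# through type(self)/cls rather than through BaseCamera).
class BaseCamera(object):
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        cls.thread = None          # per-subclass background thread
        cls.frame = None           # per-subclass current frame
        cls.last_access = 0
        cls.event = CameraEvent()  # per-subclass client signalling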
According to the blog, I used two generators and sent the images to index.html, and I can see two streams.
def generate2():
    # it is a generator
    global outputFrame2, lock
    while True:
        with lock:
            if outputFrame2 is None:
                continue
            (flag, encodedImage2) = cv2.imencode(".jpg", outputFrame2)
            if not flag:
                continue
        yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
               bytearray(encodedImage2) + b'\r\n')
And here is video_feed2:
@app.route("/video_feed2")
def video_feed2():
    return Response(generate2(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")

Python TCP socket send/receive large delay

I used Python sockets to make a server on my Raspberry Pi 3 (Raspbian) and a client on my laptop (Windows 10). The server streams images to the laptop at a rate of 10 fps, and can reach 15 fps if I push it. The problem is that when I want the laptop to send back a command based on the image, the frame rate drops sharply to 3 fps. The process is like this:
Pi sends img => Laptop receives img => Quick process => Send command based on process result => Pi receives command, prints it => Pi sends img => ...
The processing time for each frame does not cause this (0.02 s at most per frame), so currently I am at a loss as to why the frame rate drops so much. The image is quite large, at around 200 kB, and the command is only a short 3-byte string. The image is in matrix form and is pickled before sending, while the command is sent as-is.
Can someone please explain to me why sending back such a short command makes the frame rate drop so much? And if possible, a solution for this problem. I tried making 2 servers, one dedicated to sending images and one to receiving commands, but the result is the same.
Server:
import socket
import pickle
import time
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from SendFrameInOO import PiImageServer

def main():
    # initialize the server and time stamp
    ImageServer = PiImageServer()
    ImageServer2 = PiImageServer()
    ImageServer.openServer('192.168.0.89', 50009)
    ImageServer2.openServer('192.168.0.89', 50002)

    # Initialize the camera object
    camera = PiCamera()
    camera.resolution = (320, 240)
    camera.framerate = 10  # it seems this cannot go higher than 10
                           # unless special measures are taken, which may
                           # reduce image quality
    camera.exposure_mode = 'sports'  # reduce blur
    rawCapture = PiRGBArray(camera)

    # allow the camera to warmup
    time.sleep(1)

    # capture frames from the camera
    print('<INFO> Preparing to stream video...')
    timeStart = time.time()
    for frame in camera.capture_continuous(rawCapture, format="bgr",
                                           use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize
        # the timestamp and occupied/unoccupied text
        image = frame.array
        imageData = pickle.dumps(image)
        ImageServer.sendFrame(imageData)  # send the frame data

        # receive command from laptop and print it
        command = ImageServer2.recvCommand()
        if command == 'BYE':
            print('BYE received, ending stream session...')
            break
        print(command)

        # clear the stream in preparation for the next one
        rawCapture.truncate(0)

    print('<INFO> Video stream ended')
    ImageServer.closeServer()

    elapsedTime = time.time() - timeStart
    print('<INFO> Total elapsed time is: ', elapsedTime)

if __name__ == '__main__': main()
Client:
from SupFunctions.ServerClientFunc import PiImageClient
import time
import pickle
import cv2

def main():
    # Initialize
    result = 'STP'
    ImageClient = PiImageClient()
    ImageClient2 = PiImageClient()

    # Connect to server
    ImageClient.connectClient('192.168.0.89', 50009)
    ImageClient2.connectClient('192.168.0.89', 50002)
    print('<INFO> Connection established, preparing to receive frames...')
    timeStart = time.time()

    # Receiving and processing frames
    while(1):
        # Receive and unload a frame
        imageData = ImageClient.receiveFrame()
        image = pickle.loads(imageData)
        cv2.imshow('Frame', image)
        key = cv2.waitKey(1) & 0xFF

        # Exit when q is pressed
        if key == ord('q'):
            ImageClient.sendCommand('BYE')
            break

        ImageClient2.sendCommand(result)

    ImageClient.closeClient()
    elapsedTime = time.time() - timeStart
    print('<INFO> Total elapsed time is: ', elapsedTime)
    print('Press any key to exit the program')
    # cv2.imshow('Picture from server', image)
    cv2.waitKey(0)

if __name__ == '__main__': main()
PiImageServer and PiImageClient:
import socket
import pickle
import time

class PiImageClient:
    def __init__(self):
        self.s = None
        self.counter = 0

    def connectClient(self, serverIP, serverPort):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((serverIP, serverPort))

    def closeClient(self):
        self.s.close()

    def receiveOneImage(self):
        imageData = b''
        lenData = self.s.recv(8)
        length = pickle.loads(lenData)  # should be 921764 for 640x480 images
        print('Data length is:', length)
        while len(imageData) < length:
            toRead = length - len(imageData)
            imageData += self.s.recv(4096 if toRead > 4096 else toRead)
            # if len(imageData) % 200000 <= 4096:
            #     print('Received: {} of {}'.format(len(imageData), length))
        return imageData

    def receiveFrame(self):
        imageData = b''
        lenData = self.s.recv(8)
        length = pickle.loads(lenData)
        print('Data length is:', length)
        '''length = 921764  # for 640x480 images
        length = 230563  # for 320x240 images'''
        while len(imageData) < length:
            toRead = length - len(imageData)
            imageData += self.s.recv(4096 if toRead > 4096 else toRead)
            # if len(imageData) % 200000 <= 4096:
            #     print('Received: {} of {}'.format(len(imageData), length))
        self.counter += 1
        if len(imageData) == length:
            print('Successfully received frame {}'.format(self.counter))
        return imageData

    def sendCommand(self, command):
        if len(command) != 3:
            print('<WARNING> Length of command string is different from 3')
        self.s.send(command.encode())
        print('Command {} sent'.format(command))

class PiImageServer:
    def __init__(self):
        self.s = None
        self.conn = None
        self.addr = None
        # self.currentTime = time.time()
        self.currentTime = time.asctime(time.localtime(time.time()))
        self.counter = 0

    def openServer(self, serverIP, serverPort):
        print('<INFO> Opening image server at {}:{}'.format(serverIP,
                                                            serverPort))
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind((serverIP, serverPort))
        self.s.listen(1)
        print('Waiting for client...')
        self.conn, self.addr = self.s.accept()
        print('Connected by', self.addr)

    def closeServer(self):
        print('<INFO> Closing server...')
        self.conn.close()
        self.s.close()
        # self.currentTime = time.time()
        self.currentTime = time.asctime(time.localtime(time.time()))
        print('Server closed at', self.currentTime)

    def sendOneImage(self, imageData):
        print('<INFO> Sending only one image...')
        imageDataLen = len(imageData)
        lenData = pickle.dumps(imageDataLen)
        print('Sending image length')
        self.conn.send(lenData)
        print('Sending image data')
        self.conn.send(imageData)

    def sendFrame(self, frameData):
        self.counter += 1
        print('Sending frame ', self.counter)
        frameDataLen = len(frameData)
        lenData = pickle.dumps(frameDataLen)
        self.conn.send(lenData)
        self.conn.send(frameData)

    def recvCommand(self):
        commandData = self.conn.recv(3)
        command = commandData.decode()
        return command
I believe the problem is two-fold. First, you are serializing all activity: the server sends a complete image, then instead of continuing on to send the next image (which would better fit the definition of "streaming"), it stops, waits for all bytes of the previous image to make their way across the network to the client, then for the client to receive all bytes of the image, unpickle it and send a response, and for the response to then make its way across the wire to the server.
Is there a reason you need them to be in lockstep like this? If not, try to parallelize the two sides. Have your server create a separate thread to listen for commands coming back (or simply use select to determine when the command socket has something to receive).
Second, you are likely being bitten by Nagle's algorithm (https://en.wikipedia.org/wiki/Nagle%27s_algorithm), which is intended to prevent sending numerous packets with small payloads (but lots of overhead) across the network. So, your client-side kernel has gotten your three bytes of command data and has buffered it, waiting for you to provide more data before it sends the data to the server (it will eventually send it anyway, after a delay). To change that, you would want to use the TCP_NODELAY socket option on the client side (see https://stackoverflow.com/a/31827588/1076479).
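A minimal sketch of that second fix, applied where the client creates its command socket (the address and 3-byte command are the ones used above):

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# disable Nagle's algorithm so tiny payloads are sent immediately
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect(('192.168.0.89', 50002))
s.send(b'STP')  # the 3-byte command goes out without waiting for more data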

Raspberry Pi sending frames from PiCam via websocket

I would like to send frames from the PiCam over a websocket in base64 format.
I have the following simple code:
import websocket
import time
import picamera
import io
import base64
import StringIO  # Python 2

class MyClass:
    ws = ''
    picam = ''
    stream = ''

    def __init__(self):
        self.init()

    def on_message(self, ws, message):
        print ws, "ok"
        print message

    def on_error(self, ws, error):
        print error

    def on_close(self, ws):
        print "down"
        exit()

    def on_open(self, ws):
        print "opening connection"
        ws.send("Hello.")
        self.main()

    def main(self):
        print "main"
        output = StringIO.StringIO()
        while True:
            output.seek(0)
            self.picam.capture(output, format="jpeg")
            encoded_string = base64.b64encode(output.getvalue())
            self.ws.send("{\"Image\":\"" + encoded_string + "\"}")
            time.sleep(0.2)
            output.flush()

    def init(self):
        print "init"
        websocket.enableTrace(True)
        self.picam = picamera.PiCamera()
        self.picam.resolution = (640, 480)
        self.stream = io.BytesIO()
        self.picam.start_preview()
        self.ws = websocket.WebSocketApp("ws://xxxxx.",
                                         on_message=self.on_message,
                                         on_error=self.on_error,
                                         on_close=self.on_close,
                                         on_open=self.on_open)
        self.ws.run_forever()
After starting, it sends one image every ~1-2 seconds.
When I instead place a base64 image string into a class variable and send that repeatedly, it sends every ~0.2 s.
When I just capture the image from the PiCam without sending it over the websocket, capturing also takes only ~0.2 s.
I do not understand why the combination works so slowly.
Try using the use_video_port option of the capture method:
self.picam.capture(output, format="jpeg", use_video_port=True)
Captures taken from the video port skip the slower still-image capture sequence that picamera uses by default, so the per-frame capture time drops considerably (at some cost in image quality).
