I need to pass a live video stream from a Python analytics backend to a VB.NET WPF frontend. I'm using a Python server and a VB.NET client communicating via UDP.
This is what I have so far on both ends:
Python Server:
import cv2
import socket
import numpy as np

_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_address = ('localhost', 5000)
_quality = [int(cv2.IMWRITE_JPEG_QUALITY), 80]
vcap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")

while True:
    ret, frame = vcap.read()
    if not ret:
        break
    image = frame
    # processing
    result, imgencode = cv2.imencode('.jpg', image, _quality)
    data = np.array(imgencode)
    stringData = data.tobytes()
    _s.sendto(stringData, _address)
VB.NET client (Using a console app for testing):
Imports System.Net
Imports System.Net.Sockets

Module Program
    Dim Mat = New Emgu.CV.Mat(New System.Drawing.Size(640, 480), Emgu.CV.CvEnum.DepthType.Cv8U, 3)

    Sub Main(args As String())
        Using socket As UdpClient = New UdpClient(5000)
            Try
                While True
                    Dim remoteEP = New IPEndPoint(IPAddress.Any, 5000)
                    Dim data As Byte() = socket.Receive(remoteEP)
                    Emgu.CV.CvInvoke.Imdecode(data, Emgu.CV.CvEnum.ImreadModes.ReducedColor8, Mat)
                    Emgu.CV.CvInvoke.Imshow("", Mat)
                End While
            Catch __unusedSocketException1__ As SocketException
                Throw
            End Try
        End Using
    End Sub
End Module
The code I've got so far doesn't throw any errors, but nothing is displayed in the EmguCV Imshow() window. Any help or alternative solutions for my use case are appreciated!
I am working on a Real Time Video Streaming project using RTMP protocol. I have to use DirectX to capture the screen and then Kurento Media Server to stream.
For capturing, I am using dxcam in python:
import dxcam
import cv2
# import time

# camera = dxcam.create()  # returns a DXCamera instance on the primary monitor
target_fps = 30
camera = dxcam.create(output_idx=0, output_color="BGR")
camera.start(target_fps=target_fps, video_mode=True)
writer = cv2.VideoWriter(
    "video.mp4", cv2.VideoWriter_fourcc(*"mp4v"), target_fps, (1920, 1080)
)
for i in range(1000):
    writer.write(camera.get_latest_frame())
    # time.sleep(10)
camera.stop()
writer.release()
del camera
I need help getting started with Kurento Media Server to stream the captured video in real time, but I can't find any tutorial for that. Can someone help me with that?
I am trying to access the camera feed from a robot, using some sample code from their docs, and running into an error that makes zero sense to me. My code is below.
from furhat_remote_api import FurhatRemoteAPI
import zmq
import numpy as np
import cv2
import json
import sys
import argparse
furhat = FurhatRemoteAPI("localhost")
# Get the voices on the robot
voices = furhat.get_voices()
# Set the voice of the robot
furhat.set_voice(name='Matthew')
# Say "Hi there!"
furhat.say(text="Hi there!")
parser = argparse.ArgumentParser(description='Display furhat camera feed with overlaid annotations (face bounding boxes, user ids, emotion estimates). Make sure the external camera feed is enabled on the robot')
parser.add_argument('addr', default='127.0.0.1', help='IP address to furhat robot, excluding port nr')
# parser.add_argument('-ov','--output_video',help='if specified, raw video will be saved to this file')
args = parser.parse_args()
url = 'tcp://{}:3000'.format(args.addr)
# Setup the sockets
context = zmq.Context()
# Input camera feed from furhat using a SUB socket
insocket = context.socket(zmq.SUB)
insocket.setsockopt_string(zmq.SUBSCRIBE, '')
insocket.connect(url)
insocket.setsockopt(zmq.RCVHWM, 1)
insocket.setsockopt(zmq.CONFLATE, 1) # Only read the last message to avoid lagging behind the stream.
The error I receive is:
usage: main.py [-h] addr
main.py: error: the following arguments are required: addr
Any advice on how to fix this issue?
I am struggling to find a solution for this:
I'm trying to create an image streaming system where I can get all the frames and pass them through a neural network, but somehow I haven't managed to get proper base64 image strings from my functions below.
The provided code works perfectly if I just display the decoded image from the stream, instead of passing it through my functions, which convert it to base64, read it back in memory, and have cv2 show it.
My server code's functions responsible for converting and decoding base64 are described below:
Convert an image object from the stream into base64 BYTES and then into one STRING (this is working as intended):
def convertImgBase64(image):
    try:
        imgString = base64.b64encode(image).decode('utf-8')
        print('converted successfully')
        return imgString
    except os.error as err:
        print(f"Error: '{err}'")
Base64 decoder that should convert back to a readable, cv2-compatible frame (here is where the error begins):
def readb64(base64_string):
    storage = '/home/caio/Desktop/img/'
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        out = open('arq.jpeg', 'wb')
        out.write(sbuf.read())
        out.close()
        print('read b64 string')
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except os.error as err:
        print(f"Error: '{err}'")
This is the current server I am building, but before proceeding I need to get the frame capture working correctly.
from io import BytesIO, StringIO
import numpy as np
import cv2
from imutils.video import FPS
import imagezmq
import base64
import darknet
import os
from PIL import Image as im
from numpy import asarray
from time import sleep

# imagezmq protocol receiver from client
image_hub = imagezmq.ImageHub()

def convertImgBase64(image):
    try:
        imgString = base64.b64encode(image).decode('utf-8')
        return imgString
    except os.error as err:
        print(f"Error: '{err}'")

def readb64(base64_string):
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except os.error as err:
        print(f"Error: '{err}'")

def capture_img():
    while True:
        camera, jpg_buffer = image_hub.recv_jpg()
        buffer = np.frombuffer(jpg_buffer, dtype='uint8')
        imagedecoder = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
        img = im.fromarray(imagedecoder)
        try:
            string = convertImgBase64(imagedecoder)
            cvimg = readb64(string)
            # cv2.imshow(camera, cvimg)  # this is the line where it's not working!
        except os.error as err:
            print(f"Error: '{err}'")
        cv2.imshow(camera, imagedecoder)
        cv2.waitKey(1)  # cv2 won't work without this
        image_hub.send_reply(b'OK')  # imageZMQ needs an acknowledgement that it's OK
Client (Raspberry Pi) code is given below:
import sys
import socket
import time
import cv2
from imutils.video import VideoStream
import imagezmq
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--server-ip", required=True,
                help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())

# initialize the ImageSender object with the socket address of the server
sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(
    args["server_ip"]))
# use either of the formats below to specify the address of the display computer
# sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')

rpi_name = socket.gethostname()  # send RPi hostname with each image
vs = VideoStream(usePiCamera=True, resolution=(800, 600)).start()
time.sleep(2.0)  # allow camera sensor to warm up
jpeg_quality = 95  # 0 to 100, higher is better quality, 95 is cv2 default

while True:  # send images as stream until Ctrl-C
    image = vs.read()
    ret_code, jpg_buffer = cv2.imencode(
        ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
    sender.send_jpg(rpi_name, jpg_buffer)
My error output was posted as a screenshot, which is omitted here. I have been trying the solutions from here and here.
If you know another, better way to pass an image object that I can process inside the YOLO/Darknet neural network, that would be awesome!
Thanks!
The answers provided by @Christoph Rackwitz are correct. The design of ImageZMQ is to send and receive OpenCV images WITHOUT any base64 encoding. The ImageSender class sends OpenCV images. The ImageHub class receives OpenCV images. Optionally, ImageZMQ can send a jpg buffer (as your Raspberry Pi client code is doing).
Your Raspberry Pi client code is based on the ImageZMQ "send jpg" example.
Your server code should therefore use the matching ImageZMQ "receive jpg" example.
The essence of the ImageZMQ "receive jpg" example code is:
import numpy as np
import cv2
import imagezmq

image_hub = imagezmq.ImageHub()
while True:  # show streamed images until Ctrl-C
    rpi_name, jpg_buffer = image_hub.recv_jpg()
    image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
    # see opencv docs for info on the -1 parameter
    cv2.imshow(rpi_name, image)  # 1 window for each RPi
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')
No base64 decoding required. The variable image already contains an OpenCV image. (FYI, I am the author of ImageZMQ)
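For reference, here is a minimal sketch of the non-jpg path mentioned above, where ImageSender/ImageHub move OpenCV arrays directly. The server address and camera index are placeholders; adapt them to your setup.

# sender side: transmit OpenCV images directly, no jpg or base64 step
import socket
import cv2
import imagezmq

sender = imagezmq.ImageSender(connect_to="tcp://server-ip:5555")  # placeholder address
sender_name = socket.gethostname()
cap = cv2.VideoCapture(0)  # placeholder camera index
while True:
    ret, image = cap.read()
    if not ret:
        break
    sender.send_image(sender_name, image)  # sends the numpy array as-is

# receiver side: recv_image() hands back a ready-to-use OpenCV image
import cv2
import imagezmq

image_hub = imagezmq.ImageHub()
while True:
    name, image = image_hub.recv_image()
    cv2.imshow(name, image)
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')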
In my previous post, we found a way to pass an image file from one Python script to another:
pass video data from one python script to another
I am now trying to pass a video (successive images):
write.py
import sys
import numpy as np
import cv2
from PIL import Image
import io
import time

while True:
    img = cv2.imread('cat.jpg')
    bimg = cv2.imencode('.jpg', img)[1]
    sys.stdout.buffer.write(bimg)
    sys.stdout.flush()
    time.sleep(1)
read.py:
import sys
from PIL import Image
import io
import cv2
import numpy as np
from io import BytesIO

while True:
    data = sys.stdin.buffer.read()
    img_np = cv2.imdecode(np.frombuffer(BytesIO(data).read(), np.uint8), cv2.IMREAD_UNCHANGED)
    cv2.imshow('image', img_np)
    cv2.waitKey(0)
If I output the write.py data to the terminal, it prints. If I manually hand data to read.py, it gets read. But put them together (python3 write.py | python3 read.py) and it just hangs: write.py writes only once, and read.py never seems to get it.
My guess is that the read code is waiting for the write code to "end" before it wraps up the data package and calls it an image. Though if that were the case, I would think that doing a flush would fix it.
I think I figured it out. In read.py, sys.stdin.buffer.read() reads and waits until the stdin pipe is closed, but write.py never actually closes its stdout because of the while True loop. This simplified proof-of-concept example works:
write.py
import sys
import time

sys.stdout.buffer.write(b"Hello world")
sys.stdout.buffer.flush()
# Note: if we comment out the code below, it works again
while True:
    # Keep this alive but don't have `while True: pass`
    # because my computer might crash :D
    time.sleep(10)
and read.py
import sys

with open("output.txt", "w") as file:
    file.write(sys.stdin.read())
This will also hang and never actually write anything to "output.txt". If we remove the while True loop from write.py, the code no longer hangs and "Hello world" is written to "output.txt", because when write.py finishes writing, its process exits and that closes the pipe. To fix this issue I recommend changing read.py to something like this:
import sys

while True:
    with open("output.txt", "a") as file:
        file.write(sys.stdin.read(1))
Solution:
write.py
import sys
import time

MAX_FILE_SIZE = 16  # bytes
msg = b"Hello world"

# Tell `read.py` that it needs to read x number of bytes.
length = len(msg)
# We also need to tell `read.py` how many bytes it needs to read.
# This means that we have reached the same problem as before.
# To fix that issue we are always going to send the number of bytes but
# we are going to pad it with `0`s at the start.
# https://stackoverflow.com/a/339013/11106801
length = str(length).zfill(MAX_FILE_SIZE)
sys.stdout.buffer.write(length.encode())
sys.stdout.buffer.write(msg)
sys.stdout.buffer.flush()

# We also need to tell `read.py` that this was the last file that we sent.
# Sending `1` means that the file has ended.
sys.stdout.buffer.write(b"1")
sys.stdout.buffer.flush()

# Note: if we comment out the code below, it works again
while True:
    # Keep this alive but don't have `while True: pass`
    # because my computer might crash :D
    time.sleep(10)
and read.py
import sys
import time

MAX_FILE_SIZE = 16  # bytes

while True:
    time.sleep(1)  # Make sure `write.py` has sent the data
    # Read `MAX_FILE_SIZE` number of bytes and convert it to an int
    # so that we know the size of the file coming in
    length = int(sys.stdin.buffer.read(MAX_FILE_SIZE))
    time.sleep(1)  # Make sure `write.py` has sent the data
    # Here you can switch to a different file every time `write.py`
    # sends a new file
    with open("output.txt", "wb") as file:
        file.write(sys.stdin.buffer.read(length))
    file_ended = sys.stdin.buffer.read(1)
    if file_ended == b"1":
        # File has ended
        break
    else:
        # We are going to start reading again for the next file
        pass
Edit:
The solution works like this:
1. Send the size of the file
2. Send the actual file data
3. Send a byte that tells read.py whether it should expect another file or not
For part 1, we just encode the length of the file as a string padded with 0s at the front. For example, with MAX_FILE_SIZE = 16, the message b"Hello world" goes over the pipe as b"0000000000000011" + b"Hello world" followed by the end-marker byte. Note: make sure that MAX_FILE_SIZE is larger than the size of your largest file (very large values will slightly decrease performance). For part 3, if we send a "1" it means that there are no more files to be sent; otherwise read.py will wait for and accept the next file. So write.py will become:
from math import log
import time
import sys
import cv2

MAX_FILE_SIZE = 62914560  # bytes
MAX_FILE_SIZE = int(log(MAX_FILE_SIZE, 2) + 1)

def write_file(buffer, data, last_file=False):
    # Tell `read.py` that it needs to read x number of bytes.
    length = len(data)
    # We also need to tell `read.py` how many bytes it needs to read.
    # This means that we have reached the same problem as before.
    # To fix that issue we are always going to send the number of bytes but
    # we are going to pad it with `0`s at the start.
    # https://stackoverflow.com/a/339013/11106801
    length = str(length).zfill(MAX_FILE_SIZE)
    with open("output.txt", "w") as file:
        file.write(length)  # debug: also record the length to a file
    buffer.write(length.encode())
    # Write the actual data
    buffer.write(data)
    # We also need to tell `read.py` whether this was the last file we sent.
    # Sending `1` means that the file has ended.
    buffer.write(str(int(last_file)).encode())
    buffer.flush()

while True:
    img = cv2.imread("img.jpg")
    bimg = cv2.imencode(".jpg", img)[1]
    # Call write_file
    write_file(sys.stdout.buffer, bimg, last_file=False)
    # time.sleep(1)  # Don't need this
and read.py will become:
from io import BytesIO
from math import log
import numpy as np
import time
import cv2
import sys

MAX_FILE_SIZE = 62914560  # bytes
MAX_FILE_SIZE = int(log(MAX_FILE_SIZE, 2) + 1)

def read(buffer, number_of_bytes):
    output = b""
    while len(output) < number_of_bytes:
        output += buffer.read(number_of_bytes - len(output))
    assert len(output) == number_of_bytes, "An error occurred."
    return output

def read_file(buffer):
    # Read `MAX_FILE_SIZE` number of bytes and convert it to an int
    # so that we know the size of the file coming in
    length = int(read(buffer, MAX_FILE_SIZE))
    # Here you can switch to a different file every time `write.py`
    # sends a new file
    data = read(buffer, length)
    # Read a byte so that we know if it is the last file
    file_ended = read(buffer, 1)
    return data, (file_ended == b"1")

while True:
    print("Reading file")
    data, last_file = read_file(sys.stdin.buffer)
    img_np = cv2.imdecode(np.frombuffer(BytesIO(data).read(), np.uint8),
                          cv2.IMREAD_UNCHANGED)
    cv2.imshow("image", img_np)
    cv2.waitKey(0)
    if last_file:
        break
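As before, the pair is run with python3 write.py | python3 read.py; because read() loops until exactly number_of_bytes bytes have arrived, partial pipe reads no longer hang the reader.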
You have mentioned that your image to send is not a consistent size, but I have to assume that if it's coming from the same camera (for a given video stream), the raw image size does not change; only the compressed image size does. I would imagine you likely have plenty of RAM to store at least one uncompressed frame in memory at a time, and you're just introducing processing overhead with all the compression and decompression.
Given that, I would create a shared buffer using multiprocessing.shared_memory, which can share frames between the two processes (you can even create a circular buffer of a couple of frames if you want to get really fancy and prevent screen tearing, but it wasn't a big problem in my test).
Given that cv2.VideoCapture().read() can read straight into an existing array, and that you can create a numpy array which uses the shared memory as its buffer, you can read the data into the shared memory with zero extra copying. Using this I was able to read nearly 700 frames per second from a video file encoded with H.264 at 1280x688 resolution.
from multiprocessing.shared_memory import SharedMemory
import cv2
from time import sleep
import numpy as np

vid_device = r"D:\Videos\movies\GhostintheShell.mp4"  # a great movie

# get the first frame to calculate size
cap = cv2.VideoCapture(vid_device)
success, frame = cap.read()
if not success:
    raise Exception("error reading from video")

# create a shared memory for sending the frame shape
frame_shape_shm = SharedMemory(name="frame_shape", create=True, size=frame.ndim * 4)  # 4 bytes per dim as long as int32 is big enough
frame_shape = np.ndarray(3, buffer=frame_shape_shm.buf, dtype='i4')
frame_shape[:] = frame.shape

# create the shared memory for the frame buffer
frame_buffer_shm = SharedMemory(name="frame_buffer", create=True, size=frame.nbytes)
frame_buffer = np.ndarray(frame_shape, buffer=frame_buffer_shm.buf, dtype=frame.dtype)

input("writer is ready: press enter once reader is ready")

try:  # use KeyboardInterrupt to quit
    while True:
        cap.read(frame_buffer)  # read data into the frame buffer
        # sleep(1/24)  # limit framerate-ish (hitting actual framerate is more complicated than 1 line)
except KeyboardInterrupt:
    pass

# cleanup: IMPORTANT, close this one first so the reader doesn't unlink() the
#  shm's before this file has exited. (less important on windows)
cap.release()
frame_buffer_shm.close()
frame_shape_shm.close()
The reader process looks very similar, but instead of creating a video device and reading frames, we just construct the shared array and imshow a bunch. The GUI isn't quite as fast as just dumping the data, so we don't get quite 700 fps, but up to the 500s isn't bad...
from multiprocessing.shared_memory import SharedMemory
import cv2
import numpy as np

# attach to the shared memory holding the frame shape
frame_shape_shm = SharedMemory(name="frame_shape")
frame_shape = np.ndarray([3], buffer=frame_shape_shm.buf, dtype='i4')

# attach to the shared memory for the frame buffer
frame_buffer_shm = SharedMemory(name="frame_buffer")
# create the framebuffer using the shm's memory
frame_buffer = np.ndarray(frame_shape, buffer=frame_buffer_shm.buf, dtype='u1')

try:
    while True:
        cv2.imshow('frame', frame_buffer)
        cv2.waitKey(1)  # this is needed for cv2 to update the gui
except KeyboardInterrupt:
    pass

# cleanup: IMPORTANT, the writer process should close before this one, so nothing
#  tries to access the shm after unlink() is called. (less important on windows)
frame_buffer_shm.close()
frame_buffer_shm.unlink()
frame_shape_shm.close()
frame_shape_shm.unlink()
EDIT: the user's other questions suggested that a version of Python earlier than 3.8 may be a requirement (or even working across versions), so here's an example of using posix_ipc in place of multiprocessing.shared_memory to create the frame buffer (and how to clean it up):
import mmap
import numpy as np
import posix_ipc

# creation
shm = posix_ipc.SharedMemory(name="frame_buf",
                             flags=posix_ipc.O_CREX,  # if this fails, cleanup didn't happen properly last time
                             size=frame.nbytes)
shm_map = mmap.mmap(shm.fd, shm.size)
buf = memoryview(shm_map)

# create the frame buffer
frame_buffer = np.ndarray(frame.shape, buffer=buf, dtype=frame.dtype)
frame_buffer[:] = frame[:]  # copy the first frame into the frame buffer

# cleanup
shm.close_fd()  # can happen right after opening the mmap
buf.release()   # must happen after frame_buffer is no longer needed and before closing the mmap
shm_map.close()
shm.unlink()  # must only be called from one of the two processes. unlink tells the os to reclaim the space once all handles are closed.
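For completeness, a hedged sketch of the reader side attaching to that same segment. It assumes the frame shape and dtype are known to the reader (for example, shared through a second segment, as in the multiprocessing.shared_memory version above); the (720, 1280, 3) shape below is a placeholder.

# reader side: attach to the existing posix_ipc segment
import mmap
import numpy as np
import posix_ipc

shm = posix_ipc.SharedMemory("frame_buf")  # no O_CREX: attach, don't create
shm_map = mmap.mmap(shm.fd, shm.size)
buf = memoryview(shm_map)
shm.close_fd()  # safe once the mmap exists
frame_buffer = np.ndarray((720, 1280, 3), buffer=buf, dtype='u1')  # shape assumed known
# ... display or process frame_buffer here ...
buf.release()
shm_map.close()
# exactly one of the two processes should call shm.unlink() during cleanup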
Two Solutions: ZeroMQ | DiskCache
It is quite easy to send frames from one python file to another using ZeroMQ.
ZeroMQ
Install via PyPI: pip install -U pyzmq. There are multiple ways to send frames.
This is an example using the PUBLISHER and SUBSCRIBER pattern:
# writer.py | publisher
import base64
import time
import zmq
import cv2

# Prepare our context and publisher
context = zmq.Context()
publisher = context.socket(zmq.PUB)
publisher.bind("tcp://*:5563")

CAM_INDEX_OR_URI = 0
capture = cv2.VideoCapture(CAM_INDEX_OR_URI)
assert capture.isOpened(), "Cannot open camera"

while True:
    # capture frame-by-frame
    ret, frame = capture.read()
    if not ret:
        print("[+] No frame received. Stream ended.")
        break
    # resize the frame
    frame = cv2.resize(frame, (640, 480))
    encoded, buffer = cv2.imencode(".jpg", frame)
    # all is good
    # cv2.imshow("Frames", frame)
    # stop with Esc key (27)
    if cv2.waitKey(1) == 27:
        break
    sent_frame = base64.b64encode(buffer)
    publisher.send_multipart([b"camera_A", sent_frame])
    time.sleep(0.01)

# We never get here but clean up anyhow
publisher.close()
context.term()
capture.release()
cv2.destroyAllWindows()
# reader.py | subscriber
import numpy as np
import base64
import zmq
import cv2

# Prepare our context and subscriber
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect("tcp://localhost:5563")
subscriber.setsockopt_string(zmq.SUBSCRIBE, "camera_A")

while True:
    # Read envelope with address
    [address, contents] = subscriber.recv_multipart()
    receive_frame = base64.b64decode(contents)
    frame = np.frombuffer(receive_frame, dtype=np.uint8)
    frame = cv2.imdecode(frame, 1)
    cv2.namedWindow("Frames", cv2.WINDOW_NORMAL)
    cv2.imshow("Frames", frame)
    # stop with Esc key (27)
    if cv2.waitKey(1) == 27:
        break

subscriber.close()
context.term()
cv2.destroyAllWindows()
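A side note on the design: ZeroMQ message frames are binary-safe, so the base64 round-trip above is optional; the JPEG buffer can travel as raw bytes, avoiding roughly 33% size overhead. A minimal sketch of the two loop bodies with that change (same sockets and imports as above):

# publisher loop: send the raw JPEG bytes instead of base64 text
encoded, buffer = cv2.imencode(".jpg", frame)
publisher.send_multipart([b"camera_A", buffer.tobytes()])

# subscriber loop: decode the raw JPEG bytes directly
[address, contents] = subscriber.recv_multipart()
frame = cv2.imdecode(np.frombuffer(contents, dtype=np.uint8), 1)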
DiskCache
You could also consider using diskcache. It allows passing Python objects through memory. It is like Redis, but it is pure Python and does not require a server. NB: pip install --upgrade diskcache. You can tweak the writer to start sending live frames from a camera | video.
# writer.py
import time
from pathlib import Path
import diskcache as dc
import cv2

tmp = Path("/tmp/stream")
with dc.Cache(tmp) as cache:
    print(f"[+] Ready to push data to {tmp}.")
    while True:
        img = cv2.imread("cat.jpg")
        cache.push(img, expire=5)
        time.sleep(10)
# reader.py
import time
from pathlib import Path
import diskcache as dc
import cv2

tmp = Path("/tmp/stream")
with dc.Cache(tmp) as cache:
    print(f"[+] Ready to pull data from {tmp}")
    while True:
        (key, value), _ = cache.pull(expire_time=True)
        if key:
            cv2.imshow("cat", value)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        time.sleep(0.1)
I would go in these directions rather than sys.stdout/sys.stdin, because you have total control over the stream data. See the diskcache documentation.
What about using ROS publishers and subscribers? It would be simple to implement and easy to understand.
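To make that suggestion concrete, here is a minimal sketch assuming ROS 1 with rospy and cv_bridge installed; the node names, the /camera/image topic, and the camera index are placeholders.

# publisher.py: publish OpenCV frames as sensor_msgs/Image
import cv2
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

rospy.init_node("frame_publisher")
pub = rospy.Publisher("/camera/image", Image, queue_size=1)
bridge = CvBridge()
cap = cv2.VideoCapture(0)  # placeholder camera index
rate = rospy.Rate(30)  # aim for ~30 fps
while not rospy.is_shutdown():
    ret, frame = cap.read()
    if not ret:
        break
    pub.publish(bridge.cv2_to_imgmsg(frame, encoding="bgr8"))
    rate.sleep()

# subscriber.py: receive and display the frames
import cv2
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

bridge = CvBridge()

def on_image(msg):
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    cv2.imshow("frame", frame)
    cv2.waitKey(1)

rospy.init_node("frame_subscriber")
rospy.Subscriber("/camera/image", Image, on_image)
rospy.spin()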
I want to take an internet audio/radio stream (specifically Longplayer, click for direct stream URL) and play it with Python.
It's preferable that it's backgrounded, such that the script is able to continue running its main loop (e.g. as game background music or something, though Pyglet, PyGame et al. may provide their own tools for that).
I've seen some likely out-of-date examples of recording internet radio using requests and dumping it into a file, but this isn't exactly what I want, and the answers' comments seemed to have arguments about requests being problematic, among other things (see here).
I'm open to using any packages you can pip, so long as they work with Python 3.x. (Currently using 3.6, purely because I haven't gathered the effort to install 3.7 yet.)
To reiterate: I don't want to save the stream, just play it immediately (or with buffering, if that's needed) back to the user. This is preferably without blocking the script, which I imagine would need multithreading/multiprocessing, but that is secondary to just getting playback.
As always seems to be the case with these kinds of apparently simple questions, the devil is in the details. I ended up writing some code that should solve this question. The pip dependencies can be installed using python3 -m pip install ffmpeg-python PyOpenAL. The workflow of the code can be divided into two steps:
The code must download binary chunks of mp3 file data from an online stream and convert them to raw PCM data (basically signed 16-bit amplitude values) for playback. This is done using the ffmpeg-python library, which is a wrapper for FFmpeg. This wrapper runs FFmpeg in a separate process, so no blocking occurs here.
The code must then queue these chunks for playback. This is done using PyOpenAL, which is a wrapper for OpenAL. After creating a device and context to enable audio playback, a 3d-positioned source is created. This source is continuously queued with buffers (simulating a "ring buffer") that are filled with data piped in from FFmpeg. This runs on a separate thread from the first step, making downloading new audio chunks run independently from audio chunk playback.
Here is what that code looks like (with some commenting). Please let me know if you have any questions about the code or any other part of this answer.
import ctypes
import ffmpeg
import numpy as np
from openal.al import *
from openal.alc import *
from queue import Queue, Empty
from threading import Thread
import time
from urllib.request import urlopen

def init_audio():
    # Create an OpenAL device and context.
    device_name = alcGetString(None, ALC_DEFAULT_DEVICE_SPECIFIER)
    device = alcOpenDevice(device_name)
    context = alcCreateContext(device, None)
    alcMakeContextCurrent(context)
    return (device, context)

def create_audio_source():
    # Create an OpenAL source.
    source = ctypes.c_uint()
    alGenSources(1, ctypes.pointer(source))
    return source

def create_audio_buffers(num_buffers):
    # Create a ctypes array of OpenAL buffers.
    buffers = (ctypes.c_uint * num_buffers)()
    buffers_ptr = ctypes.cast(
        ctypes.pointer(buffers),
        ctypes.POINTER(ctypes.c_uint),
    )
    alGenBuffers(num_buffers, buffers_ptr)
    return buffers_ptr

def fill_audio_buffer(buffer_id, chunk):
    # Fill an OpenAL buffer with a chunk of PCM data.
    alBufferData(buffer_id, AL_FORMAT_STEREO16, chunk, len(chunk), 44100)

def get_audio_chunk(process, chunk_size):
    # Fetch a chunk of PCM data from the FFmpeg process.
    return process.stdout.read(chunk_size)

def play_audio(process):
    # Queues up PCM chunks for playing through OpenAL.
    num_buffers = 4
    chunk_size = 8192
    device, context = init_audio()
    source = create_audio_source()
    buffers = create_audio_buffers(num_buffers)
    # Initialize the OpenAL buffers with some chunks
    for i in range(num_buffers):
        buffer_id = ctypes.c_uint(buffers[i])
        chunk = get_audio_chunk(process, chunk_size)
        fill_audio_buffer(buffer_id, chunk)
    # Queue the OpenAL buffers into the OpenAL source and start playing sound!
    alSourceQueueBuffers(source, num_buffers, buffers)
    alSourcePlay(source)
    num_used_buffers = ctypes.pointer(ctypes.c_int())
    while True:
        # Check if any buffers are used up/processed and refill them with data.
        alGetSourcei(source, AL_BUFFERS_PROCESSED, num_used_buffers)
        if num_used_buffers.contents.value != 0:
            used_buffer_id = ctypes.c_uint()
            used_buffer_ptr = ctypes.pointer(used_buffer_id)
            alSourceUnqueueBuffers(source, 1, used_buffer_ptr)
            chunk = get_audio_chunk(process, chunk_size)
            fill_audio_buffer(used_buffer_id, chunk)
            alSourceQueueBuffers(source, 1, used_buffer_ptr)

if __name__ == "__main__":
    url = "http://icecast.spc.org:8000/longplayer"
    # Run FFmpeg in a separate process using subprocess, so it is non-blocking
    process = (
        ffmpeg
        .input(url)
        .output("pipe:", format='s16le', acodec='pcm_s16le', ac=2, ar=44100, loglevel="quiet")
        .run_async(pipe_stdout=True)
    )
    # Run the audio-playing OpenAL code in a separate thread
    thread = Thread(target=play_audio, args=(process,), daemon=True)
    thread.start()
    # Some example code to show that this is not being blocked by the audio.
    start = time.time()
    while True:
        print(time.time() - start)
With pyminiaudio (it provides an Icecast stream source class):
import miniaudio

def title_printer(client: miniaudio.IceCastClient, new_title: str) -> None:
    print("Stream title: ", new_title)

with miniaudio.IceCastClient("http://icecast.spc.org:8000/longplayer",
                             update_stream_title=title_printer) as source:
    print("Connected to internet stream, audio format:", source.audio_format.name)
    print("Station name: ", source.station_name)
    print("Station genre: ", source.station_genre)
    print("Press <enter> to quit playing.\n")
    stream = miniaudio.stream_any(source, source.audio_format)
    with miniaudio.PlaybackDevice() as device:
        device.start(stream)
        input()  # wait for user input, stream plays in background