I'm trying to establish real-time audio communication between Pepper's tablet and my PC, using GStreamer. The audio from Pepper's mic to the PC is working, but there seems to be no audio from my PC to Pepper's tablet. What am I doing wrong?
PC side:
audio_pipeline = Gst.Pipeline('audio_pipeline')
audio_udpsrc = Gst.ElementFactory.make('udpsrc', None)
audio_udpsrc.set_property('port', args.audio)
audio_caps = Gst.caps_from_string('application/x-rtp,media=(string)audio, clock-rate=(int)44100, width=16, height=16, encoding-name=(string)L16, encoding-params=(string)1, channels=(int)1, channel-positions=(int)1, payload=(int)96')
audio_filter = Gst.ElementFactory.make('capsfilter', None)
audio_filter.set_property('caps',audio_caps)
audio_depay = Gst.ElementFactory.make('rtpL16depay', None)
audio_convert = Gst.ElementFactory.make('audioconvert', None)
audio_sink = Gst.ElementFactory.make('alsasink', None)
audio_sink.set_property('sync',False)
audio_pipeline.add(audio_udpsrc,audio_filter,audio_depay,audio_convert,audio_sink)
audio_udpsrc.link(audio_filter)
audio_filter.link(audio_depay)
audio_depay.link(audio_convert)
audio_convert.link(audio_sink)
Robot side (Choregraphe):
audio_src = gst.element_factory_make('autoaudiosrc')
audio_convert = gst.element_factory_make('audioconvert')
audio_caps = gst.caps_from_string('audio/x-raw-int,channels=1,depth=16,width=16,rate=44100')
audio_filter = gst.element_factory_make('capsfilter')
audio_filter.set_property('caps',audio_caps)
# audio_enc = gst.element_factory_make('mad')
audio_pay = gst.element_factory_make('rtpL16pay')
audio_udp = gst.element_factory_make('udpsink')
audio_udp.set_property('host',user_ip)
audio_udp.set_property('port',int(user_audio_port))
self.audio_pipeline.add(audio_src,audio_convert,audio_filter,audio_pay,audio_udp)
gst.element_link_many(audio_src,audio_convert,audio_filter,audio_pay,audio_udp)
or
Robot's side (Python SDK):
GObject.threads_init()
Gst.init(None)
audio_pipeline = Gst.Pipeline('audio_pipeline')
audio_src = Gst.ElementFactory.make('autoaudiosrc')
audio_convert = Gst.ElementFactory.make('audioconvert')
audio_caps = Gst.caps_from_string('audio/x-raw-int,channels=2,depth=16,width=16,rate=44100')
audio_filter = Gst.ElementFactory.make('capsfilter')
audio_filter.set_property('caps',audio_caps)
audio_pay = Gst.ElementFactory.make('rtpL16pay')
audio_udp = Gst.ElementFactory.make('udpsink')
audio_udp.set_property('host',user_ip)
audio_udp.set_property('port',int(user_audio_port))
audio_pipeline.add(audio_src,audio_convert,audio_filter,audio_pay,audio_udp)
audio_src.link(audio_convert)
audio_convert.link(audio_filter)
audio_filter.link(audio_pay)
audio_pay.link(audio_udp)
audio_pipeline.set_state(Gst.State.PLAYING)
Computer's mic to Pepper:
audio_port = 80
s_audio_pipeline = Gst.Pipeline('s_audio_pipeline')
s_audio_src = Gst.ElementFactory.make('autoaudiosrc')
s_audio_convert = Gst.ElementFactory.make('audioconvert')
s_audio_caps = Gst.caps_from_string('audio/x-raw-int,channels=2,depth=16,width=16,rate=44100')
s_audio_filter = Gst.ElementFactory.make('capsfilter')
s_audio_filter.set_property('caps',s_audio_caps)
s_audio_pay = Gst.ElementFactory.make('rtpL16pay')
s_audio_udp = Gst.ElementFactory.make('udpsink')
s_audio_udp.set_property('host',ip)
s_audio_udp.set_property('port',int(audio_port))
s_audio_pipeline.add(s_audio_src,s_audio_convert,s_audio_filter,s_audio_pay,s_audio_udp)
s_audio_src.link(s_audio_convert)
s_audio_convert.link(s_audio_filter)
s_audio_filter.link(s_audio_pay)
s_audio_pay.link(s_audio_udp)
Pepper receiving:
audio = 80
r_audio_pipeline = Gst.Pipeline('r_audio_pipeline')
#defining the audio pipeline elements
r_audio_udpsrc = Gst.ElementFactory.make('udpsrc', None)
r_audio_udpsrc.set_property('port', audio)
r_audio_caps = Gst.caps_from_string('application/x-rtp,media=(string)audio, clock-rate=(int)44100, width=16, height=16, encoding-name=(string)L16, encoding-params=(string)1, channels=(int)2, format=(string)S16LE, channel-positions=(int)1, payload=(int)96')
r_audio_filter = Gst.ElementFactory.make('capsfilter', None)
r_audio_filter.set_property('caps',r_audio_caps)
r_audio_depay = Gst.ElementFactory.make('rtpL16depay', None)
r_audio_convert = Gst.ElementFactory.make('audioconvert', None)
r_audio_sink = Gst.ElementFactory.make('alsasink', None)
r_audio_sink.set_property('sync',False)
#linking the elements
r_audio_pipeline.add(r_audio_udpsrc,r_audio_filter,r_audio_depay,r_audio_convert,r_audio_sink)
r_audio_udpsrc.link(r_audio_filter)
r_audio_filter.link(r_audio_depay)
r_audio_depay.link(r_audio_convert)
r_audio_convert.link(r_audio_sink)
r_audio_pipeline.set_state(Gst.State.PLAYING)
I think there might be a problem with Pepper's receiving port number... I tried different port numbers (including 9559), but nothing seemed to work. Is the source ID wrong?
Is it possible to run the two-way stream in the same pipeline?
I also took a look at other libraries like ffmpeg and PyAudio, but I couldn't find any method for live streaming.
Make sure you run the Python script on the robot.
Also, did you run a GMainLoop?
Choregraphe behaviors run inside NAOqi, and NAOqi already runs a GMainLoop in the background. Maybe that is what is missing from your stand-alone script.
Finally, you show no piece of code in your snippets that is meant to take the PC's audio to the network, nor from the network to Pepper's speakers.
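For reference, here is a minimal sketch of what running a main loop in a stand-alone script could look like. These are my assumptions, not the original poster's code: GStreamer 1.0 Python bindings, and a receiver pipeline on port 3000 built with parse_launch (build yours with ElementFactory as above if you prefer):

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)
# assumed example pipeline, equivalent to the receiver snippets above
audio_pipeline = Gst.parse_launch(
    'udpsrc port=3000 caps="application/x-rtp,media=audio,clock-rate=44100,'
    'encoding-name=L16,channels=1,payload=96" ! rtpL16depay ! audioconvert ! autoaudiosink')
audio_pipeline.set_state(Gst.State.PLAYING)

# Without a running main loop, bus messages (errors, EOS) are never serviced
# in a stand-alone script; inside Choregraphe, NAOqi's own GMainLoop does this.
loop = GLib.MainLoop()
try:
    loop.run()
except KeyboardInterrupt:
    audio_pipeline.set_state(Gst.State.NULL)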
Related
Using python-vlc in Python
I want to display a video on each of two monitors. Both videos play successfully, but only monitor 1 shows them. Is there a way to show a video on the second monitor?
p1 = vlc.Instance('--directx-device=\\\\.\\DISPLAY1')
# p1 = vlc.Instance('--qt-fullscreen-screennumber=1')
media = p1.media_new("t1.mp4")
player = p1.media_player_new()
player.set_media(media)
player.video_set_scale(0.5)
player.play()
p2 = vlc.Instance('--directx-device=\\\\.\\DISPLAY2')
# p2 = vlc.Instance('--qt-fullscreen-screennumber=2')
media = p2.media_new("t2.mp4")
player2 = p2.media_player_new()
player2.set_media(media)
player2.video_set_scale(0.5)
player2.play()
I searched and passed various options to vlc.Instance, but the behavior was the same. Any help is appreciated.
I'm using Python on Windows.
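One workaround that is sometimes suggested (not part of the original question, and untested here) is to stop relying on the --directx-device option and instead create one window per monitor yourself, telling each player where to render with set_hwnd. A sketch using tkinter, assuming monitor 2 starts at x offset 1920:

import tkinter as tk
import vlc

root = tk.Tk()
root.geometry("960x540+0+0")         # window on monitor 1
second = tk.Toplevel(root)
second.geometry("960x540+1920+0")    # assumes monitor 2 begins at x=1920

inst = vlc.Instance()
player = inst.media_player_new()
player.set_media(inst.media_new("t1.mp4"))
player.set_hwnd(root.winfo_id())     # render into the first window (Windows-only call)

player2 = inst.media_player_new()
player2.set_media(inst.media_new("t2.mp4"))
player2.set_hwnd(second.winfo_id())  # render into the second window

player.play()
player2.play()
root.mainloop()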
I'm attempting to write a Python project that plays multiple parts of a song at the same time.
For background, a song is split into "stems", and each stem is played simultaneously to recreate the full song. What I am trying to achieve is using potentiometers to control the volume of each stem, so that the user can mix songs differently. For a commercial comparison, Kanye West's Stem Player is what I am trying to achieve.
I can change the volume of the overlaid song at the end, but what I want is to change the volume of each stem with a potentiometer while the song is playing. Is this even possible using pydub? Below is the code I have right now.
from pydub import AudioSegment
from pydub.playback import play
vocals = AudioSegment.from_file("walkin_vocals.mp3")
drums = AudioSegment.from_file("walkin_drums.mp3")
bass = AudioSegment.from_file("walkin_bass.mp3")
vocalsDrums = vocals.overlay(drums)
bassVocalsDrums = vocalsDrums.overlay(bass)
songQuiet = bassVocalsDrums - 20
play(songQuiet)
I solved this myself: I ended up using PyAudio instead of pydub.
With PyAudio, I was able to define a custom stream_callback function. Within this callback, I multiply each stem by a modifier, then add the stems together into one output buffer.
def callback(in_data, frame_count, time_info, status):
    global drumsMod, vocalsMod, bassMod, otherMod
    # read the next chunk of frames from each stem's wave file
    drums = drumsWF.readframes(frame_count)
    vocals = vocalsWF.readframes(frame_count)
    bass = bassWF.readframes(frame_count)
    other = otherWF.readframes(frame_count)
    # decode the raw bytes into int16 sample arrays
    decodedDrums = numpy.frombuffer(drums, numpy.int16)
    decodedVocals = numpy.frombuffer(vocals, numpy.int16)
    decodedBass = numpy.frombuffer(bass, numpy.int16)
    decodedOther = numpy.frombuffer(other, numpy.int16)
    # scale each stem by its modifier and mix into a single int16 buffer
    newdata = (decodedDrums*drumsMod + decodedVocals*vocalsMod
               + decodedBass*bassMod + decodedOther*otherMod).astype(numpy.int16)
    return (newdata.tobytes(), pyaudio.paContinue)
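For context, this is roughly how such a callback gets wired into an output stream. A sketch under my own assumptions (stems pre-converted from mp3 to wav so the wave module can read them; all four files sharing the same rate and channel count; the *Mod globals updated elsewhere from the potentiometers):

import wave
import numpy
import pyaudio

drumsWF = wave.open("walkin_drums.wav", 'rb')
vocalsWF = wave.open("walkin_vocals.wav", 'rb')
bassWF = wave.open("walkin_bass.wav", 'rb')
otherWF = wave.open("walkin_other.wav", 'rb')
drumsMod = vocalsMod = bassMod = otherMod = 1.0  # volume modifiers

p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(drumsWF.getsampwidth()),
                channels=drumsWF.getnchannels(),
                rate=drumsWF.getframerate(),
                output=True,
                stream_callback=callback)  # the callback defined above
stream.start_stream()
while stream.is_active():
    pass  # update the *Mod values here while the song plays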
The normal way to create a VideoCapture is this:
cam = cv2.VideoCapture(n)
where n corresponds to the index of the device node (/dev/video0, /dev/video1, ...).
But because I'm building a robot that uses multiple cameras for different things, I needed to make sure each capture was assigned to the correct camera. So I created udev rules that create symbolic links to the correct device node whenever a specific camera is plugged in.
They appear to be working because when I look in the /dev directory I can see the link:
/dev/front_cam -> video1
However I'm not sure how to actually use this now.
I thought I could just open it by filename, as if it were a video file, but cam = cv2.VideoCapture('/dev/front_cam') doesn't work.
Neither does cv2.VideoCapture('/dev/video1').
Neither call throws an error; both return a VideoCapture object, just not an opened one (cam.isOpened() returns False).
Each of my video4linux devices creates 2 device nodes. For example, /dev/video0 and /dev/video1 are both related to my internal webcam. When I plug in a second USB webcam, /dev/video2 and /dev/video3 both appear. However, I can only use the lower-numbered device of each pair for video capture (i.e. /dev/video0 and /dev/video2).
I watched my device arrival with udevadm monitor, and then inspected each of the camera devices with udevadm info --path=$PATH_FROM_UDEVADM_MONITOR --attribute-walk. The devices which work for video capture have ATTR{index}=="0".
Maybe instead of trying to open /dev/video1, you just need to open /dev/video0:
cam = cv2.VideoCapture("/dev/video0")
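As a side note, that ATTR{index} attribute is also exposed through sysfs, so the capture-capable nodes can be picked without udevadm. A hedged sketch (Linux only; a typical sysfs layout assumed):

import glob

def capture_nodes():
    """Return the /dev/video* nodes whose v4l index attribute is 0."""
    nodes = []
    for idx_file in glob.glob("/sys/class/video4linux/video*/index"):
        with open(idx_file) as f:
            if f.read().strip() == "0":
                nodes.append("/dev/" + idx_file.split("/")[-2])
    return sorted(nodes)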
Instead of the suggested solution I found a shorter one, though it feels a little hacky: I just look at where the symbolic link points, find the integer in it, and then use that.
import subprocess
import cv2

# follow the symlink to its target, e.g. /dev/videoX
cmd = "readlink -f /dev/CAMC"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
out = process.communicate()[0].decode()
# parse the output for the device number
nums = [int(x) for x in out if x.isdigit()]
cap = cv2.VideoCapture(nums[0])
If you know the model of the camera, you can look it up in /dev/v4l/by-id/.... We are using an HDMI-USB video converter, and we connect to it like this:
#! /usr/bin/env python
import os
import re
import cv2
DEFAULT_CAMERA_NAME = '/dev/v4l/by-id/usb-AVerMedia_Technologies__Inc._Live_Gamer_Portable_2_Plus_5500114600612-video-index0'
device_num = 0
if os.path.exists(DEFAULT_CAMERA_NAME):
    device_path = os.path.realpath(DEFAULT_CAMERA_NAME)
    device_re = re.compile(r"/dev/video(\d+)")
    info = device_re.match(device_path)
    if info:
        device_num = int(info.group(1))
        print("Using default video capture device on /dev/video" + str(device_num))

cap = cv2.VideoCapture(device_num)
This follows the device name symlink to the /dev/video name, then parses that for the device number.
import re
import subprocess
import cv2
import os
device_re = re.compile("Bus\s+(?P<bus>\d+)\s+Device\s+(?P<device>\d+).+ID\s(?P<id>\w+:\w+)\s(?P<tag>.+)$", re.I)
df = subprocess.check_output("lsusb", shell=True)
for i in df.split('\n'):
if i:
info = device_re.match(i)
if info:
dinfo = info.groupdict()
if "Logitech, Inc. Webcam C270" in dinfo['tag']:
print "Camera found."
bus = dinfo['bus']
device = dinfo['device']
break
device_index = None
for file in os.listdir("/sys/class/video4linux"):
real_file = os.path.realpath("/sys/class/video4linux/" + file)
print real_file
print "/" + str(bus[-1]) + "-" + str(device[-1]) + "/"
if "/" + str(bus[-1]) + "-" + str(device[-1]) + "/" in real_file:
device_index = real_file[-1]
print "Hurray, device index is " + str(device_index)
camera = cv2.VideoCapture(int(device_index))
while True:
(grabbed, frame) = camera.read() # Grab the first frame
cv2.imshow("Camera", frame)
key = cv2.waitKey(1) & 0xFF
First, search for the desired string in the USB device list and grab the bus and device numbers. Then find the matching symbolic link under the video4linux directory, extract the device index from its real path, and pass that to VideoCapture.
A possibility that was not explored in the other answers is to use the "name" file in the /sys/class/video4linux/video*/ directories.
Example:
import os
import re

def get_camera(camera_name):
    cam_num = None
    for file in os.listdir("/sys/class/video4linux"):
        real_file = os.path.realpath("/sys/class/video4linux/" + file + "/name")
        with open(real_file, "rt") as name_file:
            name = name_file.read().rstrip()
        if camera_name in name:
            cam_num = int(re.search(r"\d+$", file).group(0))
            found = "FOUND!"
        else:
            found = "      "
        print("{} {} -> {}".format(found, file, name))
    return cam_num
Which gives:
get_camera('HUE')
FOUND! video1 -> HUE HD Pro Camera: HUE HD Pro C
video0 -> HP HD Camera: HP HD Camera
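The returned number can then go straight into OpenCV, for example:

cam_num = get_camera('HUE')
if cam_num is not None:
    cap = cv2.VideoCapture(cam_num)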
I am attempting to create a program in Python that plays a particular harpsichord note when a certain key is pressed, and stays responsive so you can continue to play more notes (like a normal electric piano). However, because the wav files the notes are stored in are about 7-10 seconds long, I am running into some issues. I can press at least 10 keys per second, so over the duration of one note I could have around 100 different wav files playing at once. I tried winsound, but it was unable to play multiple wav files at once. I then moved on to PyAudio, and it kind of works. The only way I found to accomplish what I wanted was this:
from msvcrt import getch
import pyaudio
import wave
import multiprocessing as mp
#This function is just code for playing a sound in PyAudio
def playNote(filename):
    CHUNK = 1024
    wf = wave.open(filename, 'rb')
    p = pyaudio.PyAudio()
    # open an output stream matching the wav file's format
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(CHUNK)
    while data:
        stream.write(data)
        data = wf.readframes(CHUNK)
    stream.stop_stream()
    stream.close()
    p.terminate()

if __name__ == "__main__":
    while True:
        # If the 'a' key is pressed: start a new process that calls playNote
        # and pass in the file name for a note.
        if ord(getch()) == 97:  # a
            mp.Process(target=playNote, args=(r"F:\Project Harpsichord\The wavs\A1.wav",)).start()
        # If the 's' key is pressed: start a new process that calls playNote
        # and pass in the file name for another note.
        if ord(getch()) == 115:  # s
            mp.Process(target=playNote, args=(r"F:\Project Harpsichord\The wavs\A0.wav",)).start()
Basically whenever I want to play a new wav, I have to start a new process that runs the code in the playNote function. As I already stated I can potentially have up to 100 of these playing at once. Suffice it to say, one hundred copies of the python interpreter all running at once almost crashed my computer. I also tried a similar approach with multi-threading, but had the same problems.
This post shows a way to mix multiple wav files together so they can be played at the same time, but since my program will not necessarily be starting the sounds at the same time I am unsure if this will work.
I need an efficient way to play multiple notes at the same time. Whether this comes in the form of another library, or even a different language I really don't care.
I checked out pygame as J.F. Sebastian suggested, and it ended up being exactly what I needed. I used pygame.mixer.Sound() in conjunction with pygame.mixer.set_num_channels(). Here's what I came up with.
import pygame as pg
import time
pg.mixer.init()
pg.init()
a1Note = pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A1.wav")
a2Note = pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A0.wav")
pg.mixer.set_num_channels(50)
for i in range(25):
    a1Note.play()
    time.sleep(0.3)
    a2Note.play()
    time.sleep(0.3)
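Tying this back to the original key-press loop is then straightforward. A sketch (my adaptation, not the original poster's code; Windows, same wav paths as above; note msvcrt.getch() returns bytes on Python 3):

from msvcrt import getch
import pygame as pg

pg.mixer.init()
pg.mixer.set_num_channels(50)
notes = {
    b'a': pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A1.wav"),
    b's': pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A0.wav"),
}
while True:
    key = getch()
    if key in notes:
        notes[key].play()  # each play() grabs a free mixer channel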
This doesn't really solve your problem, but it's too long for the comments, and it may be useful. I gave it a bash, got defeated on a few fronts - giving up and going for pizza. Audio is really not my thing, but it was quite a lot of fun playing around with it.
Give Pydub a look. I've played around with a couple of methods, but haven't had any satisfactory success. This answer explains quite a few things about adding two signals together nicely. I assume the static you're getting is because of clipping.
Sorry that I didn't deliver, but I may as well post all the things I've created in case you or someone else wants to grab something from it:
#using python 2.7
#example animal sounds from http://www.wavsource.com/animals/animals.htm
#note that those sounds have lots of different sampling rates and encoding types. Causes problems.
#required installs:
#numpy
#scipy
#matplotlib
#pyaudio -sudo apt-get install python-pyaudio
#pydub: -pip install pydub
def example():
    """example sounds and random inputs"""
    sExampleSoundsDir = "/home/roman/All/Code/sound_files"
    sExampleFile1 = 'bird.wav'
    sExampleFile2 = 'frog.wav'
    oJ = Jurgenmeister(sExampleSoundsDir)

    # load audio into numpy array
    dSound1 = oJ.audio2array(sExampleFile1)
    dSound2 = oJ.audio2array(sExampleFile2)

    # Simply adding the arrays is noisy...
    dResSound1 = oJ.resample(dSound1)
    dResSound2 = oJ.resample(dSound2)
    dJoined = oJ.add_sounds(dResSound1, dResSound2)

    # pydub method
    oJ.overlay_sounds(sExampleFile1, sExampleFile2)

    # listen to the audio - mixed success with these sounds.
    oJ.play_array(dSound1)
    oJ.play_array(dSound2)
    oJ.play_array(dResSound1)
    oJ.play_array(dResSound2)
    oJ.play_array(dJoined)

    # see what the waveform looks like
    oJ.plot_audio(dJoined)
class Jurgenmeister:
    """
    Methods to play as many sounds on command as necessary.
    Named in honour of OP, and it's as good a name as I can come up with myself.
    """

    def __init__(self, sSoundsDir):
        import os
        import random
        lAllSounds = os.listdir(sSoundsDir)
        self.sSoundsDir = sSoundsDir
        self.lAllSounds = lAllSounds
        self.sRandSoundName = lAllSounds[random.randint(0, len(lAllSounds)-1)]
    def play_wave(self, sFileName):
        """PyAudio play a wave file."""
        import pyaudio
        import wave
        iChunk = 1024
        sDir = "{}/{}".format(self.sSoundsDir, sFileName)
        oWave = wave.open(sDir, 'rb')
        oPyaudio = pyaudio.PyAudio()
        oStream = oPyaudio.open(
            format=oPyaudio.get_format_from_width(oWave.getsampwidth()),
            channels=oWave.getnchannels(),
            rate=oWave.getframerate(),
            output=True
        )
        sData = oWave.readframes(iChunk)
        while sData:
            oStream.write(sData)
            sData = oWave.readframes(iChunk)
        oStream.stop_stream()
        oStream.close()
        oPyaudio.terminate()
    def audio2array(self, sFileName):
        """
        Returns monotone data for a wav audio file in form:
        iSampleRate, aNumpySignalArray, aNumpyTimeArray

        Should perhaps do this with scipy again, but I threw that code away because I wanted
        to try the pyaudio package because of its streaming functions. They defeated me.
        """
        import wave
        import numpy as np

        sDir = "{}/{}".format(self.sSoundsDir, sFileName)
        oWave = wave.open(sDir, "rb")
        tParams = oWave.getparams()
        iSampleRate = tParams[2]  # frames per second
        iLen = tParams[3]  # number of frames

        # depending on the type of encoding of the file. Usually 16
        try:
            sSound = oWave.readframes(iLen)
            oWave.close()
            aSound = np.fromstring(sSound, np.int16)
        except ValueError:
            raise ValueError("""wave package seems to want all wav encodings to be in int16, else it throws a mysterious error.
            Short way around it: find audio encoded in the right format. Or use scipy.io.wavfile.
            """)

        aTime = np.array([float(i)/iSampleRate for i in range(len(aSound))])
        dRet = {
            'iSampleRate': iSampleRate,
            'aTime': aTime,
            'aSound': aSound,
            'tParams': tParams
        }
        return dRet
    def resample(self, dSound, iResampleRate=11025):
        """resample audio arrays
        common audio sample rates are 44100, 22050, 11025, 8000
        creates very noisy results sometimes.
        """
        from scipy import interpolate
        import numpy as np

        aSound = np.array(dSound['aSound'])
        iOldRate = dSound['iSampleRate']
        iOldLen = len(aSound)
        rPeriod = float(iOldLen)/iOldRate
        iNewLen = int(rPeriod*iResampleRate)

        aTime = np.arange(0, rPeriod, 1.0/iOldRate)
        aTime = aTime[0:iOldLen]
        oInterp = interpolate.interp1d(aTime, aSound)

        aResTime = np.arange(0, aTime[-1], 1.0/iResampleRate)
        aTime = aTime[0:iNewLen]
        aResSound = oInterp(aResTime)
        aResSound = np.array(aResSound, np.int16)

        tParams = list(x for x in dSound['tParams'])
        tParams[2] = iResampleRate
        tParams[3] = iNewLen
        tParams = tuple(tParams)

        dResSound = {
            'iSampleRate': iResampleRate,
            'aTime': aResTime,
            'aSound': aResSound,
            'tParams': tParams
        }
        return dResSound
    def add_sounds(self, dSound1, dSound2):
        """join two sounds together and return new array
        This method creates a lot of clipping. Not sure how to get around that.
        """
        if dSound1['iSampleRate'] != dSound2['iSampleRate']:
            raise ValueError('sample rates must be the same. Please resample first.')

        import numpy as np
        aSound1 = dSound1['aSound']
        aSound2 = dSound2['aSound']

        if len(aSound1) < len(aSound2):
            aRet = aSound2.copy()
            aRet[:len(aSound1)] += aSound1
            aTime = dSound2['aTime']
            tParams = dSound2['tParams']
        else:
            aRet = aSound1.copy()
            aRet[:len(aSound2)] += aSound2
            aTime = dSound1['aTime']
            tParams = dSound1['tParams']

        aRet = np.array(aRet, np.int16)
        dRet = {
            'iSampleRate': dSound1['iSampleRate'],
            'aTime': aTime,
            'aSound': aRet,
            'tParams': tParams
        }
        return dRet
    def overlay_sounds(self, sFileName1, sFileName2):
        """I think this method warrants a bit more exploration.
        Also very noisy."""
        from pydub import AudioSegment
        sDir1 = "{}/{}".format(self.sSoundsDir, sFileName1)
        sDir2 = "{}/{}".format(self.sSoundsDir, sFileName2)
        sound1 = AudioSegment.from_wav(sDir1)
        sound2 = AudioSegment.from_wav(sDir2)
        # mix sound2 with sound1, starting at 0ms into sound1
        output = sound1.overlay(sound2, position=0)
        # save the result
        sDir = "{}/{}".format(self.sSoundsDir, 'OUTPUT.wav')
        output.export(sDir, format="wav")
    def array2audio(self, dSound, sDir=None):
        """
        Writes a .wav audio file to disk from an array.
        """
        import struct
        import wave

        if sDir is None:
            sDir = "{}/{}".format(self.sSoundsDir, 'OUTPUT.wav')

        aSound = dSound['aSound']
        tParams = dSound['tParams']
        sSound = struct.pack('h'*len(aSound), *aSound)

        oWave = wave.open(sDir, "wb")
        oWave.setparams(tParams)
        oWave.writeframes(sSound)
        oWave.close()
    def play_array(self, dSound):
        """Tried to use pyaudio to play the array by just streaming it. It didn't behave, and I moved on.
        I'm just not getting the pyaudio stream to play without weird distortion
        when not loading from file. Perhaps you have more luck.
        """
        self.array2audio(dSound)
        self.play_wave('OUTPUT.wav')
    def plot_audio(self, dSound):
        """just plots the audio array. Nice to see plots when things are going wrong."""
        import matplotlib.pyplot as plt
        plt.plot(dSound['aTime'], dSound['aSound'])
        plt.show()


if __name__ == "__main__":
    example()
I also get this error when I use wave. It still works, so I just ignore it. The problem seems to be widespread. Error lines:
ALSA lib pcm_dsnoop.c:618:(snd_pcm_dsnoop_open) unable to open slave
ALSA lib pcm_dmix.c:1022:(snd_pcm_dmix_open) unable to open slave
ALSA lib pcm.c:2239:(snd_pcm_open_noupdate) Unknown PCM cards.pcm.rear
ALSA lib pcm.c:2239:(snd_pcm_open_noupdate) Unknown PCM cards.pcm.center_lfe
ALSA lib pcm.c:2239:(snd_pcm_open_noupdate) Unknown PCM cards.pcm.side
bt_audio_service_open: connect() failed: Connection refused (111)
bt_audio_service_open: connect() failed: Connection refused (111)
bt_audio_service_open: connect() failed: Connection refused (111)
bt_audio_service_open: connect() failed: Connection refused (111)
ALSA lib pcm_dmix.c:1022:(snd_pcm_dmix_open) unable to open slave
Cannot connect to server socket err = No such file or directory
Cannot connect to server request channel
jack server is not running or cannot be started
Good luck!
I'm trying to create an audio stream that has a constant audio source (in this case, audiotestsrc) to which I can occasionally add sounds from files (of various formats, that's why I'm using decodebin) through the play_file() method. I use an adder for that purpose. However, for some reason, I cannot add the second sound correctly. Not only does the program play the sound incorrectly, it also completely stops the original audiotestsrc. Here's my code so far:
import gst; import gobject; gobject.threads_init()
pipe = gst.Pipeline()
adder = gst.element_factory_make("adder", "adder")
first_sink = adder.get_request_pad('sink%d')
pipe.add(adder)
test = gst.element_factory_make("audiotestsrc", "test")
test.set_property('freq', 100)
pipe.add(test)
testsrc = test.get_pad("src")
testsrc.link(first_sink)
output = gst.element_factory_make("alsasink", "output")
pipe.add(output)
adder.link(output)
pipe.set_state(gst.STATE_PLAYING)
raw_input('Press key to play sound')
def play_file(filename):
    adder_sink = adder.get_request_pad('sink%d')
    audiofile = gst.element_factory_make('filesrc', 'audiofile')
    audiofile.set_property('location', filename)
    decoder = gst.element_factory_make('decodebin', 'decoder')

    def on_new_decoded_pad(element, pad, last):
        pad.link(adder_sink)

    decoder.connect('new-decoded-pad', on_new_decoded_pad)
    pipe.add(audiofile)
    pipe.add(decoder)
    audiofile.link(decoder)
    pipe.set_state(gst.STATE_PAUSED)
    pipe.set_state(gst.STATE_PLAYING)
play_file('sample.wav')
while True:
    pass
Thanks to moch on #gstreamer, I realized that all adder inputs should have the same format. I modified the above script so that the caps "audio/x-raw-int, endianness=(int)1234, channels=(int)1, width=(int)16, depth=(int)16, signed=(boolean)true, rate=(int)11025" (for example) are enforced before every input to the adder.
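For illustration, a sketch of what that can look like for the audiotestsrc branch (same gst 0.10 API as the script above; the audioconvert/audioresample pair is my assumption, added so arbitrary inputs can be converted to the common format):

common_caps = gst.caps_from_string(
    'audio/x-raw-int, endianness=(int)1234, channels=(int)1, '
    'width=(int)16, depth=(int)16, signed=(boolean)true, rate=(int)11025')

def link_to_adder(element):
    # route an element's audio through convert/resample/capsfilter into the adder
    conv = gst.element_factory_make('audioconvert')
    resample = gst.element_factory_make('audioresample')
    capsfilter = gst.element_factory_make('capsfilter')
    capsfilter.set_property('caps', common_caps)
    pipe.add(conv, resample, capsfilter)
    gst.element_link_many(element, conv, resample, capsfilter)
    capsfilter.get_pad('src').link(adder.get_request_pad('sink%d'))

link_to_adder(test)  # instead of linking testsrc to first_sink directly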