I have a WAV recording of a conversation between two people (customer and tech support).
I have three separate functions that extract one voice, cut out 10 seconds, and transform it into an embedding.
from scipy.io import wavfile as wf  # `wf` below matches scipy.io.wavfile's (rate, data) API
# VoiceActivityDetection comes from the project's VAD package (import not shown here)

def get_customer_voice(file):
    print('getting customer voice only')
    wav = wf.read(file)
    ch = wav[1].shape[1]  # channel count; customer voice always in the same track (index 1 here)
    sr = wav[0]
    c1 = wav[1][:, 1]  # the customer channel
    if ch == 1:  # mono file: no separate customer track to extract
        exit()
    vad = VoiceActivityDetection()
    vad.process(c1)
    voice_samples = vad.get_voice_samples()
    # this is the trouble - how to pass this on without saving it anywhere as wav?
    wf.write('%s_customer.wav' % file, sr, voice_samples)
The function below cuts the first 10 seconds from the WAV file written by the function above.
import sys
from pydub import AudioSegment

def get_customer_voice_10_seconds(file):
    voice = AudioSegment.from_wav(file)
    new_voice = voice[0:10000]  # first 10,000 ms = 10 seconds
    file = str(file) + '_10seconds.wav'
    new_voice.export(file, format='wav')

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('give wav file to process!')
    else:
        print(sys.argv)
        get_customer_voice_10_seconds(sys.argv[1])
How can I pass it along as WAV (or another format) without saving it to some directory? This is going to be used in a REST API, and I don't know where it would save that WAV, so it should preferably be passed in memory.
I figured it out - the function below just works, without saving or buffering anything.
It receives a WAV file, edits it, and sends the result straight to the get-math-embedding function:
from scipy.io.wavfile import read  # returns (rate, data), as used below
from pydub import AudioSegment

def get_customer_voice_and_cutting_10_seconds_embedding(file):
    print('getting customer voice only')
    wav = read(file)
    ch = wav[1].shape[1]
    sr = wav[0]
    c1 = wav[1][:, 1]  # customer channel
    vad = VoiceActivityDetection()
    vad.process(c1)
    voice_samples = vad.get_voice_samples()
    # rebuild an AudioSegment in memory from the raw VAD samples
    audio_segment = AudioSegment(voice_samples.tobytes(), frame_rate=sr,
                                 sample_width=voice_samples.dtype.itemsize, channels=1)
    audio_segment = audio_segment[0:10000]  # first 10 seconds
    # pass the in-memory segment straight on - nothing is written to disk
    return get_embedding(audio_segment)
The key is tobytes() in AudioSegment - it just reassembles all the samples into one track again.
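If a downstream step really does need WAV bytes rather than an AudioSegment, a file-like buffer keeps everything in memory. A minimal sketch, assuming the consumer accepts a file object (get_embedding is the same hypothetical name as above):

import io

def segment_to_wav_buffer(audio_segment):
    # export the pydub segment into an in-memory buffer instead of a file path
    buf = io.BytesIO()
    audio_segment.export(buf, format='wav')
    buf.seek(0)  # rewind so the consumer reads from the start
    return buf

# usage sketch: embedding = get_embedding(segment_to_wav_buffer(audio_segment))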
As a personal project, I decided to create one of those Reddit text-to-speech bots.
I pulled the data from Reddit with praw:
import praw, random

def scrapeData(subredditName):
    # Instantiate praw (expects credentials in praw.ini or as kwargs)
    reddit = praw.Reddit()
    # Get subreddit
    subreddit = reddit.subreddit(subredditName)
    # Get a bunch of posts and convert them into a list
    posts = list(subreddit.new(limit=100))
    # Pick a random index, bounded by what was actually returned
    randomNumber = random.randint(0, len(posts) - 1)
    # Store post's title and description in variables
    postTitle = posts[randomNumber].title
    postDesc = posts[randomNumber].selftext
    return postTitle + " " + postDesc
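For reference, the pieces chain together like this (a sketch; the subreddit name is arbitrary and praw.Reddit() still needs credentials configured):

# hypothetical end-to-end run of the steps in this post
text = scrapeData("askreddit")   # any text-heavy subreddit
convertTextToSpeech(text)        # defined below; writes output.mp3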
Then I converted it to speech stored in an .mp3 file with the Google Cloud Text-to-Speech client:
from google.cloud import texttospeech

def convertTextToSpeech(textString):
    # Instantiate the TTS client from service-account credentials
    client = texttospeech.TextToSpeechClient.from_service_account_json("path/to/json")
    # Set text input to be synthesized
    synthesisInput = texttospeech.SynthesisInput(text=textString)
    # Build the voice request
    voice = texttospeech.VoiceSelectionParams(language_code="en-us",
                                              ssml_gender=texttospeech.SsmlVoiceGender.MALE)
    # Select the type of audio file
    audioConfig = texttospeech.AudioConfig(audio_encoding=texttospeech.AudioEncoding.MP3)
    # Perform the TTS request on the text input
    response = client.synthesize_speech(input=synthesisInput, voice=voice,
                                        audio_config=audioConfig)
    # Write the binary audio content to an mp3 file
    with open("output.mp3", "wb") as out:
        out.write(response.audio_content)
I've created an .mp4 with moviepy that has generic footage in the background with the audio synced over it:
from moviepy.editor import *
from moviepy.video.tools.subtitles import SubtitlesClip

# get video and audio source files
clip = VideoFileClip("background.mp4").subclip(20, 30)
audio = AudioFileClip("output.mp3").subclip(0, 10)

# Set audio and create final video
videoClip = clip.set_audio(audio)
videoClip.write_videofile("output.mp4")
but my issue is that I can't find a way to have only the current word or sentence displayed on screen as a subtitle, rather than the entire post.
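One way to approach this (a sketch, not a tested solution: it assumes each word gets an equal share of the audio's duration, which is only a rough approximation) is to build one timed TextClip per word and composite them over the background:

from moviepy.editor import VideoFileClip, AudioFileClip, TextClip, CompositeVideoClip

clip = VideoFileClip("background.mp4").subclip(20, 30)
audio = AudioFileClip("output.mp3").subclip(0, 10)

words = text.split()  # `text` is the post string that was fed to the TTS step
per_word = audio.duration / len(words)  # crude: equal time per word

subtitle_clips = [
    TextClip(word, fontsize=48, color='white')
        .set_start(i * per_word)
        .set_duration(per_word)
        .set_position('center')
    for i, word in enumerate(words)
]

videoClip = CompositeVideoClip([clip.set_audio(audio)] + subtitle_clips)
videoClip.write_videofile("output.mp4")

For word-accurate timing you would need real per-word timestamps, e.g. from a forced aligner, rather than the equal-split guess above.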
I am using Streamlit. I want to prepare an audio dataset; for that I have a .csv file containing n lines that should be spoken by a user, with that user's voice saved in a folder.
Structure of the CSV file:

Sr#    Word
1      Hello
2      World
This file should be loaded and the words shown one by one; below each word the user should record their voice, press a save button to store that audio, and then the next word should appear. I have written the code below, but it is not working properly.
from streamlit_option_menu import option_menu
import streamlit as st
import pandas as pd
import os
import sounddevice as sd
import wavio

def record(duration=5, fs=48000):
    sd.default.samplerate = fs
    sd.default.channels = 1
    myrecording = sd.rec(int(duration * fs))
    sd.wait()  # block until the recording is finished
    return myrecording

def save_record(path_myrecording, myrecording, fs):
    wavio.write(path_myrecording, myrecording, fs, sampwidth=2)
    return None

def read_audio(file):
    with open(file, "rb") as audio_file:
        audio_bytes = audio_file.read()
    return audio_bytes
st.header("Data Pipeline")
with st.sidebar:
choose = option_menu("Modules", ["1. Recording","2. Preprocess Data","3. Create
Dataset"],
icons=['headset', 'mic', 'clipboard'],
default_index=0)
my_list=['a','b']
if choose=="1. Recording":
st.header("Voice Recording")
st.text("Follow the following steps")
s_name=''
s_name= st.text_input("1. Enter Your Name")
if os.path.isdir("./Recordings/"+s_name):
pass
else:
os.mkdir("./Recordings/"+s_name)
file =st.file_uploader("2. Upload an csv file", type=
['csv'],accept_multiple_files=False,key='file')
# if file:
# df= pd.read_csv(file)
# for i in df['Urdu']:
# my_list.append(i)
show_next = st.button("next", disabled = False)
if "current_index" not in st.session_state:
st.session_state.current_index = 0
st.session_state.load_state=True
# Whenever someone clicks on the button
if show_next and st.session_state.load_state :
# Show next element in list
st.write(my_list[st.session_state.current_index])
Record=st.button('Record', key='rec')
if st.session_state.get('rec')!=True:
st.session_state['rec']=True
if st.session_state['rec']==True:
print('i am here')
record_state = st.text("Recording...")
duration = 1 # seconds
fs = 48000
myrecording = record(duration, fs)
path_myrecording = f"./temp/AMAD.wav"
save_record(path_myrecording, myrecording, fs)
st.audio(read_audio(path_myrecording))
path="./Recordings/"+s_name+"/1.wav"
os.remove(path_myrecording)
save_record(path, myrecording, fs)
st.success("Recording Saved")
# Update and store the index
st.session_state.current_index += 1
if len(my_list) == st.session_state.current_index :
st.session_state.load_state = False
st.session_state
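The usual culprit with this pattern is that Streamlit reruns the whole script on every widget interaction, so the nested st.button('Record') state is lost before the recording branch runs. A minimal sketch of the intended flow using an on_click callback (advance_word is a hypothetical name; record and save_record are the functions from the code above):

import streamlit as st

def advance_word():
    # runs before the rerun, so the index survives the button click
    st.session_state.current_index += 1

if "current_index" not in st.session_state:
    st.session_state.current_index = 0

words = ["Hello", "World"]  # in the real app: df['Word'].tolist()
idx = st.session_state.current_index

if idx < len(words):
    st.write(words[idx])
    if st.button("Record"):
        myrecording = record(duration=5, fs=48000)
        save_record(f"./Recordings/{idx + 1}.wav", myrecording, 48000)
        st.success("Recording Saved")
    st.button("next", on_click=advance_word)
else:
    st.write("All words recorded.")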
I have 1,440 audio files to feed into a neural network. The problem is that they are not all the same length. I used the answer posted on Adding silent frame to wav file using python, but it doesn't seem to work. I wanted to add a few seconds of silence to the end of my files, and then trim them all to be 5 seconds long. Can someone please help me with this?
(I also tried using pysox, but that gives me the "This install of SoX cannot process .wav files." error.)
I am using Google Colab for this. The code is:
import wave, os, glob
from pydub import AudioSegment
from pydub.playback import play

path = 'drive/MyDrive/Ravdess/Sad'  # the folder from my Google Drive which has the audio files
count = 0
for filename in glob.glob(os.path.join(path, '*.wav')):
    w = wave.open(filename, 'r')
    d = w.readframes(w.getnframes())
    frames = w.getnframes()
    rate = w.getframerate()
    duration = frames / float(rate)
    count += 1
    print(filename, "count =", count, "duration = ", duration)
    audio_in_file = filename
    audio_out_file = "out.wav"
    new_duration = duration
    # Only append silence until time = 5 seconds.
    one_sec = AudioSegment.silent(duration=2000)  # duration in milliseconds
    song = AudioSegment.from_wav(audio_in_file)
    final_song = one_sec + song
    new_frames = w.getnframes()
    new_rate = w.getframerate()
    new_duration = new_frames / float(rate)
    final_song.export(audio_out_file, format="wav")
    print(final_song, "count =", count, "new duration = ", new_duration)
    w.close()
This gives the output:
drive/MyDrive/Ravdess/Sad/03-01-04-01-02-01-01.wav count = 1 duration = 3.5035
<pydub.audio_segment.AudioSegment object at 0x7fd5b7ca06a0> count = 1 new duration = 3.5035
drive/MyDrive/Ravdess/Sad/03-01-04-01-02-02-01.wav count = 2 duration = 3.370041666666667
<pydub.audio_segment.AudioSegment object at 0x7fd5b7cbc860> count = 2 new duration = 3.370041666666667
... (and so on for all the files)
Since you are already using pydub, I'd do something like this:
from pydub import AudioSegment

input_wav_file = "/path/to/input.wav"
output_wav_file = "/path/to/output.wav"
target_wav_time = 5 * 1000  # 5 seconds (or 5000 milliseconds)

original_segment = AudioSegment.from_wav(input_wav_file)
# pad with silence when the clip is short; never go negative
silence_duration = max(0, target_wav_time - len(original_segment))
silenced_segment = AudioSegment.silent(duration=silence_duration)
# append the silence, then trim so every file is exactly 5 seconds
combined_segment = (original_segment + silenced_segment)[:target_wav_time]
combined_segment.export(output_wav_file, format="wav")
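Applied to the Colab folder from the question, the same logic in a loop might look like this (a sketch; the output folder name is an assumption):

import os, glob
from pydub import AudioSegment

path = 'drive/MyDrive/Ravdess/Sad'
out_dir = 'drive/MyDrive/Ravdess/Sad_5s'  # hypothetical output folder
os.makedirs(out_dir, exist_ok=True)
target = 5 * 1000  # ms

for filename in glob.glob(os.path.join(path, '*.wav')):
    seg = AudioSegment.from_wav(filename)
    # pad short clips, then cut everything to exactly 5 seconds
    seg = (seg + AudioSegment.silent(duration=max(0, target - len(seg))))[:target]
    seg.export(os.path.join(out_dir, os.path.basename(filename)), format="wav")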
I am attempting to create a program in Python that plays a particular harpsichord note when a certain key is pressed, and that remains responsive so you can continue to play more notes (kind of like a normal electric piano). However, because the WAV files the notes are stored in are about 7-10 seconds long, I am experiencing some issues. I can press at least 10 keys per second, so over the duration of one note I could have around 100 different WAV files playing at once. I tried winsound, but it was unable to play multiple WAV files at once. I then moved on to PyAudio, and it kind of works. The only way I found to accomplish what I wanted was this:
from msvcrt import getch
import pyaudio
import wave
import multiprocessing as mp

# This function is just code for playing a sound in PyAudio
def playNote(filename):
    CHUNK = 1024
    wf = wave.open(filename, 'rb')
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(CHUNK)
    while data != b'':  # readframes returns bytes, so compare against b''
        stream.write(data)
        data = wf.readframes(CHUNK)
    stream.stop_stream()
    stream.close()
    p.terminate()

if __name__ == "__main__":
    while True:
        # If the 'a' key is pressed: start a new process that calls playNote
        # and pass in the file name for a note.
        if ord(getch()) == 97:  # a
            mp.Process(target=playNote,
                       args=(r"F:\Project Harpsichord\The wavs\A1.wav",)).start()
        # If the 's' key is pressed: start a new process that calls playNote
        # and pass in the file name for another note.
        if ord(getch()) == 115:  # s
            mp.Process(target=playNote,
                       args=(r"F:\Project Harpsichord\The wavs\A0.wav",)).start()
Basically, whenever I want to play a new WAV, I have to start a new process that runs the code in the playNote function. As I already stated, I can potentially have up to 100 of these playing at once. Suffice it to say, one hundred copies of the Python interpreter all running at once almost crashed my computer. I also tried a similar approach with multi-threading, but had the same problems.
This post shows a way to mix multiple WAV files together so they can be played at the same time, but since my program will not necessarily be starting the sounds at the same time, I am unsure if this will work.
I need an efficient way to play multiple notes at the same time. Whether this comes in the form of another library, or even a different language, I really don't care.
I checked out pygame like J.F. Sebastian suggested. It ended up being exactly what I needed. I used pygame.mixer.Sound() in conjunction with pygame.mixer.set_num_channels(). Here's what I came up with:
import pygame as pg
import time

pg.mixer.init()
pg.init()
a1Note = pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A1.wav")
a2Note = pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A0.wav")
pg.mixer.set_num_channels(50)

for i in range(25):
    a1Note.play()
    time.sleep(0.3)
    a2Note.play()
    time.sleep(0.3)
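Tying that back to the original key-driven design, a rough sketch (the msvcrt loop and file paths are taken from the question; the key-to-sound mapping and channel count are arbitrary):

import pygame as pg
from msvcrt import getch

pg.mixer.init()
pg.init()
pg.mixer.set_num_channels(50)  # enough simultaneous voices for fast playing

# hypothetical key-to-sound mapping built from the question's files
notes = {
    b'a': pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A1.wav"),
    b's': pg.mixer.Sound(r"F:\Project Harpsichord\The wavs\A0.wav"),
}

while True:
    key = getch()
    if key in notes:
        notes[key].play()  # non-blocking: overlapping notes just use more channels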
This doesn't really solve your problem, but it's too long for the comments, and it may be useful. I gave it a bash, got defeated on a few fronts - giving up and going for pizza. Audio is really not my thing, but it was quite a lot of fun playing around with it.
Give Pydub a look. I've played around with a couple of methods, but haven't had any satisfactory success. This answer here explains quite a few things about adding two signals together nicely. I assume that the static you have is because of clipping.
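On the clipping point: summing two int16 signals overflows the int16 range, which sounds like static. A minimal sketch of one way around it (sum in a wider dtype, then scale back down; purely illustrative):

import numpy as np

def mix_without_clipping(a, b):
    # pad the shorter signal, and sum in int32 so nothing wraps around
    n = max(len(a), len(b))
    mixed = np.zeros(n, dtype=np.int32)
    mixed[:len(a)] += a
    mixed[:len(b)] += b
    # scale back into int16 range only if the sum actually exceeds it
    peak = np.abs(mixed).max()
    if peak > 32767:
        mixed = mixed * 32767 // peak
    return mixed.astype(np.int16)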
Sorry that I didn't deliver, but I may as well post all the things I've created in case you or someone else wants to grab something from it:
# using python 2.7
# example animal sounds from http://www.wavsource.com/animals/animals.htm
# note that those sounds have lots of different sampling rates and encoding types. Causes problems.
# required installs:
#   numpy
#   scipy
#   matplotlib
#   pyaudio: sudo apt-get install python-pyaudio
#   pydub:   pip install pydub
def example():
    """example sounds and random inputs"""
    sExampleSoundsDir = "/home/roman/All/Code/sound_files"
    sExampleFile1 = 'bird.wav'
    sExampleFile2 = 'frog.wav'
    oJ = Jurgenmeister(sExampleSoundsDir)

    # load audio into numpy array
    dSound1 = oJ.audio2array(sExampleFile1)
    dSound2 = oJ.audio2array(sExampleFile2)

    # Simply adding the arrays is noisy...
    dResSound1 = oJ.resample(dSound1)
    dResSound2 = oJ.resample(dSound2)
    dJoined = oJ.add_sounds(dResSound1, dResSound2)

    # pydub method
    oJ.overlay_sounds(sExampleFile1, sExampleFile2)

    # listen to the audio - mixed success with these sounds.
    oJ.play_array(dSound1)
    oJ.play_array(dSound2)
    oJ.play_array(dResSound1)
    oJ.play_array(dResSound2)
    oJ.play_array(dJoined)

    # see what the waveform looks like
    oJ.plot_audio(dJoined)
class Jurgenmeister:
    """
    Methods to play as many sounds on command as necessary
    Named in honour of op, and it's as good a name as I can come up with myself.
    """
    def __init__(self, sSoundsDir):
        import os
        import random
        lAllSounds = os.listdir(sSoundsDir)
        self.sSoundsDir = sSoundsDir
        self.lAllSounds = lAllSounds
        self.sRandSoundName = lAllSounds[random.randint(0, len(lAllSounds) - 1)]

    def play_wave(self, sFileName):
        """PyAudio play a wave file."""
        import pyaudio
        import wave
        iChunk = 1024
        sDir = "{}/{}".format(self.sSoundsDir, sFileName)
        oWave = wave.open(sDir, 'rb')
        oPyaudio = pyaudio.PyAudio()
        oStream = oPyaudio.open(
            format=oPyaudio.get_format_from_width(oWave.getsampwidth()),
            channels=oWave.getnchannels(),
            rate=oWave.getframerate(),
            output=True
        )
        sData = oWave.readframes(iChunk)
        while sData != '':  # python 2: readframes returns str
            oStream.write(sData)
            sData = oWave.readframes(iChunk)
        oStream.stop_stream()
        oStream.close()
        oPyaudio.terminate()
    def audio2array(self, sFileName):
        """
        Returns monotone data for a wav audio file in the form:
        iSampleRate, aNumpySignalArray, aNumpyTimeArray

        Should perhaps do this with scipy again, but I threw that code away because I wanted
        to try the pyaudio package because of its streaming functions. They defeated me.
        """
        import wave
        import numpy as np

        sDir = "{}/{}".format(self.sSoundsDir, sFileName)
        oWave = wave.open(sDir, "rb")
        tParams = oWave.getparams()
        iSampleRate = tParams[2]  # frames per second
        iLen = tParams[3]  # number of frames

        # depending on the type of encoding of the file. Usually 16
        try:
            sSound = oWave.readframes(iLen)
            oWave.close()
            aSound = np.fromstring(sSound, np.int16)
        except ValueError:
            raise ValueError("""wave package seems to want all wav encodings to be in int16, else it throws a mysterious error.
            Short way around it: find audio encoded in the right format. Or use scipy.io.wavfile.
            """)

        aTime = np.array([float(i) / iSampleRate for i in range(len(aSound))])

        dRet = {
            'iSampleRate': iSampleRate,
            'aTime': aTime,
            'aSound': aSound,
            'tParams': tParams
        }
        return dRet
    def resample(self, dSound, iResampleRate=11025):
        """resample audio arrays
        common audio sample rates are 44100, 22050, 11025, 8000
        # creates very noisy results sometimes.
        """
        from scipy import interpolate
        import numpy as np

        aSound = np.array(dSound['aSound'])
        iOldRate = dSound['iSampleRate']
        iOldLen = len(aSound)
        rPeriod = float(iOldLen) / iOldRate
        iNewLen = int(rPeriod * iResampleRate)

        aTime = np.arange(0, rPeriod, 1.0 / iOldRate)
        aTime = aTime[0:iOldLen]

        # interpolate the old signal onto the new time grid
        oInterp = interpolate.interp1d(aTime, aSound)
        aResTime = np.arange(0, aTime[-1], 1.0 / iResampleRate)
        aResSound = oInterp(aResTime)
        aResSound = np.array(aResSound, np.int16)

        tParams = list(x for x in dSound['tParams'])
        tParams[2] = iResampleRate
        tParams[3] = iNewLen
        tParams = tuple(tParams)

        dResSound = {
            'iSampleRate': iResampleRate,
            'aTime': aResTime,
            'aSound': aResSound,
            'tParams': tParams
        }
        return dResSound
    def add_sounds(self, dSound1, dSound2):
        """join two sounds together and return new array
        This method creates a lot of clipping. Not sure how to get around that.
        """
        if dSound1['iSampleRate'] != dSound2['iSampleRate']:
            raise ValueError('sample rates must be the same. Please resample first.')
        import numpy as np

        aSound1 = dSound1['aSound']
        aSound2 = dSound2['aSound']

        # overlay the shorter sound onto a copy of the longer one
        if len(aSound1) < len(aSound2):
            aRet = aSound2.copy()
            aRet[:len(aSound1)] += aSound1
            aTime = dSound2['aTime']
            tParams = dSound2['tParams']
        else:
            aRet = aSound1.copy()
            aRet[:len(aSound2)] += aSound2
            aTime = dSound1['aTime']
            tParams = dSound1['tParams']

        aRet = np.array(aRet, np.int16)
        dRet = {
            'iSampleRate': dSound1['iSampleRate'],
            'aTime': aTime,
            'aSound': aRet,
            'tParams': tParams
        }
        return dRet
    def overlay_sounds(self, sFileName1, sFileName2):
        """I think this method warrants a bit more exploration.
        Also very noisy."""
        from pydub import AudioSegment
        sDir1 = "{}/{}".format(self.sSoundsDir, sFileName1)
        sDir2 = "{}/{}".format(self.sSoundsDir, sFileName2)
        sound1 = AudioSegment.from_wav(sDir1)
        sound2 = AudioSegment.from_wav(sDir2)
        # mix sound2 with sound1, starting at 0ms into sound1
        output = sound1.overlay(sound2, position=0)
        # save the result
        sDir = "{}/{}".format(self.sSoundsDir, 'OUTPUT.wav')
        output.export(sDir, format="wav")
    def array2audio(self, dSound, sDir=None):
        """
        writes a .wav audio file to disk from an array
        """
        import struct
        import wave

        if sDir is None:
            sDir = "{}/{}".format(self.sSoundsDir, 'OUTPUT.wav')

        aSound = dSound['aSound']
        tParams = dSound['tParams']
        sSound = struct.pack('h' * len(aSound), *aSound)

        oWave = wave.open(sDir, "wb")
        oWave.setparams(tParams)
        oWave.writeframes(sSound)
        oWave.close()

    def play_array(self, dSound):
        """Tried to use pyaudio to play the array by just streaming it. It didn't behave, and I moved on.
        I'm just not getting the pyaudio stream to play without weird distortion
        when not loading from file. Perhaps you have more luck.
        """
        self.array2audio(dSound)
        self.play_wave('OUTPUT.wav')

    def plot_audio(self, dSound):
        """just plots the audio array. Nice to see plots when things are going wrong."""
        import matplotlib.pyplot as plt
        plt.plot(dSound['aTime'], dSound['aSound'])
        plt.show()

if __name__ == "__main__":
    example()
I also get this error when I use wave. It still works, so I just ignore it.
Problem seems to be widespread. Error lines:
ALSA lib pcm_dsnoop.c:618:(snd_pcm_dsnoop_open) unable to open slave
ALSA lib pcm_dmix.c:1022:(snd_pcm_dmix_open) unable to open slave
ALSA lib pcm.c:2239:(snd_pcm_open_noupdate) Unknown PCM cards.pcm.rear
ALSA lib pcm.c:2239:(snd_pcm_open_noupdate) Unknown PCM cards.pcm.center_lfe
ALSA lib pcm.c:2239:(snd_pcm_open_noupdate) Unknown PCM cards.pcm.side
bt_audio_service_open: connect() failed: Connection refused (111)
bt_audio_service_open: connect() failed: Connection refused (111)
bt_audio_service_open: connect() failed: Connection refused (111)
bt_audio_service_open: connect() failed: Connection refused (111)
ALSA lib pcm_dmix.c:1022:(snd_pcm_dmix_open) unable to open slave
Cannot connect to server socket err = No such file or directory
Cannot connect to server request channel
jack server is not running or cannot be started
Good luck!
I'm trying to create an audio stream that has a constant audio source (in this case, audiotestsrc) to which I can occasionally add sounds from files (of various formats, that's why I'm using decodebin) through the play_file() method. I use an adder for that purpose. However, for some reason, I cannot add the second sound correctly. Not only does the program play the sound incorrectly, it also completely stops the original audiotestsrc. Here's my code so far:
import gst
import gobject
gobject.threads_init()

pipe = gst.Pipeline()

adder = gst.element_factory_make("adder", "adder")
first_sink = adder.get_request_pad('sink%d')
pipe.add(adder)

test = gst.element_factory_make("audiotestsrc", "test")
test.set_property('freq', 100)
pipe.add(test)
testsrc = test.get_pad("src")
testsrc.link(first_sink)

output = gst.element_factory_make("alsasink", "output")
pipe.add(output)
adder.link(output)

pipe.set_state(gst.STATE_PLAYING)

raw_input('Press key to play sound')

def play_file(filename):
    adder_sink = adder.get_request_pad('sink%d')

    audiofile = gst.element_factory_make('filesrc', 'audiofile')
    audiofile.set_property('location', filename)

    decoder = gst.element_factory_make('decodebin', 'decoder')

    def on_new_decoded_pad(element, pad, last):
        pad.link(adder_sink)
    decoder.connect('new-decoded-pad', on_new_decoded_pad)

    pipe.add(audiofile)
    pipe.add(decoder)
    audiofile.link(decoder)

    pipe.set_state(gst.STATE_PAUSED)
    pipe.set_state(gst.STATE_PLAYING)

play_file('sample.wav')

while True:
    pass
Thanks to moch on #gstreamer, I realized that all of the adder's inputs should have the same format. I modified the above script so that the caps "audio/x-raw-int, endianness=(int)1234, channels=(int)1, width=(int)16, depth=(int)16, signed=(boolean)true, rate=(int)11025" (for example) go before every input into the adder.
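In gst-python 0.10 terms, that amounts to routing each input through audioconvert, audioresample, and a capsfilter before the adder. A sketch of the change for the decodebin branch (element wiring only; names and placement within play_file are assumptions):

# force a common raw format before the adder (gst-python 0.10 API)
convert = gst.element_factory_make('audioconvert')
resample = gst.element_factory_make('audioresample')
capsfilter = gst.element_factory_make('capsfilter')
capsfilter.set_property('caps', gst.Caps(
    "audio/x-raw-int, endianness=(int)1234, channels=(int)1, "
    "width=(int)16, depth=(int)16, signed=(boolean)true, rate=(int)11025"))
for e in (convert, resample, capsfilter):
    pipe.add(e)
gst.element_link_many(convert, resample, capsfilter)
capsfilter.get_pad('src').link(adder_sink)

def on_new_decoded_pad(element, pad, last):
    # decodebin's output now goes through the converter chain, not straight to the adder
    pad.link(convert.get_pad('sink'))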