Trying to get the frequencies of a .wav file in Python

What I'm trying to do seems simple: I want to know exactly what frequencies there are in a .wav file at given times; i.e. "from the time n milliseconds to n + 10 milliseconds, the average frequency of the sound was x hertz". I have seen people talking about Fourier transforms and Goertzel algorithms, as well as various modules, but I can't seem to figure out how to get any of them to do what I've described.
What I'm looking for is a solution like this pseudocode, or at least one that will do something like what the pseudocode is getting at:
import some_module_that_can_help_me_do_this as freq
file = 'output.wav'
start_time = 1000 # Start 1000 milliseconds into the file
end_time = 1010 # End 10 milliseconds thereafter
print("Average frequency = " + str(freq.average(start_time, end_time)) + " hz")
I don't come from a mathematics background, so I don't want to have to understand the implementation details.

If you'd like to detect pitch of a sound (and it seems you do), then in terms of Python libraries your best bet is aubio. Please consult this example for implementation.
import sys
import numpy as np
from aubio import source, pitch

win_s = 4096
hop_s = 512

samplerate = 0  # 0 means: use the file's native sample rate
s = source(your_file, samplerate, hop_s)
samplerate = s.samplerate

tolerance = 0.8

pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("Hz")  # report pitch in Hz, to match the print statement below
pitch_o.set_tolerance(tolerance)

pitches = []
confidences = []

total_frames = 0
while True:
    samples, read = s()
    pitch = pitch_o(samples)[0]
    pitches += [pitch]
    confidence = pitch_o.get_confidence()
    confidences += [confidence]
    total_frames += read
    if read < hop_s:
        break

print("Average frequency = " + str(np.array(pitches).mean()) + " hz")
Be sure to check docs on pitch detection methods.
I also thought you might be interested in estimating the mean frequency and some other audio parameters without using any special libraries. Let's just use numpy! This should give you much better insight into how such audio features can be calculated. It's based on specprop from the seewave package. Check the docs for the meaning of the computed features.
import numpy as np

def spectral_properties(y: np.ndarray, fs: int) -> dict:
    spec = np.abs(np.fft.rfft(y))
    freq = np.fft.rfftfreq(len(y), d=1 / fs)
    amp = spec / spec.sum()
    mean = (freq * amp).sum()
    sd = np.sqrt(np.sum(amp * ((freq - mean) ** 2)))
    amp_cumsum = np.cumsum(amp)
    median = freq[len(amp_cumsum[amp_cumsum <= 0.5]) + 1]
    mode = freq[amp.argmax()]
    Q25 = freq[len(amp_cumsum[amp_cumsum <= 0.25]) + 1]
    Q75 = freq[len(amp_cumsum[amp_cumsum <= 0.75]) + 1]
    IQR = Q75 - Q25
    z = amp - amp.mean()
    w = amp.std()
    skew = ((z ** 3).sum() / (len(spec) - 1)) / w ** 3
    kurt = ((z ** 4).sum() / (len(spec) - 1)) / w ** 4

    result_d = {
        'mean': mean,
        'sd': sd,
        'median': median,
        'mode': mode,
        'Q25': Q25,
        'Q75': Q75,
        'IQR': IQR,
        'skew': skew,
        'kurt': kurt
    }

    return result_d
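For reference, here is a hedged usage sketch tying this back to the question's pseudocode; the file name and the millisecond window are placeholders taken from the question, and the mean spectral frequency is only one possible notion of "average frequency":
from scipy.io import wavfile

sr, data = wavfile.read('output.wav')             # placeholder file name from the question
if data.ndim > 1:
    data = data[:, 0]                             # keep only the first channel
start_time, end_time = 1000, 1010                 # milliseconds, as in the question
segment = data[int(start_time * sr / 1000):int(end_time * sr / 1000)]
props = spectral_properties(segment.astype(float), sr)
print("Average frequency = " + str(props['mean']) + " hz")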

I felt the OP's frustration - it shouldn't be so hard to find out how to get the values of the spectrogram, rather than just seeing the spectrogram image, if someone needs to:
#!/usr/bin/env python
import librosa
import sys
import numpy as np
import matplotlib.pyplot as plt
import librosa.display
np.set_printoptions(threshold=sys.maxsize)

filename = 'filename.wav'
Fs = 44100
clip, sample_rate = librosa.load(filename, sr=Fs)
n_fft = 1024  # frame length
start = 0
hop_length = 512

# commented out code to display the spectrogram
X = librosa.stft(clip, n_fft=n_fft, hop_length=hop_length)
#Xdb = librosa.amplitude_to_db(abs(X))
#plt.figure(figsize=(14, 5))
#librosa.display.specshow(Xdb, sr=Fs, x_axis='time', y_axis='hz')
# Or, to print the log-scaled frequency axis:
#librosa.display.specshow(Xdb, sr=Fs, x_axis='time', y_axis='log')
#plt.colorbar()
#librosa.display.waveplot(clip, sr=Fs)
#plt.show()

# now print all values
t_samples = np.arange(clip.shape[0]) / Fs
t_frames = np.arange(X.shape[1]) * hop_length / Fs
#f_hertz = np.arange(n_fft / 2 + 1) * Fs / n_fft  # works only when n_fft is even
f_hertz = np.fft.rfftfreq(n_fft, 1 / Fs)  # works also when n_fft is odd

# example
print('Time (seconds) of last sample:', t_samples[-1])
print('Time (seconds) of last frame: ', t_frames[-1])
print('Frequency (Hz) of last bin: ', f_hertz[-1])
print('Number of samples :', len(t_samples))
# prints array of time frames
print('Time of frames (seconds) : ', t_frames)
# prints array of frequency bins
print('Frequency (Hz) : ', f_hertz)
print('Number of frames : ', len(t_frames))
print('Number of bins : ', len(f_hertz))

# This code prints, frame by frame, the intensity of each frequency bin
# (the top line gives the frequency bins)
curLine = 'Bins,'
for b in range(1, len(f_hertz)):
    curLine += str(f_hertz[b]) + ','
print(curLine)

for f in range(1, len(t_frames)):  # for each frame, print the list of bin values
    curLine = str(t_frames[f]) + ','
    for b in range(1, len(f_hertz)):
        curLine += str("%.02f" % np.abs(X[b, f])) + ','
        # remove the float formatting for full precision if needed
        #curLine += str(np.abs(X[b, f])) + ','
        # print other useful info, like the phase of frequency bin b at frame f:
        #curLine += str("%.02f" % np.angle(X[b, f])) + ','
    print(curLine)
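If, like the OP, you only want a single number per time slice rather than the full matrix, a small sketch (reusing the X, f_hertz and t_frames arrays computed above) that picks the strongest bin per frame might look like this:
# Dominant frequency (Hz) of each STFT frame: index of the strongest magnitude bin per column.
magnitudes = np.abs(X)                     # shape: (n_bins, n_frames)
dominant_bins = magnitudes.argmax(axis=0)  # strongest bin index per frame
dominant_freqs = f_hertz[dominant_bins]    # convert bin index to Hz
for t, f0 in zip(t_frames, dominant_freqs):
    print("t = %.3f s -> %.1f Hz" % (t, f0))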

This answer is quite late, but you could try this:
(Note: I deserve very little credit for this since I got most of it from other SO posts and this great article on FFT using Python: https://realpython.com/python-scipy-fft/)
import numpy as np
from scipy.fft import rfft, rfftfreq
from scipy.io import wavfile

def freq(file, start_time, end_time):
    # Open the file and keep only the first channel if it isn't mono
    sr, data = wavfile.read(file)
    if data.ndim > 1:
        data = data[:, 0]
    # Take a slice of the data from start_time to end_time (milliseconds)
    dataToRead = data[int(start_time * sr / 1000): int(end_time * sr / 1000) + 1]
    # Fourier Transform
    N = len(dataToRead)
    yf = rfft(dataToRead)
    xf = rfftfreq(N, 1 / sr)
    # Uncomment these to see the frequency spectrum as a plot
    # plt.plot(xf, np.abs(yf))
    # plt.show()
    # Get the most dominant frequency and return it
    idx = np.argmax(np.abs(yf))
    freq = xf[idx]
    return freq
This code can work for any .wav file, but it may be slightly off since it only returns the most dominant frequency, and also because it only uses the first channel of the audio (if not mono).
If you want to learn more about how the Fourier transform works, check out this video by 3blue1brown with a visual explanation: https://www.youtube.com/watch?v=spUNpyF58BY
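As a hedged example of calling it the way the question's pseudocode does ('output.wav' is the OP's placeholder file name):
print("Average frequency = " + str(freq('output.wav', 1000, 1010)) + " hz")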

Try something along the lines of the code below; it worked for me with a sine wave file with a frequency of 1234 Hz that I generated from this page.
from scipy.io import wavfile

def freq(file, start_time, end_time):
    sample_rate, data = wavfile.read(file)
    start_point = int(sample_rate * start_time / 1000)
    end_point = int(sample_rate * end_time / 1000)
    length = (end_time - start_time) / 1000
    counter = 0
    for i in range(start_point, end_point):
        if data[i] < 0 and data[i+1] > 0:
            counter += 1
    return counter / length

freq("sin.wav", 1000, 2100)
1231.8181818181818
edited: cleaned up for loop a bit
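For larger windows, a vectorized sketch of the same positive-going zero-crossing count using numpy (it assumes a mono file, as above; the function name is my own):
import numpy as np
from scipy.io import wavfile

def freq_zero_crossings(file, start_time, end_time):
    sample_rate, data = wavfile.read(file)
    start = int(sample_rate * start_time / 1000)
    end = int(sample_rate * end_time / 1000)
    window = data[start:end + 1]
    # positive-going zero crossings per second over the window
    crossings = np.count_nonzero((window[:-1] < 0) & (window[1:] > 0))
    return crossings / ((end_time - start_time) / 1000)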

Related

Not understanding how the moving average implementation works

I have a simple time series and code implementing the moving average:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras

def plot_series(time, series, format="-", start=0, end=None, label=None):
    plt.plot(time[start:end], series[start:end], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    if label:
        plt.legend(fontsize=14)
    plt.grid(True)

def trend(time, slope=0):
    return slope * time

def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
    return np.where(season_time < 0.4,
                    np.cos(season_time * 2 * np.pi),
                    1 / np.exp(3 * season_time))

def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
    season_time = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(season_time)

def white_noise(time, noise_level=1, seed=None):
    rnd = np.random.RandomState(seed)
    return rnd.randn(len(time)) * noise_level

time = np.arange(4 * 365 + 1)

slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)

noise_level = 5
noise = white_noise(time, noise_level, seed=42)

series += noise

plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()

def moving_average_forecast(series, window_size):
    """Forecasts the mean of the last few values.
    If window_size=1, then this is equivalent to naive forecast"""
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    return np.array(forecast)

split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]

moving_avg = moving_average_forecast(series, 30)[split_time - 30:]

plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, moving_avg, label="Moving average (30 days)")
I am not getting this part:
for time in range(len(series) - window_size):
    forecast.append(series[time:time + window_size].mean())
return np.array(forecast)
What I do not understand is how series[time:time + window_size] works. window_size is passed into the function and can be a value specifying how many days are considered when calculating the mean, like 5 or 30 days.
When I try something similar to illustrate this to myself, like
plot(series[time:time + 30]), it does not work.
Furthermore, I do not get how len(series) - window_size works.
Debug your code and add some print statements to see how it is responding.
Write the results down and try to analyze them.
Step back and write similar code that reproduces the same output.
Compare.
If the output is the same, congrats.
If it is not, run both again with timers on and see which one is faster.
If your code is faster, congrats.
Seems like the function moving_average_forecast simply calculates the x-day rolling average? If that is the intention then:
The line for time in range(len(series) - window_size): gives you an index time that runs from 0 up to len(series) - window_size - 1, i.e. one iteration per full window of window_size points that fits before the end of the series (for example, with 11 data points and window_size = 10 the loop runs exactly once, with time = 0).
The line series[time:time + window_size] simply slices window_size consecutive values out of series, and .mean() averages them. Note that Python slices exclude the stop index, so series[0:10] already returns exactly the first 10 data points, series[1:11] the next 10, and so on. There is a worked example below.
Hope that helps.
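To make the indexing concrete, here is a small self-contained sketch (the numbers are made up) showing what the loop produces:
import numpy as np

def moving_average_forecast(series, window_size):
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    return np.array(forecast)

series = np.arange(11, dtype=float)         # 11 data points: 0, 1, ..., 10
print(moving_average_forecast(series, 10))  # one window fits: mean of 0..9 -> [4.5]
print(moving_average_forecast(series, 3))   # 8 windows -> [1. 2. 3. 4. 5. 6. 7. 8.]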

Change in beat frequency in isochronic tones in python

I just started learning about isochronic tones and began writing a basic Python script to generate them. The script below does the following:
Generate a trapezoidal wave of beat frequency
Modulate a sine wave of base frequency, with the trapezoidal wave of beat frequency
#!/usr/bin/env python
# encoding: utf-8
"""
Small program for creating Isochronic Tones of desired base frequency, beat frequency and ramp frequencies
"""
import math
import wave
import struct
import array
import sys

def make_isochronic_wave(beat_freq, beat_ramp_percent, beat_sampl_rate, beat_time, base_freq, amplitude):
    #
    # The time for which the trapezoidal beat frequency wave is present in the entire cycle
    #
    up_time = 1 / beat_freq
    #
    # Gap between consecutive trapezoidal beats
    #
    gap_percent = up_time * 0.15
    #
    # To accommodate gaps
    #
    up_time = up_time * 0.85
    #
    # Total number of samples
    #
    data_size = beat_sampl_rate * beat_time
    #
    # No. of gaps per sec = No. of beats per sec
    #
    no_of_gaps = beat_freq
    #
    # Samples per gap = percentage of total time allocated for gaps * No. of samples per sec
    #
    sampls_per_gap = gap_percent * beat_sampl_rate
    #
    # Total number of samples in all the gaps
    #
    gap_sampls = no_of_gaps * sampls_per_gap
    #
    # Change the beat sample rate to accommodate gaps
    #
    beat_sampl_rate = beat_sampl_rate - gap_sampls
    #
    # nsps = Number of Samples Per Second
    #
    # NOTE: Please see the image at:
    #
    beat_nsps_defined = beat_sampl_rate * up_time
    beat_nsps_inc = beat_nsps_defined * beat_ramp_percent
    beat_nsps_dec = beat_nsps_defined * beat_ramp_percent
    beat_nsps_stable = beat_nsps_defined - (beat_nsps_inc + beat_nsps_dec)
    beat_nsps_undefined = beat_sampl_rate - beat_nsps_defined
    #
    # Trapezoidal values
    #
    values = []
    #
    # Trapezoidal * sine == Isochronic values
    #
    isoch_values = []
    #
    # Samples constructed
    #
    sampls_const = 0
    #
    # Iterate till all the samples in data_size are constructed
    #
    while sampls_const < data_size:
        prev_sampl_value_inc = 0.0
        prev_sampl_value_dec = 1.0
        #
        # Construct one trapezoidal beat wave (remember this is not the entire sample)
        #
        for sampl_itr in range(0, int(beat_sampl_rate)):
            if sampl_itr < beat_nsps_inc:
                value = prev_sampl_value_inc + (1 / beat_nsps_inc)
                prev_sampl_value_inc = value
                values.append(value)
            if sampl_itr > beat_nsps_inc:
                if sampl_itr < (beat_nsps_inc + beat_nsps_stable):
                    value = 1
                    values.append(value)
                elif (sampl_itr > (beat_nsps_inc + beat_nsps_stable)) and (sampl_itr < (beat_nsps_inc + beat_nsps_stable + beat_nsps_dec)):
                    value = prev_sampl_value_dec - (1 / beat_nsps_dec)
                    prev_sampl_value_dec = value
                    values.append(value)
        #
        # Add the gap cycles
        #
        for gap_iter in range(0, int(sampls_per_gap)):
            values.append(0)
        #
        # Increment the number of samples constructed to reflect the values
        #
        sampls_const = sampls_const + beat_nsps_defined + gap_sampls
    #
    # Open the wave file
    #
    wav_file = wave.open("beat_wave_%s_%s.wav" % (base_freq, beat_freq), "w")
    #
    # Define parameters
    #
    nchannels = 2
    sampwidth = 2
    framerate = beat_sampl_rate
    nframes = data_size
    comptype = "NONE"
    compname = "not compressed"
    wav_file.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname))
    #
    # Calculate isochronic wave point values
    #
    value_iter = 0
    for value in values:
        isoch_value = value * math.sin(2 * math.pi * base_freq * (value_iter / beat_sampl_rate))
        value_iter = value_iter + 1
        isoch_values.append(isoch_value)
    #
    # Create the wave file (in .wav format)
    #
    for value in isoch_values:
        data = array.array('h')
        data.append(int(value * amplitude / 2))  # left channel
        data.append(int(value * amplitude / 2))  # right channel
        wav_file.writeframes(data.tostring())
    wav_file.close()

try:
    base_freq = int(sys.argv[1], 10)
    beat_freq = int(sys.argv[2], 10)
    sample_rate = int(sys.argv[3], 10)
    output_time = int(sys.argv[4], 10)
    ramp_percent = float(sys.argv[5])
    amplitude = float(sys.argv[6])
    make_isochronic_wave(beat_freq, ramp_percent, sample_rate, output_time, base_freq, amplitude)
except:
    msg = """
    <program> <base frequency> <beat frequency> <sample rate> <output time> <ramp percent> <amplitude>
    """
    print(msg)
The above code works fine, and I get a wave of the expected shape, similar to what is generated with Audacity's IsoMod plugin. However, I would like to generate a tone in which the beat frequency decreases as a ramp. For this, I enhanced the above script to call the trapezoidal wave generation in a loop, once every second. However, because the wav file parameters are then set multiple times (data_size changes as beat_freq changes across the ramp), I get the following error:
G:\>python gen_isochronic_tones.py 70 10 5 11025 5 0.15 8000
gen_isochronic_tones.py:195: DeprecationWarning: tostring() is deprecated. Use tobytes() instead.
make_isochronic_wave(beat_freq_start, beat_freq_end, ramp_percent, sample_rate, output_time, base_freq, amplitude)
Traceback (most recent call last):
File "gen_isochronic_tones.py", line 195, in <module>
make_isochronic_wave(beat_freq_start, beat_freq_end, ramp_percent, sample_rate, output_time, base_freq, amplitude)
File "gen_isochronic_tones.py", line 156, in make_isochronic_wave
wav_file.setparams((nchannels, sampwidth, framerate, data_size, comptype, compname))
File "G:\Python37-32\lib\wave.py", line 399, in setparams
raise Error('cannot change parameters after starting to write')
wave.Error: cannot change parameters after starting to write
Looks like the wave module only allows the parameters (namely data_size above) to be set once, before writing starts. Any idea how to make this work when data_size changes?
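One common workaround, sketched below but not tested against the full ramp script, is to accumulate all of the samples for the whole ramp in memory first and only then open the wave file, so that setparams() and writeframes() are each called exactly once (the helper name write_wave is my own):
import wave
import array

def write_wave(filename, samples, sample_rate, amplitude):
    """Write every sample in one go so the wave parameters never change mid-write."""
    frames = array.array('h')
    for value in samples:
        frames.append(int(value * amplitude / 2))  # left channel
        frames.append(int(value * amplitude / 2))  # right channel
    wav_file = wave.open(filename, "w")
    # nchannels, sampwidth, framerate, nframes, comptype, compname
    wav_file.setparams((2, 2, sample_rate, len(samples), "NONE", "not compressed"))
    wav_file.writeframes(frames.tobytes())
    wav_file.close()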

How to count occurrences of a given frequency in a spectrogram

I have a spectrogram like this and I would like to sum up all the occurrences of a given frequency:
My earlier attempts to outline this question ended up not making much sense; I apologize for that.
The picture shows the information, in dB, of a .wav file that I load; it is the output of the specgram method.
As far as I can tell there are 4 main features over time, starting from 0.8 s, at roughly 7 kHz, 4.5 kHz, 2.5 kHz and 900 Hz. They seem to persist over time, so I want to add up all these occurrences.
My source code right now is something like this. You can see that I get some frequency information, but it does not correspond to the values shown in the graphic (in the intervals around 7 kHz, 4.5 kHz, 2.5 kHz and 900 Hz):
for i in range(0, int(RATE / CHUNK_SIZE * RECORD_SECONDS)):
    # little endian, signed short
    data_chunk = array('h', stream.read(CHUNK_SIZE))
    if byteorder == 'big':
        data_chunk.byteswap()
    data_all.extend(data_chunk)

    # Take the fft and square each value
    fftData = abs(np.fft.rfft(data_chunk))**2
    # find the maximum
    which = fftData[1:].argmax() + 1
    # use quadratic interpolation around the max
    if which != len(fftData)-1:
        print("which %f and %f." % (which, which))
        y0, y1, y2 = np.log(fftData[which-1:which+2:])
        x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
        # find the frequency and output it
        #== THIS IS NOT the real freq ======================================
        #== How can I get the values of the freq ???========================
        thefreq = (which + x1) * RATE / CHUNK_SIZE
        print("The freq is %f Hz. and %d" % (thefreq, int(thefreq)))
    else:
        thefreq = which * RATE / CHUNK_SIZE
        print("The freq is %f Hz." % (thefreq))

Fs = 16000
f = np.arange(1, 9) * 2000
t = np.arange(RECORD_SECONDS * Fs) / Fs
x = np.empty(t.shape)
for i in range(8):
    x[i*Fs:(i+1)*Fs] = np.cos(2*np.pi * f[i] * t[i*Fs:(i+1)*Fs])
w = np.hamming(512)
Pxx, freqs, bins = mlab.specgram(data_all, NFFT=512, Fs=Fs, window=w,
                                 noverlap=464)

# plot the spectrogram in dB
Pxx_dB = np.log10(Pxx)
pyplot.subplots_adjust(hspace=0.4)
pyplot.subplot(311)
ex1 = bins[0], bins[-1], freqs[0], freqs[-1]
pyplot.imshow(np.flipud(Pxx_dB), extent=ex1)
#pyplot.axis('auto')
pyplot.axis('tight')
pyplot.xlabel('time (s)')
pyplot.ylabel('freq (Hz)')

#== EXTRA LOG ======================================
print("The max number is >>>>>", np.max(Pxx), " - ", np.max(bins))
Pxx_dB = np.log10(Pxx)
print("The max number is >>>>>", np.max(Pxx_dB))
np.savetxt("./tmp__PXX", Pxx, fmt='%f')
np.savetxt("./tmp__PXX_dB", Pxx_dB, fmt='%f')
pyplot.show()
I would like to do something like what you can find in this other question,
Removing specific frequencies between a range. The thing is, how can I count all these frequencies?
Thank you.
Using the answer from that other SO question and dumping the spectrogram to a simple matrix, I solved the problem. Thank you very much for your help.
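For anyone landing here later, a minimal sketch of that idea (the threshold, target frequencies, and helper name are arbitrary) using the Pxx and freqs arrays that mlab.specgram already returns:
import numpy as np

def count_frames_above(Pxx, freqs, target_hz, threshold_db):
    """Count spectrogram frames whose power at the bin nearest target_hz exceeds threshold_db."""
    bin_idx = np.argmin(np.abs(freqs - target_hz))   # nearest frequency bin
    row_db = 10 * np.log10(Pxx[bin_idx, :])          # that bin's power over time, in dB
    return int(np.sum(row_db > threshold_db))

# hypothetical usage with the arrays computed above:
# for f0 in (900, 2500, 4500, 7000):
#     print(f0, "Hz occurs in", count_frames_above(Pxx, freqs, f0, threshold_db=-30), "frames")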

Inverse Wavelet Transform [/xpost signalprocessing]

Main Problem: How can the scipy.signal.cwt() function be inverted?
I have seen that MATLAB has an inverse continuous wavelet transform function which will return the original form of the data from the wavelet transform, although you can filter out the slices you don't want.
MATLAB inverse cwt function
Since scipy doesn't appear to have the same function, I have been trying to figure out how to get the data back in the same form, while removing the noise and background.
How do I do this?
I tried squaring it to remove negative values, but this gives me values that are way too large and not quite right.
Here is what I have been trying:
# Compute the wavelet transform
widths = range(1,11)
cwtmatr = signal.cwt(xy['y'], signal.ricker, widths)
# Maybe we multiply by the original data? And square?
WT_to_original_data = (xy['y'] * cwtmatr)**2
And here is a fully compilable short script to show you the type of data I am trying to get and what I have etc.:
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

# Make some random data with peaks and noise
def make_peaks(x):
    bkg_peaks = np.array(np.zeros(len(x)))
    desired_peaks = np.array(np.zeros(len(x)))
    # Make peaks which contain the data desired
    # (Mid range/frequency peaks)
    for i in range(0, 10):
        center = x[-1] * np.random.random() - x[0]
        amp = 60 * np.random.random() + 10
        width = 10 * np.random.random() + 5
        desired_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
    # Also make background peaks (not desired)
    for i in range(0, 3):
        center = x[-1] * np.random.random() - x[0]
        amp = 40 * np.random.random() + 10
        width = 100 * np.random.random() + 100
        bkg_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
    return bkg_peaks, desired_peaks

x = np.array(range(0, 1000))
bkg_peaks, desired_peaks = make_peaks(x)
y_noise = np.random.normal(loc=30, scale=10, size=len(x))
y = bkg_peaks + desired_peaks + y_noise
xy = np.array(list(zip(x, y)), dtype=[('x', float), ('y', float)])

# Compute the wavelet transform
# I can't figure out what the width is or does?
widths = range(1, 11)
# Ricker is 2nd derivative of Gaussian
# (*close* to what *most* of the features are in my data)
# (They're actually Lorentzians and Breit-Wigner-Fano lines)
cwtmatr = signal.cwt(xy['y'], signal.ricker, widths)
# Maybe we multiply by the original data? And square?
WT = (xy['y'] * cwtmatr)**2

# plot the data and results
fig = plt.figure()
ax_raw_data = fig.add_subplot(4, 3, 1)
ax = {}
for i in range(0, 11):
    ax[i] = fig.add_subplot(4, 3, i+2)
ax_desired_transformed_data = fig.add_subplot(4, 3, 12)

ax_raw_data.plot(xy['x'], xy['y'], 'g-')
for i in range(0, 10):
    ax[i].plot(xy['x'], WT[i])
ax_desired_transformed_data.plot(xy['x'], desired_peaks, 'k-')

fig.tight_layout()
plt.show()
This script will output this image:
Where the first plot is the raw data, the middle plots are the wavelet transforms and the last plot is what I want to get out as the processed (background and noise removed) data.
Does anyone have any suggestions? Thank you so much for the help.
I ended up finding a package called mlpy which provides an inverse wavelet transform: the relevant functions are mlpy.wavelet.uwt and mlpy.wavelet.iuwt. This is the runnable script I ended up with, which may interest people if they are trying to do noise or background removal:
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mlpy.wavelet as wave

# Make some random data with peaks and noise
############################################################
def gen_data():
    def make_peaks(x):
        bkg_peaks = np.array(np.zeros(len(x)))
        desired_peaks = np.array(np.zeros(len(x)))
        # Make peaks which contain the data desired
        # (Mid range/frequency peaks)
        for i in range(0, 10):
            center = x[-1] * np.random.random() - x[0]
            amp = 100 * np.random.random() + 10
            width = 10 * np.random.random() + 5
            desired_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
        # Also make background peaks (not desired)
        for i in range(0, 3):
            center = x[-1] * np.random.random() - x[0]
            amp = 80 * np.random.random() + 10
            width = 100 * np.random.random() + 100
            bkg_peaks += amp * np.e**(-(x-center)**2/(2*width**2))
        return bkg_peaks, desired_peaks

    # make x axis
    x = np.array(range(0, 1000))
    bkg_peaks, desired_peaks = make_peaks(x)
    avg_noise_level = 30
    std_dev_noise = 10
    size = len(x)
    scattering_noise_amp = 100
    scat_center = 100
    scat_width = 15
    scat_std_dev_noise = 100
    y_scattering_noise = np.random.normal(scattering_noise_amp, scat_std_dev_noise, size) * np.e**(-(x-scat_center)**2/(2*scat_width**2))
    y_noise = np.random.normal(avg_noise_level, std_dev_noise, size) + y_scattering_noise
    y = bkg_peaks + desired_peaks + y_noise
    xy = np.array(list(zip(x, y)), dtype=[('x', float), ('y', float)])
    return xy

# Random data Generated
#############################################################
xy = gen_data()

# Make 2**n amount of data
new_y, bool_y = wave.pad(xy['y'])
orig_mask = np.where(bool_y == True)

# wavelet transform parameters
levels = 8
wf = 'h'
k = 2

# Remove Noise first
# Wave transform
wt = wave.uwt(new_y, wf, k, levels)
# Matrix of the difference between each wavelet level and the original data
diff_array = np.array([(wave.iuwt(wt[i:i+1], wf, k) - new_y) for i in range(len(wt))])
# Index of the level which is most similar to original data (to obtain smoothed data)
indx = np.argmin(np.sum(diff_array**2, axis=1))
# Use the wavelet levels around this region
noise_wt = wt[indx:indx+1]
# smoothed data in 2^n length
new_y = wave.iuwt(noise_wt, wf, k)

# Background Removal
error = 10000
errdiff = 100
i = -1
iter_y_dict = {0: np.copy(new_y)}
bkg_approx_dict = {0: np.array([])}
while abs(errdiff) >= 1*10**-24:
    i += 1
    # Wave transform
    wt = wave.uwt(iter_y_dict[i], wf, k, levels)
    # Assume last slice is lowest frequency (background approximation)
    bkg_wt = wt[-3:-1]
    bkg_approx_dict[i] = wave.iuwt(bkg_wt, wf, k)
    # Get the error
    errdiff = error - sum(iter_y_dict[i] - bkg_approx_dict[i])**2
    error = sum(iter_y_dict[i] - bkg_approx_dict[i])**2
    # Make every peak higher than bkg_wt
    diff = (new_y - bkg_approx_dict[i])
    peak_idxs_to_remove = np.where(diff > 0.)[0]
    iter_y_dict[i+1] = np.copy(new_y)
    iter_y_dict[i+1][peak_idxs_to_remove] = np.copy(bkg_approx_dict[i])[peak_idxs_to_remove]

# new data without noise and background
new_y = new_y[orig_mask]
bkg_approx = bkg_approx_dict[len(bkg_approx_dict.keys())-1][orig_mask]
new_data = diff[orig_mask]

##############################################################
# plot the data and results
fig = plt.figure()
ax_raw_data = fig.add_subplot(121)
ax_WT = fig.add_subplot(122)

ax_raw_data.plot(xy['x'], xy['y'], 'g')
for bkg in bkg_approx_dict.values():
    ax_raw_data.plot(xy['x'], bkg[orig_mask], 'k')
ax_WT.plot(xy['x'], new_data, 'y')

fig.tight_layout()
plt.show()
And here is the output I am getting now:
As you can see, there is still a problem with the background removal (it shifts to the right after each iteration), but it is a different question which I will address here.

Easy way to implement a Root Raised Cosine (RRC) filter using Python & Numpy

SciPy/Numpy seems to support many filters, but not the root-raised cosine filter. Is there a trick to easily create one rather than calculating the transfer function? An approximation would be fine as well.
The commpy package has several filters included with it. The order of return variables was switched in an earlier version (as of this edit, the current version is 0.7.0). To install, follow the instructions here or here.
Here's a use example for 1024 symbols of QAM16:
import numpy as np
from commpy.modulation import QAMModem
from commpy.filters import rrcosfilter

N = 1024  # Number of symbols
os = 8    # oversampling factor (samples per symbol)

# Create modulation. QAM16 makes 4 bits/symbol
mod1 = QAMModem(16)

# Generate the bit stream for N symbols
sB = np.random.randint(0, 2, N * mod1.num_bits_symbol)

# Generate N complex-integer valued symbols
sQ = mod1.modulate(sB)
sQ_upsampled = np.zeros(os * (len(sQ) - 1) + 1, dtype=np.complex64)
sQ_upsampled[::os] = sQ

# Create a filter with limited bandwidth. Parameters:
#     N:   Filter length in samples
#     0.8: Roll off factor alpha
#     1:   Symbol period in time-units
#     os:  Sample rate in 1/time-units (samples per symbol period, since Ts=1)
sPSF = rrcosfilter(N, alpha=0.8, Ts=1, Fs=os)[1]

# Analog signal has N/2 leading and trailing near-zero samples
qW = np.convolve(sPSF, sQ_upsampled)
Here's some explanation of the parameters. N is the number of baud samples. You need 4 times as many bits (in the case of QAM16) as symbols. I made the sPSF array return with N elements so we can see the signal with leading and trailing samples. See the Wikipedia Root-raised-cosine filter page for an explanation of the parameter alpha. Ts is the symbol period in seconds and Fs is the number of filter samples per Ts. I like to pretend Ts=1 to keep things simple (unit symbol rate). Then Fs is the number of complex waveform samples per baud point.
If you use return element 0 from rrcosfilter to get the sample time indexes, you need to insert the correct symbol period and filter sample rate in Ts and Fs for the index values to be correctly scaled, as sketched below.
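For example (a hedged sketch continuing the code above; the 1 microsecond symbol period and 8 MS/s sample rate are made-up values):
time_idx, h_rrc = rrcosfilter(N, alpha=0.8, Ts=1e-6, Fs=8e6)  # time_idx is now in seconds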
It would be nice to have the root-raised cosine filter standardized in a common package. In the meantime, here is my implementation, based on commpy. It is vectorized with numpy and normalized without consideration of the symbol rate.
def raised_root_cosine(upsample, num_positive_lobes, alpha):
    """
    Root raised cosine (RRC) filter (FIR) impulse response.

    upsample: number of samples per symbol
    num_positive_lobes: number of positive overlapping symbols;
        length of filter is 2 * num_positive_lobes + 1 samples
    alpha: roll-off factor
    """
    N = upsample * (num_positive_lobes * 2 + 1)
    t = (np.arange(N) - N / 2) / upsample

    # result vector
    h_rrc = np.zeros(t.size, dtype=float)

    # index for special cases
    sample_i = np.zeros(t.size, dtype=bool)

    # deal with special cases
    subi = t == 0
    sample_i = np.bitwise_or(sample_i, subi)
    h_rrc[subi] = 1.0 - alpha + (4 * alpha / np.pi)

    subi = np.abs(t) == 1 / (4 * alpha)
    sample_i = np.bitwise_or(sample_i, subi)
    h_rrc[subi] = (alpha / np.sqrt(2)) \
                * (((1 + 2 / np.pi) * (np.sin(np.pi / (4 * alpha))))
                + ((1 - 2 / np.pi) * (np.cos(np.pi / (4 * alpha)))))

    # base case
    sample_i = np.bitwise_not(sample_i)
    ti = t[sample_i]
    h_rrc[sample_i] = np.sin(np.pi * ti * (1 - alpha)) \
                    + 4 * alpha * ti * np.cos(np.pi * ti * (1 + alpha))
    h_rrc[sample_i] /= (np.pi * ti * (1 - (4 * alpha * ti) ** 2))

    return h_rrc
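A quick usage sketch of the function above (the parameter values here are arbitrary):
import numpy as np
import matplotlib.pyplot as plt

# 8 samples per symbol, 4 overlapping symbols on each side, roll-off 0.35
taps = raised_root_cosine(upsample=8, num_positive_lobes=4, alpha=0.35)
plt.plot(taps)
plt.show()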
commpy doesn't seem to be released yet. But here is my nugget of knowledge.
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

beta = 0.20            # roll off factor
Tsample = 1.0          # sampling period; should be at least twice the symbol rate
oversampling_rate = 8  # oversampling of the bit stream, this gives samples per symbol
                       # must be at least 2X the bit rate
Tsymbol = oversampling_rate * Tsample  # pulse duration should be at least 2 * Ts
span = 50              # number of symbols to span, must be even
n = span * oversampling_rate  # length of the filter = samples per symbol * symbol span

# t_step must be from -span/2 to +span/2 symbols.
# each symbol has 'sps' number of samples per second.
t_step = Tsample * np.linspace(-n/2, n/2, n+1)  # n+1 to include 0 time

BW = (1 + beta) / Tsymbol
a = np.zeros_like(t_step)

for i, t in enumerate(t_step):
    # t is n*Ts
    if (1 - (2.0 * beta * t / Tsymbol)**2) == 0:
        a[i] = np.pi / 4 * np.sinc(t / Tsymbol)
        print('i = %d' % i)
    elif t == 0:
        a[i] = np.cos(beta * np.pi * t / Tsymbol) / (1 - (2.0 * beta * t / Tsymbol)**2)
        print('t = 0 captured')
        print('i = %d' % i)
    else:
        # np.sinc is the normalized sinc, sin(pi x)/(pi x), so pass t/Tsymbol directly
        numerator = np.sinc(t / Tsymbol) * np.cos(np.pi * beta * t / Tsymbol)
        denominator = (1.0 - (2.0 * beta * t / Tsymbol)**2)
        a[i] = numerator / denominator

#a = a / sum(a)  # normalize total power

plot_filter = 0
if plot_filter == 1:
    w, h = signal.freqz(a)

    fig = plt.figure()
    plt.subplot(2, 1, 1)
    plt.title('Digital filter (raised cosine) frequency response')
    ax1 = fig.add_subplot(211)
    plt.plot(w/np.pi, 20 * np.log10(abs(h)), 'b')
    #plt.plot(w/np.pi, abs(h), 'b')
    plt.ylabel('Amplitude (dB)', color='b')
    plt.xlabel(r'Normalized Frequency ($\pi$ rad/sample)')

    ax2 = ax1.twinx()
    angles = np.unwrap(np.angle(h))
    plt.plot(w/np.pi, angles, 'g')
    plt.ylabel('Angle (radians)', color='g')
    plt.grid()
    plt.axis('tight')
    plt.show()

    plt.subplot(2, 1, 2)
    plt.stem(a)
    plt.show()
I think the correct approach is to generate the desired impulse response. For a raised cosine filter the function is
h(n) = (sinc(n/T)*cos(pi * alpha* n /T)) / (1-4*(alpha*n/T)**2)
Select the number of points for your filter and generate the weights.
output = scipy.signal.convolve(signal_in, h)
This is basically the same function as in CommPy but much smaller in code:
def rcosfilter(N, beta, Ts, Fs):
    t = (np.arange(N) - N / 2) / Fs
    return np.where(np.abs(2*t) == Ts / beta,
                    np.pi / 4 * np.sinc(t/Ts),
                    np.sinc(t/Ts) * np.cos(np.pi*beta*t/Ts) / (1 - (2*beta*t/Ts) ** 2))
SciPy will support any filter. Just calculate the impulse response and use any of the appropriate scipy.signal filter/convolve functions.
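For instance, a minimal sketch (assuming the rcosfilter function defined just above is in scope, and with arbitrary filter parameters) of computing an impulse response and applying it with scipy.signal:
import numpy as np
from scipy import signal

h = rcosfilter(101, beta=0.35, Ts=1, Fs=8)   # 101 taps, unit symbol period, 8 samples/symbol

# toy baseband signal: random +/-1 symbols upsampled by 8
symbols = np.random.choice([-1.0, 1.0], size=64)
signal_in = np.zeros(len(symbols) * 8)
signal_in[::8] = symbols

output = signal.convolve(signal_in, h)       # pulse-shaped waveform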
