Updating plot of large arrays pyqtgraph - python

So, using a Raspberry Pi, I am trying to scan a range of frequencies with an RTL-SDR and then plot that information. However, the updating plot runs fairly slowly. Is there any way I can speed this process up? Here is my code:
#!bin/usr/python
#gathers from the rtl library needed for this program
from rtlsdr import RtlSdr
import math
from pylab import *
import time
from scipy.fftpack import fft, ifft
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np

sdr = RtlSdr()

freq = 5.5e6
num_samples = 1024
##freq_range_lo = 52e6
freq_range_lo = 5.3e6
freq_range_hi = 1766e6
##freq_range_hi = 5.8e6
freq_step = 1e6

sdr.sample_rate = 1e6
sdr.center_freq = freq
sdr.freq_correction = 1142
sdr.gain = 49.6

samples = np.array([])
freq_array = []
fft_data = []

app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="Basic plotting examples")
win.resize(1000, 600)
win.setWindowTitle('pyqtgraph example: Plotting')
pg.setConfigOptions(antialias=True)
win.nextRow()
p6 = win.addPlot(title="Freq plot")
curve = p6.plot(pen='y')
#data = np.random.normal(size=(5.5,1766))
data = np.random.normal(size=(10, 1000))
ptr = 0

def scan(low, hi, freq, freq_step):
    global samples, fft_data
    for freq in range(int(low), int(hi), int(freq_step)):
        samp_val = 0
        samples = fft(sdr.read_samples(256))
        samples = abs(samples[0::])
        for i in samples:
            samp_val += samples[i]
        fft_data.append(samp_val)
    return fft_data

def update():
    global curve, data, ptr, p6, samples, fft_data
    curve.setData(scan(freq_range_lo, freq_range_hi, freq, freq_step))
    if ptr == 0:
        p6.enableAutoRange('xy', False)
    ptr += 1
    del fft_data[:]

timer = pg.QtCore.QTimer()
timer.timeout.connect(update)
timer.start(2)

## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
For the scanner, I am using the setup provided at this link: https://github.com/roger-/pyrtlsdr
Any and all help in getting this sped up would be greatly appreciated.
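Two things in scan() stand out before any plotting tweaks: the inner loop indexes samples with the sample magnitudes themselves (samp_val += samples[i] inside for i in samples) instead of summing them, and sdr.center_freq is never retuned inside the loop, so every step reads the same frequency. A sweep from 5.3 MHz to 1766 MHz in 1 MHz steps is also roughly 1760 hardware retunes and reads per timer tick, which no plotting library can keep up with at timer.start(2) (every 2 ms). Below is a minimal, hedged sketch of how the sweep could be vectorised and retuned per step; the function name and signature (taking the sdr object) are mine, not a drop-in replacement for the code above:

import numpy as np

def scan_sweep(sdr, low, hi, freq_step, n_samples=256):
    """One sweep: retune, read, and accumulate the total FFT magnitude per step."""
    power = []
    for f in np.arange(low, hi, freq_step):
        sdr.center_freq = f                              # retune for every step
        samples = sdr.read_samples(n_samples)
        power.append(np.abs(np.fft.fft(samples)).sum())  # vectorised sum, no per-sample Python loop
    return np.asarray(power)

With the per-sample Python loop gone, the remaining cost is the retune-and-read cycle itself, so a realistic timer interval (say timer.start(100) or larger) and a narrower sweep range will do far more for responsiveness than any pyqtgraph setting; passing the numpy array straight to curve.setData() instead of a growing Python list also helps a little.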

Related

Going to Specific Location using Neural Network in ROS

I am developing a program that moves a TurtleBot to a specific location using a neural network with lidar sensors.
The program has a problem: the TurtleBot can move and avoid obstacles, but it cannot go to the specific location I want, and I don't know where the problem is.
There is no problem compiling and running the program (if you are asking about the software I used); I am sure the problem is in my code.
import rospy
import random
import numpy as np
import tensorflow as tf
from geometry_msgs.msg import Twist, Point
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Path
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseStamped
from math import atan2
from tf.transformations import euler_from_quaternion

x = 0.0
y = 0.0
theta = 0.0
laser_range = np.array([])

def newOdom(msg):
    global x
    global y
    global theta
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y
    rot_q = msg.pose.pose.orientation
    (roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])

def scan_callback(msg):
    global laser_range
    laser_range = np.expand_dims(np.array(msg.ranges[:30] + msg.ranges[-30:]), axis=0)
    laser_range[laser_range == np.inf] = 3.5

if __name__ == "__main__":
    scan_sub = rospy.Subscriber('scan', LaserScan, scan_callback)
    move = rospy.Publisher('cmd_vel', Twist, queue_size=1)
    sub = rospy.Subscriber('/odom', Odometry, newOdom)
    rospy.init_node('obstacle_avoidance_tf')
    state_change_time = rospy.Time.now()
    rate = rospy.Rate(10)

    goal = Point()
    goal.x = 1
    goal.y = 1

    model = tf.keras.models.load_model('catkin_ws/src/turtlebot3_neural_network/src/model.hdf5')

    while not rospy.is_shutdown():
        inc_x = goal.x - x
        inc_y = goal.y - y
        angle_to_goal = atan2(inc_y, inc_x)

        predictions = model.predict(laser_range)
        action = np.argmax(predictions)

        twist = Twist()
        if action == 0 and abs(angle_to_goal - theta) > 0.1:
            twist.linear.x = 0.3
        else:
            twist.angular.z = 0.3

        move.publish(twist)
        print(x)
        print(y)
        rate.sleep()
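One thing worth noting about the code above: the network only ever sees the 60 laser ranges, so it has no information about where the goal is, and the fallback twist.angular.z = 0.3 always turns the same way regardless of which side the goal lies on; the heading test abs(angle_to_goal - theta) > 0.1 is also not wrapped to [-pi, pi]. A hedged sketch of one way to blend the network's obstacle decision with a simple go-to-goal turn follows; it assumes action == 0 means "path ahead is clear", as the original if branch implies, and the helper name is my own:

from math import atan2, sin, cos
from geometry_msgs.msg import Twist

def goal_seeking_twist(x, y, theta, goal, action):
    """Combine the network's obstacle decision with a simple go-to-goal turn.
    Assumes action == 0 means 'path ahead is clear', as in the question's code."""
    angle_to_goal = atan2(goal.y - y, goal.x - x)
    # wrap the heading error into [-pi, pi] so the comparison behaves near +/-pi
    err = atan2(sin(angle_to_goal - theta), cos(angle_to_goal - theta))
    twist = Twist()
    if action != 0:
        twist.angular.z = 0.3                       # obstacle predicted: keep the avoidance turn
    elif abs(err) > 0.1:
        twist.angular.z = 0.3 if err > 0 else -0.3  # clear but misaligned: turn towards the goal
    else:
        twist.linear.x = 0.3                        # aligned and clear: drive forward
    return twist

In the main loop this would replace the twist = Twist() block with twist = goal_seeking_twist(x, y, theta, goal, action). Whether the trained model really encodes "clear ahead" as action 0 is an assumption carried over from the question.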

Getting "keyerror: 0" when running my code

I am getting the error below when I try to plot a short-time energy function. Please, I need your help to solve this problem.
Code:
import os
from tqdm import tqdm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from python_speech_features import mfcc
from python_speech_features import logfbank
import librosa

def plot_ste(ste):
    fig, axes = plt.subplots(nrows=1, ncols=1, sharex=False,
                             sharey=False, figsize=(400, 50))
    fig.suptitle('Short Time Energy', size=100, y=1.02)
    i = 0
    for x in range(1):
        for y in range(1):
            data = list(ste.values())[i]
            x, win = data[0], data[1]
            axes[x,y].set_title(list(ste.keys())[i])
            axes[x,y].plot(win, x)
            axes[x,y].get_xaxis().set_visible(False)
            axes[x,y].get_yaxis().set_visible(False)
            i += 1

def ste(x, win):
    """Compute short-time energy."""
    if isinstance(win, str):
        win = scipy.signal.get_window(win, max(1, len(x) // 8))
    win = win / len(win)
    return scipy.signal.convolve(x**2, win**2, mode="same")

df = pd.read_csv('/dir/to/a.csv')
df.set_index('fname', inplace=True)

classes = list(np.unique(df.ID))
df.reset_index(inplace=True)

ste = {}
for c in classes:
    wav_file = df[df.ID==c].iloc[0, 0]
    signal, rate = librosa.load('/dir/to/wav_file')
    ste[c] = ste

plot_ste(ste)
plt.show()
Error:
File "/home/Desktop/Program/stft_plot_full_Dir.py", line 35, in plot_ste
x, win = data[0], data[1]
KeyError: 0
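The KeyError: 0 follows directly from the last loop: ste = {} rebinds the name of the ste() function to a dict, and ste[c] = ste then stores that dict inside itself. So data in plot_ste() is a dict, and data[0] raises KeyError: 0. The next problem waiting is that plt.subplots(nrows=1, ncols=1) returns a single Axes object, not a 2-D array, so axes[x, y] would also fail. Below is a hedged sketch of one way the loop and the plot might have been intended; the variable names, the 'hann' window choice, and the assumption that the first column of the CSV holds a path librosa can load are my guesses, not from the original:

import scipy.signal
import matplotlib.pyplot as plt
import librosa

def short_time_energy(x, win="hann"):
    """Short-time energy, the same computation as the ste() function above."""
    if isinstance(win, str):
        win = scipy.signal.get_window(win, max(1, len(x) // 8))
    win = win / len(win)
    return scipy.signal.convolve(x**2, win**2, mode="same")

energies = {}
for c in classes:                                  # classes and df as defined above
    wav_file = df[df.ID == c].iloc[0, 0]
    signal, rate = librosa.load(wav_file)          # load the file named in the CSV row
    energies[c] = short_time_energy(signal)        # store the result, not the dict itself

fig, ax = plt.subplots(figsize=(12, 4))            # a single Axes, so no axes[x, y] indexing
for name, e in energies.items():
    ax.plot(e, label=name)
ax.set_title('Short Time Energy')
ax.legend()
plt.show()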

Similarity Index between 2 spectrograms

So I want to get the similarity index between 2D arrays representing 2 singers' voices.
I read my mp3 files with pydub.
AudioFunctions.py
import numpy as np
from scipy.fftpack import fft
from pydub import AudioSegment

class SongData():
    def __init__(self, path):
        self.audio = AudioSegment.from_file(path).set_channels(1)
        self.rate, self.data = self.audio.frame_rate, np.array(self.audio.get_array_of_samples())
        self.length = len(self.data)
        self.duration = self.audio.duration_seconds
        self.time = np.linspace(0, self.duration, self.length)
        self.freq = np.linspace(0, self.rate / 2, int(self.length / 2))
        self.fftArray = fft(self.data)
        self.fftArrayPositive = self.fftArray[:self.length // 2]
        self.fftArrayNegative = np.flip(self.fftArray[self.length // 2:])
        self.fftArrayAbs = np.abs(self.fftArray)
        self.fftPlotting = self.fftArrayAbs[: self.length // 2]

def song2data(path):
    songClass = SongData(path)
    return songClass

def getFirstData(songArr, time):
    selectedData = songArr[:int(time*44100)]
    return selectedData
And this is my code to get the data of 2 songs and get their spectrograms...
main.py
from AudioFunctions import *
from scipy import signal
import matplotlib.pyplot as plt
import librosa
import sklearn
from scipy import spatial
from sklearn.metrics.pairwise import cosine_similarity
songClass1 = song2data("sia1.mp3")
songClass2 = song2data("sia2.mp3")
# print(songClass.data)
# print(songClass.rate)
# print(songClass.duration)
# print(songClass.length)
songArray = getFirstData(songClass1.data, 120)
songArray2 = getFirstData(songClass2.data, 120)
frequencies, times, spectrogram = signal.spectrogram(songArray, 44100)
frequencies2, times2, spectrogram2 = signal.spectrogram(songArray2, 44100)
# print(frequencies)
spec = spectrogram.flatten()
spec2 = spectrogram2.flatten()
result = 1 - spatial.distance.cosine(spec, spec2)
print(result)
The result represents a similarity index between the two voices. However, it gives me a low number (0.133) when comparing two songs by the same singer (Sia).
Song 1: https://drive.google.com/file/d/1svV0Ry_lNaEA9Z8c61t3S25XCSgIv6sW/view
Song 2:https://drive.google.com/file/d/1ToKQo2MERBbxZezqDcEtEE2dhgmH-wus/view
Is there any problem in my logic? Or could this result be reasonable in some cases?
Thanks in advance.
Bit of a necro answer, but yes: a spectrogram is a timeline of which frequencies are played and at what intensity, so two different songs will look totally different, and yes, your logic is flawed.
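On top of that, the flattened spectrograms are compared bin by bin, so any shift in timing, tempo, or loudness between the two recordings drives the cosine similarity down even when the voice is the same. One common alternative (my suggestion, not something from the answer above) is to compare compact, time-averaged features such as MFCCs. A hedged sketch using the songClass objects from main.py above:

import numpy as np
import librosa

def mfcc_signature(samples, rate, n_mfcc=20):
    """Time-averaged MFCC vector as a compact per-song signature."""
    y = np.asarray(samples, dtype=float)                    # pydub gives integer samples
    mfcc = librosa.feature.mfcc(y=y, sr=rate, n_mfcc=n_mfcc)
    return mfcc.mean(axis=1)                                # average over time frames

sig1 = mfcc_signature(songClass1.data, songClass1.rate)
sig2 = mfcc_signature(songClass2.data, songClass2.rate)
similarity = np.dot(sig1, sig2) / (np.linalg.norm(sig1) * np.linalg.norm(sig2))
print(similarity)

Even then, "how similar two singers sound" is not fully captured by any single number, so treat the score as a rough indicator rather than ground truth.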

python real time plot of serial port data has huge lag

I am trying to read from a serial port and plot the data in a graph using matplotlib.
Following is my code.
I see that, because of the plotting, there is a huge lag (the data in the queue grows to over 10000 bytes), so I don't get a real-time plot. Can you please tell me if I am doing anything wrong?
import serial                   # import Serial Library
import numpy                    # Import numpy
import matplotlib.pyplot as plt
from drawnow import drawnow

Accelerometer = []
COM_read = serial.Serial('COM5', 9600, timeout=None, parity='N', stopbits=1, rtscts=0) #Creating our serial object name
plt.ion()                       #Tell matplotlib you want interactive mode to plot live data
cnt = 0
COM_read.flushInput()
COM_read.flushOutput()

def makeFig():                  #Create a function that makes our desired plot
    plt.title('My Live Streaming Sensor Data')               #Plot the title
    plt.grid(True)                                            #Turn the grid on
    plt.ylabel('Acc in g')                                    #Set ylabels
    plt.plot(Accelerometer, 'ro-', label='Accelerometer g')  #plot the acceleration magnitude
    plt.legend(loc='upper left')                              #plot the legend
    plt.ylim(15000, 30000)      #Set limits of second y axis- adjust to readings you are getting
    print "came through"

while True:                     # While loop that loops forever
    print COM_read.inWaiting()
    while (COM_read.inWaiting() == 0):  #Wait here until there is data
        pass                            #do nothing

    s = COM_read.readline()     #read the line of text from the serial port
    decx = int(s[0:4], 16)
    decy = int(s[5:9], 16)
    decz = int(s[10:14], 16)

    if decx > 32768:
        decx = decx - 65536
    if decy > 32768:
        decy = decy - 65536
    if decz > 32768:
        decz = decz - 65536

    #print decx,decy,decz
    res = ((decx*decx) + (decy*decy) + (decz*decz))**(1.0/2.0)

    Accelerometer.append(res)   #Build our Accelerometer array by appending readings
    drawnow(makeFig)            #Call drawnow to update our live graph
    plt.pause(.000001)          #Pause briefly. Important to keep drawnow from crashing
    cnt = cnt + 1
    if(cnt > 50):               #If you have 50 or more points, delete the first one from the array
        Accelerometer.pop(0)    #This allows us to just see the last 50 data points
---------------- Based on Dr. John's suggestion, the code is written as follows ----------------
import serial
import matplotlib
import threading, time
import pylab
import matplotlib.pyplot as plt
import matplotlib.animation as anim

class MAIN:
    #ser = 0;
    def __init__(self):
        self.res = 0.0
        self.ser = serial.Serial('COM5', 9600, timeout=None, parity='N', stopbits=1, rtscts=0)
        self.ser.flushInput()
        self.ser.flushOutput()
        c = self.ser.read(1)
        while(c != '\n'):
            c = self.ser.read(1)
        return

    def acquire_data(self):
        s = self.ser.readline()
        decx = int(s[0:4], 16)
        decy = int(s[5:9], 16)
        decz = int(s[10:14], 16)
        if decx > 32768:
            decx = decx - 65536
        if decy > 32768:
            decy = decy - 65536
        if decz > 32768:
            decz = decz - 65536
        self.res = ((decx*decx) + (decy*decy) + (decz*decz))**(1.0/2.0)
        print self.res
        return

ex = MAIN()
threading.Timer(2, ex.acquire_data).start() #starts function acquire_data every 2 sec and resets self.res with 0.5Hz

fig, ax = plt.subplots()
ax.plot(time.time(), ex.res, 'o-')
print "closed"
A while true loop is a bad idea in most cases.
Non-OO-Programming is a bad idea in most cases.
Appending plot-data to lists is a bad idea in most cases (laggy).
Start the data acquisition at defined times with
import threading, time
threading.Timer(2, acquire_data).start() #starts function acquire_data every 2 sec and resets self.res with 0.5Hz
then, instead of the while loop, please define class-based functions for your own benefit:
class MAIN:
    def __init__(self):
        self.res = 0

    def acquire_data(self):
        ....
        return self.res
and plot with
fig, ax = plt.subplots()
ax.plot(time.time(), self.res, 'o-')
Greets Dr. Cobra
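To make the suggestion above concrete: threading.Timer fires only once, so on its own it will not keep self.res updated. Below is a hedged sketch of a complete version; it keeps the question's fixed-width hex line format and COM5/9600 settings as assumptions, reads the port in a background thread so the serial buffer never backs up, and redraws only the last 50 points with matplotlib's animation machinery instead of re-plotting a growing list:

import collections
import threading
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation

class AccReader:
    """Read the fixed-width hex lines assumed in the question and keep the latest magnitudes."""
    def __init__(self, port='COM5', baud=9600, maxlen=50):
        self.ser = serial.Serial(port, baud, timeout=1)
        self.data = collections.deque(maxlen=maxlen)   # bounded history instead of a growing list
        self._stop = threading.Event()
        threading.Thread(target=self._run, daemon=True).start()

    def _run(self):
        while not self._stop.is_set():
            raw = self.ser.readline()
            if len(raw) < 14:
                continue
            vals = [int(raw[i:i + 4], 16) for i in (0, 5, 10)]    # same slicing as the question
            vals = [v - 65536 if v > 32768 else v for v in vals]  # two's-complement correction
            self.data.append(sum(v * v for v in vals) ** 0.5)

    def stop(self):
        self._stop.set()

reader = AccReader()
fig, ax = plt.subplots()
curve, = ax.plot([], [], 'ro-', label='Accelerometer g')
ax.set_ylim(15000, 30000)
ax.legend(loc='upper left')

def update(_frame):
    y = list(reader.data)
    curve.set_data(range(len(y)), y)
    ax.set_xlim(0, max(len(y), 1))
    return curve,

ani = animation.FuncAnimation(fig, update, interval=100)  # redraw ~10x per second, independent of the serial rate
plt.show()

The key design point is the same as in the answer: acquisition and drawing run on their own schedules, so a burst of serial data never blocks the plot and the plot never blocks the reads.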

Memory allocated to Python is not released back in Linux even after gc.collect()

I have written Python code that doesn't release memory the way it should. The memory is taken by Python but never released, even after it is no longer being used; even if you break the running program with Ctrl+C, delete the variable, and run gc.collect(), it doesn't seem to be collected. The same happens in IPython after running %reset: the memory won't be freed and gc.collect() has no effect. I tested this in Windows as well, to see whether it could be related to the garbage collector library, and it appears that is the case: run the code below in Linux and then in Windows, and compare the memory usage. You will need numpy and scipy installed. Any help or insight on this issue would be much appreciated.
Import the Model class, create an instance, and then run createSpecific().
Here is code that exhibits this behavior on Ubuntu 10.04:
from numpy import array, maximum, intersect1d, meshgrid, std, log, log10, zeros, ones, argwhere, abs, arange, size, copy, sqrt, sin, cos, pi, vstack, hstack, zeros, exp, max, mean, savetxt, loadtxt, minimum, linspace, where
from numpy.fft import fft
from scipy.stats import f_oneway, kruskal, sem, scoreatpercentile
#import matplotlib
#matplotlib.use('cairo.pdf')
from matplotlib.pyplot import plot, clf, show, cla, xlim, xscale, imshow, ylabel, xlabel, figure, savefig, close, bar, title, xticks, yticks, axes, axis
from matplotlib.axes import Axes
from mpl_toolkits.mplot3d import Axes3D
#from enthought.mayavi import mlab
from matplotlib import cm
import matplotlib.pyplot as plt
import os
from time import clock
from timeit import Timer

class Model:
    #Constructors and default includes
    def __init__(self, prevAud=None, debug=False):
        if (prevAud == None):
            self.fs = 16000.  #sample rate
            self.lowFreq = 60.
            self.hiFreq = 5000.
            self.numFilt = 300  #number of channels
            self.EarQ = 9.26449  #9.26449
            self.minBW = 24.7  #24.7
            self.integrationWindow = .01
            self.sliceAt = .035
            self.maxOverallInhibit = 0.1
            self.winLen = int(self.fs*self.integrationWindow+.01)  #default integration window 10 ms
            self.fullWind = 0.300
            self.outShortWindow = None
            self.siderArray = None
            self.maxNormalizeValue = .284  # Optimized at .284
            self.outputSemiModel = None
            self.semitones = 11
            self.activationTrace = None
        return

    def setErbScale(self, erbScale=None):
        if (erbScale == None):
            self.erbScale = arange(100, 500, 5)
        else:
            self.erbScale = erbScale

    def trainModel(self, soundVec=None, fs=None, lowfreq=None, highfreq=None, numfilt=None, figto=0, savefig='N', prompts=False, plotter=False):
        self.setErbScale()
        templateArray = self.intWindow(self.halfWaveRec(self.creGammatone(soundVec)))
        for i in xrange(templateArray[0].size):
            self.outerTest(self.innerTest(templateArray[:, i]))
        return templateArray

    def createSpecific(self, freqArray=None, semitones=11, timeforHarm=.3, soundVec=None, fs=None, lowfreq=None, highfreq=None, numfilt=None, figto=0, saveData='N', fileDir='TempRunT/', prompts=False, plotter=False):
        if (freqArray == None):
            self.setErbScale()
            freqArray = self.erbScale
        if (type(semitones) == int):
            semitones = arange(semitones+1)
        totalRuns = int(timeforHarm/self.integrationWindow+.001)
        inhibitWindowArray = zeros((freqArray.size, (semitones.size), self.numFilt, totalRuns))
        for x in xrange(freqArray.size):
            tempHarm = self.makeHarmonicAmpMod(freqArray[x], timeforHarm, numHarm=7, modulation=10)
            for y in semitones:
                tempChord = self.makeSemiChordAmpMod(tempHarm, freqArray[x], timeforHarm, modulation=10, numHarm=7, semi=y)
                inhibitWindowArray[x, y] = self.trainModel(tempChord, savefig='N', plotter=plotter)
        self.inhibitWindowArray = inhibitWindowArray

    def creGammatone(self, soundVec):
        temp = zeros((300, soundVec.size))
        for i in xrange(temp[:, 0].size):
            temp[i] = -1**i*soundVec
        return temp

    def halfWaveRec(self, halfWaveFilts):
        filtShape = halfWaveFilts.shape
        if (filtShape[1] != int(self.fs*self.fullWind)):
            halfWaveFilts = hstack((halfWaveFilts, zeros((self.numFilt, int(self.fs*self.fullWind)-filtShape[1]))))
        temp = zeros((halfWaveFilts[:, 0].size, halfWaveFilts[0].size))
        halfWaveFilts = maximum(halfWaveFilts, temp)
        del temp
        return halfWaveFilts

    def intWindow(self, integratedFilts):
        winlen = self.winLen
        length = integratedFilts[0].size/winlen
        mod = integratedFilts[0].size % winlen
        outShortWindow = zeros((integratedFilts[:, 0].size, length))
        meanval = 0
        if (mod != 0):
            for i in xrange(integratedFilts[:, 0].size):
                mean(integratedFilts[i, 0:-mod].reshape(length, winlen), 1, out=outShortWindow[i])
        else:
            for i in xrange(integratedFilts[:, 0].size):
                mean(integratedFilts[i].reshape(length, winlen), 1, out=outShortWindow[i])
        del integratedFilts
        return outShortWindow

    def innerTest(self, window):
        temper = copy(window)
        sider = 7
        st = .04
        sizer = temper.size
        inhibVal = 0
        for j in xrange(sider):
            inhibVal = (temper[0:j+sider+1].sum())*(sider*2+1)/(sider+1+j)
            window[j] += - st*(inhibVal)
        for j in xrange(sider, sizer - sider):
            inhibVal = temper[j-sider:j+sider+1].sum()
            window[j] += - st*(inhibVal)
        for j in xrange(sizer-sider, sizer):
            inhibVal = (temper[j-sider:sizer].sum())*(sider*2+1)/(sider+sizer-j)
            window[j] += - st*(inhibVal)
        maxsub = max(window) * self.maxOverallInhibit
        window += - maxsub
        del temper
        return window

    def outerTest(self, window):
        newSatValue = scoreatpercentile(window, (76))
        numones = where(window > newSatValue)
        window[numones] = 1
        self.maxSatValue = newSatValue
        del numones
        return window

    def makeHarmonicAmpMod(self, freq=100, time=1., modulation=10, fsamp=None, numHarm=7):
        if fsamp == None: fsamp = self.fs
        samples = arange(time*fsamp)
        signal = 0
        for x in xrange(1, (numHarm+1), 1):
            signal = signal + sin(samples/float(fsamp)*x*freq*2*pi)
        signal = (signal)*maximum(zeros(time*fsamp), sin((samples/float(fsamp)*modulation*2*pi)))
        return signal

    def makeSemiChordAmpMod(self, harmVec=None, freq=100, time=1., modulation=10, fsamp=None, numHarm=7, semi=2):
        if (harmVec == None): harmVec = self.makeHarmonicAmpMod(freq, time, modulation, fsamp, numHarm)
        if (semi == 0): return harmVec
        return harmVec + self.makeHarmonicAmpMod(freq*(2**(semi/12.)), time, modulation, fsamp, numHarm)
Virtual memory is not a scarce resource. There is no need to return it to the system since each process has its own address space. What is your actual problem? What issue is this behavior causing you?
I installed the latest SVN build of numpy and the issue vanished. I assume it was inside one of the numpy functions; I never got a chance to dig further into it.
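Related to the first comment: on Linux the C allocator often keeps freed pages mapped into the process, so the resident size you see for the process does not shrink even after gc.collect() has done its job. A common workaround when that actually matters (an alternative approach, unrelated to the numpy SVN fix mentioned above) is to run the allocation-heavy step in a child process, whose memory is returned to the OS when it exits. A minimal sketch using the Model class above:

import multiprocessing as mp

def run_model(queue):
    # All of the large allocations happen inside this child process; when it
    # exits, the operating system reclaims its memory regardless of what the
    # allocator in the parent would have kept mapped.
    m = Model()                             # Model as defined above
    m.createSpecific()
    queue.put(m.inhibitWindowArray.shape)   # send back only a small summary; a large array
                                            # would have to be pickled through the queue

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=run_model, args=(q,))
    p.start()
    print(q.get())
    p.join()

The trade-off is the pickling cost of anything you pass back to the parent, so this only pays off when the intermediate allocations are much larger than the result you keep.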
