How to use a Python block to remove white noise?

I want to design a Python block that removes the white noise from my received signal,
as shown in the red circle:
So I first tested in Spyder whether my code could remove the noise correctly,
and I got the result shown in the picture:
Once that test passed, I tried to move the code into the Python block, but it would not execute normally and caused a crash.
Below are the test program from Spyder and the code of my Python block:
import numpy

i = 0
j = 0
t = 0
data = []
buf = []

wav = numpy.fromfile(open('C:/Users/user/Desktop/datas'), dtype=numpy.uint8)
for i in range(int(len(wav) / 10)):
    for j in range(10):
        buf.append(wav[(i * 10) + j])
        if (buf[j] <= 180) and (buf[j] >= 90):
            t = t + 1              # count samples that fall in the 90-180 band
    if t < 6:                      # fewer than 6 in-band samples: keep the chunk
        data = numpy.append(data, buf)
    # else:
    #     data = numpy.append(data, numpy.zeros(10))
    t = 0
    j = 0
    buf.clear()
"""
Embedded Python Blocks:
Each time this file is saved, GRC will instantiate the first class it finds
to get ports and parameters of your block. The arguments to __init__ will
be the parameters. All of them are required to have default values!
"""
import numpy as np
from gnuradio import gr
i = 0
j = 0
t = 0
data=[]
buf=[]
class blk(gr.sync_block): # other base classes are basic_block, decim_block, interp_block
"""Embedded Python Block example - a simple multiply const"""
def __init__(self): # only default arguments here
"""arguments to this function show up as parameters in GRC"""
gr.sync_block.__init__(
self,
name='noise out', # will show up in GRC
in_sig=[np.float32],
out_sig=[np.float32]
)
# if an attribute with the same name as a parameter is found,
# a callback is registered (properties work, too).
def work(self, input_items, output_items):
"""example: multiply with constant"""
np.frombuffer(input_items, dtype=np.uint8)
for i in range(int((len(input_items[0]))/10)):
for j in range(10):
buf.append(input_items[0][(i*10)+j])
if (buf[j]<=180)and(buf[j]>=90):
t = t+1
if t < 6:
data = numpy.append(data,buf)
else:
data = numpy.append(data,numpy.zeros(10))
t= 0
j = 0
buf.clear()
for i in range(len(output_items[0])):
output_items[0][i]=data[i]
return len(output_items[0])
What should I modify so that it runs normally?
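Not an official fix, just a minimal sketch of one way the same idea could be written as an embedded Python block: the 90-180 band, the 10-sample chunks and the 6-hit threshold come from the question, while the parameter names, the per-instance state and the pass-through default are my assumptions. A sync_block must produce exactly as many output items as it consumes, so the sketch blanks noisy chunks in place instead of appending to a growing list:

import numpy as np
from gnuradio import gr


class blk(gr.sync_block):
    """Zero out 10-sample chunks that look like noise (sketch, not a verified fix)."""

    def __init__(self, low=90.0, high=180.0, chunk=10, min_hits=6):  # defaults required by GRC
        gr.sync_block.__init__(self, name='noise out',
                               in_sig=[np.float32], out_sig=[np.float32])
        # thresholds taken from the question; chunk/min_hits are assumptions
        self.low, self.high, self.chunk, self.min_hits = low, high, chunk, min_hits

    def work(self, input_items, output_items):
        inp = input_items[0]
        out = output_items[0]
        out[:] = inp                                  # default: pass samples through
        n = (len(inp) // self.chunk) * self.chunk     # whole chunks only
        for start in range(0, n, self.chunk):
            block = inp[start:start + self.chunk]
            hits = np.count_nonzero((block >= self.low) & (block <= self.high))
            if hits >= self.min_hits:                 # mostly in-band: treat as noise
                out[start:start + self.chunk] = 0.0   # blank this chunk
        return len(out)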


Sending and receiving a signal at the same time

I’m working in Python on a Raspberry Pi. I’m trying to send out a signal on a motor controller and then receive a signal with a sensing HAT after it passes through my plant (an RC filter in this case).
The important thing is that I want to generate the output and read the input as close to simultaneously as possible. I was hoping to use multiprocessing to have one worker send the signal while the other reads the incoming signal, but I keep getting confused about how threads work in Python.
In short: is it possible to do two different tasks with multiprocessing and then repeat those tasks (sending and reading a signal) until a condition is met, like in a while loop?
(Edited with code)
from __future__ import print_function
from PyQt5.QtWidgets import QAction
from pyqtgraph.Qt import QtGui, QtCore
from adafruit_motorkit import MotorKit
import pyqtgraph as pg
import sys
from sys import stdout
import numpy as np
from daqhats import mcc118, OptionFlags, HatIDs, HatError
from daqhats_utils import select_hat_device, enum_mask_to_string, \
    chan_list_to_mask
from decimal import *
import math
import time

getcontext().prec = 3

total_samples_read = 0
READ_ALL_AVAILABLE = -1
channelData = np.zeros(4, dtype=float)
CURSOR_BACK_2 = '\x1b[2D'
ERASE_TO_END_OF_LINE = '\x1b[0K'

# for plotting data
########################################
scan_rate = 1000    # scan rate in Hz
maxtime = 30        # seconds to run for
Datatime = np.zeros(maxtime * scan_rate, dtype=float)  # list of times when samples are taken
Data1 = np.zeros(maxtime * scan_rate, dtype=float)     # samples taken
data_index = 0      # maximum index of data points taken
dt = Decimal(1 / scan_rate)  # difference in time between indexes of Datatime
display_index = 0   # maximum index of Data being displayed on plot
#################################

# variables for Data logger
##########################
is_scanning = False
channels = [0]
channel_mask = chan_list_to_mask(channels)
num_channels = len(channels)
samples_per_channel = 0
options = OptionFlags.CONTINUOUS
######################################

startedTime = 0     # time at program start
myTime = 0          # time since program started

try:
    address = select_hat_device(HatIDs.MCC_118)
    hat = mcc118(address)
except (HatError, ValueError) as err:
    print('\n', err)


class MainWindow(pg.GraphicsWindow):

    def __init__(self, *args, **kwargs):
        super(pg.GraphicsWindow, self).__init__(*args, **kwargs)
        self.delay = 30  # ms
        self.quit = QAction("Quit", self)
        self.quit.triggered.connect(self.clean_close)
        self.timer = QtCore.QTimer()
        self.timer.setInterval(self.delay)
        self.timer.timeout.connect(self.update_plot)

    # plots data and runs calibrate between trials
    def update_plot(self):
        global display_index, Datatime, Data1
        kit.motor1.throttle = .4 + .2 * math.cos((time.time() - startedTime) * 2 * np.pi * 1)  # 1 Hz sinusoid out of motor
        if data_index < len(Data1):
            Collect_Data()
        plot.setXRange(0, 20, padding=0)
        plot.setXRange(0, 20, padding=0)
        curve.setData(Datatime[:display_index], Data1[:display_index])
        display_index += 1
        app.processEvents()

    def clean_close(self):
        self.close()


# starts data collection
def Collect_Data():
    global is_scanning
    """
    This function is executed automatically when the module is run directly.
    """
    # Store the channels in a list and convert the list to a channel mask that
    # can be passed as a parameter to the MCC 118 functions.
    try:
        # Select an MCC 118 HAT device to use.
        # actual_scan_rate = hat.a_in_scan_actual_rate(num_channels, scan_rate)

        # Configure and start the scan.
        # Since the continuous option is being used, the samples_per_channel
        # parameter is ignored if the value is less than the default internal
        # buffer size (10000 * num_channels in this case). If a larger internal
        # buffer size is desired, set the value of this parameter accordingly.
        if not is_scanning:
            hat.a_in_scan_start(channel_mask, samples_per_channel, scan_rate,
                                options)
            is_scanning = True
        try:
            read_and_display_data(hat, num_channels)
        except KeyboardInterrupt:
            # Clear the '^C' from the display.
            print(CURSOR_BACK_2, ERASE_TO_END_OF_LINE, '\n')
            print('Stopping')
            hat.a_in_scan_stop()
            hat.a_in_scan_cleanup()
    except (HatError, ValueError) as err:
        print('\n', err)


# reads Data off of the HAT and adds it to Data1
def read_and_display_data(hat, num_channels):
    global channelData, data_index, Datatime, Data1
    total_samples_read = 0
    read_request_size = READ_ALL_AVAILABLE

    # When doing a continuous scan, the timeout value will be ignored in the
    # call to a_in_scan_read because we will be requesting that all available
    # samples (up to the default buffer size) be returned.
    timeout = 5.0

    # Read all of the available samples (up to the size of the read_buffer which
    # is specified by the user_buffer_size). Since the read_request_size is set
    # to -1 (READ_ALL_AVAILABLE), this function returns immediately with
    # whatever samples are available (up to user_buffer_size) and the timeout
    # parameter is ignored.
    trigger = True
    while trigger == True:
        read_result = hat.a_in_scan_read(read_request_size, timeout)

        # Check for an overrun error
        if read_result.hardware_overrun:
            print('\n\nHardware overrun\n')
            break
        elif read_result.buffer_overrun:
            print('\n\nBuffer overrun\n')
            break

        samples_read_per_channel = int(len(read_result.data) / num_channels)
        total_samples_read += samples_read_per_channel

        # adds all data in the buffer to the data to be plotted.
        count = 0
        if samples_read_per_channel > 0:
            index = samples_read_per_channel * num_channels - num_channels
            while count < samples_read_per_channel:
                for i in range(num_channels):
                    channelData[i] = read_result.data[index + i]
                if data_index < len(Data1):
                    Data1[data_index] = channelData[0]
                    Datatime[data_index] = float(dt * Decimal(data_index))
                    data_index += 1
                count += 1
            trigger = False
        stdout.flush()


if __name__ == '__main__':
    app = QtGui.QApplication([])
    win = MainWindow()        # display window
    plot = win.addPlot(1, 0)
    curve = plot.plot()
    win.show()
    kit = MotorKit()          # implements motor driver
    kit.motor1.throttle = .4  # value 1 is 5 V and 0 is 0 V
    startedTime = time.time()
    # u = .2*math.cos(t * 2*np.pi*1)
    win.timer.start()
    sys.exit(app.exec_())
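There is no answer posted for this question, but as a rough illustration of the "two tasks repeated until a condition is met" pattern it asks about, here is a minimal, hardware-free multiprocessing sketch. send_signal and read_signal are hypothetical stand-ins for the MotorKit and MCC 118 calls, and a shared Event plays the role of the stop condition; true simultaneity still depends on the hardware, this only runs the two loops in separate processes:

import math
import time
from multiprocessing import Event, Process, Queue


def send_signal(stop):
    """Stand-in for driving the motor, e.g. kit.motor1.throttle = ..."""
    t0 = time.time()
    while not stop.is_set():
        throttle = 0.4 + 0.2 * math.cos((time.time() - t0) * 2 * math.pi)  # 1 Hz sinusoid
        # kit.motor1.throttle = throttle   # real hardware call would go here
        time.sleep(0.01)


def read_signal(stop, out_q):
    """Stand-in for the MCC 118 read loop; pushes (timestamp, sample) pairs."""
    while not stop.is_set():
        sample = 0.0                       # hat.a_in_scan_read(...) would go here
        out_q.put((time.time(), sample))
        time.sleep(0.01)


if __name__ == '__main__':
    stop = Event()
    samples = Queue()
    workers = [Process(target=send_signal, args=(stop,)),
               Process(target=read_signal, args=(stop, samples))]
    for w in workers:
        w.start()
    time.sleep(5)                          # run until the stop condition is met
    stop.set()
    time.sleep(0.2)                        # let the workers notice the stop flag
    results = []
    while not samples.empty():             # drain the queue before joining
        results.append(samples.get())
    for w in workers:
        w.join()
    print('collected', len(results), 'samples')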

Multithread communication in Python 3.6 freezes

I'm trying to develop a multithreaded function in Python 3.6, and sometimes my code freezes. From my tests I think the problem comes from os.write() or os.read(), but I don't know why.
Here is my code (I don't think partialTransform() causes the freeze, but I include it so the code is easier to follow):
def naiveTransform(netData, **kwargs):
    # parallelisable part
    def partialTransform(debut, fin):
        for i in range(debut, fin):
            j = 0
            # computation of all the distances:
            while j < nbrPoint:
                distance[j] = euclidianDistance(netData[i], netData[j])
                j += 1
            # construction of the graph:
            j = 0
            del distance[i]
            while j < k:
                nearest = min(distance, key=distance.get)
                del distance[nearest]  # if k > 1 we don't want to always get the same point.
                graph.append([i, nearest])
                j += 1
        return graph

    k = kwargs.get('k', 1)          # default value, to be defined.
    nbrCore = kwargs.get('Core', 1)
    nbrPoint = len(netData)
    nbrPointCore = nbrPoint // nbrCore

    distance = dict()
    graph = []

    # pipes
    r = [-1] * nbrCore
    w = [-1] * nbrCore
    pid = [-1] * nbrCore

    for i in range(nbrCore):
        r[i], w[i] = os.pipe()
        try:
            pid[i] = os.fork()
        except OSError:
            exit("Could not create a child process\n")

        if pid[i] == 0:
            if i < nbrCore - 1:
                g = partialTransform(i * nbrPointCore, (i + 1) * nbrPointCore)
            else:
                g = partialTransform(i * nbrPointCore, nbrPoint)  # to be sure that there is no forgotten point.
            print("write in " + str(i))
            import sys
            print(sys.getsizeof(g))
            os.write(w[i], pickle.dumps(g))
            print("exit")
            exit()

    for i in range(nbrCore):
        print("waiting " + str(i))
        finished = os.waitpid(pid[i], 0)
        print("received")
        graph += pickle.loads(os.read(r[i], 250000000))

    return graph
When the argument k is greater than or equal to 5, the code freezes right after the print(sys.getsizeof(g)) call.
For my example case the size is 33928 when k = 4 and 43040 when k = 5, so I don't think the size itself is the problem?
The number of cores used doesn't seem to have any influence on the freeze.
I'm still a beginner in Python, so it may be something obvious, but I didn't find any similar problem on the internet. Do you have any idea what could cause these freezes?
Pipes have limited-size buffers and the child will block writing to the pipe until the parent reads it. But the parent is waiting for the child to exit, so you hang. You can avoid the buffer limit by writing the object to a temporary file instead. The data will be in the operating system file cache when the parent reads it, so it will still be fast.
There is a trick in all this: the parent needs to convince libc to re-examine the file after the child writes it, or the read will just be satisfied by its zero-length internal cache. You can do that with a seek.
import tempfile

def naiveTransform(netData, **kwargs):

    # *** code removed for example ***

    # files
    tmp = [tempfile.TemporaryFile() for _ in range(nbrCore)]
    pid = [-1] * nbrCore

    for i in range(nbrCore):
        try:
            pid[i] = os.fork()
        except OSError:
            exit("Could not create a child process\n")

        if pid[i] == 0:
            if i < nbrCore - 1:
                g = partialTransform(i * nbrPointCore, (i + 1) * nbrPointCore)
            else:
                g = partialTransform(i * nbrPointCore, nbrPoint)  # to be sure that there is no forgotten point.
            print("write in " + str(i))
            import sys
            print(sys.getsizeof(g))
            pickle.dump(g, tmp[i])
            tmp[i].close()
            print("exit")
            exit()

    for i in range(nbrCore):
        print("waiting " + str(i))
        finished = os.waitpid(pid[i], 0)
        print("received")
        # seek to get updated file content
        tmp[i].seek(0, 2)
        tmp[i].seek(0)
        graph += pickle.load(tmp[i])

    return graph
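For completeness, and not part of the answer above: the same fan-out/fan-in pattern is often written with multiprocessing.Pool, which moves the results back through its own machinery, so there is no pipe or temporary file to manage by hand. This is only a sketch of that pattern; partialTransform would have to be refactored into a top-level, picklable function (here a dummy partial_transform taking its inputs explicitly):

from multiprocessing import Pool


def partial_transform(args):
    """Top-level stand-in for the question's partialTransform closure."""
    net_data, debut, fin, k = args
    # ... the real k-nearest-neighbour computation for points debut..fin would go here ...
    return [[i, (i + 1) % len(net_data)] for i in range(debut, fin)]  # dummy edges


def naive_transform(net_data, k=1, nbr_core=2):
    nbr_point = len(net_data)
    step = nbr_point // nbr_core
    bounds = [(net_data, i * step,
               (i + 1) * step if i < nbr_core - 1 else nbr_point, k)
              for i in range(nbr_core)]
    graph = []
    with Pool(nbr_core) as pool:
        for part in pool.map(partial_transform, bounds):
            graph += part
    return graph


if __name__ == '__main__':
    print(naive_transform(list(range(10)), k=1, nbr_core=2))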

Correct configuration of Aubio / Alsaaudio

I am trying to use aubio and Python for a school project. Here's the goal: detect when someone emits two sounds, each with a length of 2 s and with an interval between them of at most 3 s. The second one needs to be higher-pitched than the first. When these conditions are met, the program sends a Wake-on-LAN packet (not implemented in the current code).
import alsaaudio
import numpy as np
import aubio
import time
import threading


class Audio_watcher:
    # constants
    samplerate = 44100
    win_s = 2048
    hop_s = win_s // 2
    framesize = hop_s
    nb_samples = 20
    tone_duration = 2.0
    per_sampling = tone_duration / nb_samples
    tone_max_interval = 3.0
    tone_diff_ratio = 2

    def __init__(self):
        self.last_frequencies = np.zeros(Audio_watcher.nb_samples)
        self.last_energies = np.zeros(Audio_watcher.nb_samples)
        self.detected_tone = 0

        # set up audio input
        recorder = alsaaudio.PCM(type=alsaaudio.PCM_CAPTURE)
        recorder.setperiodsize(Audio_watcher.framesize)
        recorder.setrate(Audio_watcher.samplerate)
        recorder.setformat(alsaaudio.PCM_FORMAT_FLOAT_LE)
        recorder.setchannels(1)
        self.recorder = recorder

        pitcher = aubio.pitch("default", Audio_watcher.win_s, Audio_watcher.hop_s, Audio_watcher.samplerate)
        pitcher.set_unit("Hz")
        pitcher.set_silence(-40)
        self.pitcher = pitcher

        # A-weighting filter
        f = aubio.digital_filter(7)
        f.set_a_weighting(Audio_watcher.samplerate)
        self.f = f

    def get_audio(self):
        # read and convert data from audio input
        _, data = self.recorder.read()
        samples = np.fromstring(data, dtype=aubio.float_type)
        filtered_samples = self.f(samples)
        print(filtered_samples)

        # pitch and energy of current frame
        freq = self.pitcher(filtered_samples)[0]
        print(freq)
        self.last_frequencies = np.roll(self.last_frequencies, 1)
        self.last_frequencies[0] = freq
        self.last_energies = np.roll(self.last_energies, 1)
        self.last_energies[0] = np.sum(filtered_samples**2) / len(filtered_samples)

        threading.Timer(Audio_watcher.per_sampling, self.get_audio).start()

    def reset_detected_tone(self):
        self.detected_tone = 0

    def detect_tone(self):
        std_last = np.std(self.last_frequencies)
        if std_last <= 200 and std_last > 0:
            mean_freq = np.mean(self.last_frequencies)
            if self.detected_tone == 0:
                self.detected_tone = mean_freq
                threading.Timer(Audio_watcher.tone_max_interval, self.reset_detected_tone).start()
            elif mean_freq > Audio_watcher.tone_diff_ratio * self.detected_tone:
                print('wol')

        threading.Timer(Audio_watcher.tone_duration, self.detect_tone).start()


aw = Audio_watcher()
aw.get_audio()
aw.detect_tone()
However, with this code I get a large delay between the sounds and their detection. I think it has to do with the recorder being read only once every 0.1 s, but I can't figure out how to pass the correct parameters to aubio.
Does anyone know how to configure the constants so it works?
Thanks a lot!
Found out what was causing this: I needed to put the code that sets up the audio input inside the get_audio function so it is renewed every time.
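Read literally, that change would make get_audio look roughly like the sketch below. This is only an interpretation of the self-answer, using the same pyalsaaudio calls as the question; note that re-opening the capture device on every call has its own overhead:

    def get_audio(self):
        # set up the audio input freshly on each call, as described above
        recorder = alsaaudio.PCM(type=alsaaudio.PCM_CAPTURE)
        recorder.setperiodsize(Audio_watcher.framesize)
        recorder.setrate(Audio_watcher.samplerate)
        recorder.setformat(alsaaudio.PCM_FORMAT_FLOAT_LE)
        recorder.setchannels(1)

        # read and convert data from the audio input
        _, data = recorder.read()
        samples = np.fromstring(data, dtype=aubio.float_type)
        filtered_samples = self.f(samples)

        # pitch and energy of the current frame
        freq = self.pitcher(filtered_samples)[0]
        self.last_frequencies = np.roll(self.last_frequencies, 1)
        self.last_frequencies[0] = freq
        self.last_energies = np.roll(self.last_energies, 1)
        self.last_energies[0] = np.sum(filtered_samples**2) / len(filtered_samples)

        threading.Timer(Audio_watcher.per_sampling, self.get_audio).start()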

Multiple Python progressive progress bars with varying lengths

I am trying to use the progressive Python progress bar package to create two stacked progress bars. It should look something like:
Articles[####### ]
Links [############]
Notice that the two progress bars are of differing lengths. The code below creates two progress bars of the same length. I was wondering if someone could tell me how to adjust this so that each progress bar can be a different size.
Here is the test code that I developed:
from time import sleep

from blessings import Terminal
from progressive.bar import Bar
from progressive.tree import ProgressTree, Value, BarDescriptor


def progbar(_outer, _inner):
    leaf_values = [Value(0) for i in range(2)]
    test_d = {
        'Link pages scraped': BarDescriptor(value=leaf_values[0],
                                            type=Bar, max_value=_outer),
        'Articles collected': BarDescriptor(value=leaf_values[1],
                                            type=Bar, max_value=_inner)
    }

    def incr_value(obj, _counter_outer, _counter_inner):
        if _counter_inner < _outer:
            leaf_values[0].value += 1
        if _counter_outer < _inner:
            leaf_values[1].value += 1

    def are_we_done(obj):
        if _counter_inner == _outer and _counter_outer == _inner:
            return(True)
        else:
            return(False)

    # Create blessings.Terminal instance
    t = Terminal()
    # Initialize a ProgressTree instance
    n = ProgressTree(term=t)
    # We'll use the make_room method to make sure the terminal
    # is filled out with all the room we need
    n.make_room(test_d)

    _counter_inner = 0
    _counter_outer = 0
    while not are_we_done(test_d):
        sleep(2)
        n.cursor.restore()
        # We use our incr_value method to bump the fake numbers
        incr_value(test_d, _counter_outer, _counter_inner)
        # Actually draw out the bars
        n.draw(test_d)
        _counter_inner += 1
        _counter_outer += 1


if __name__ == '__main__':
    progbar(100, 20)
OK, first, I assume the indentation problem comes from the copy and paste.
To make the bars different sizes you need to change the lines
test_d = {
    'Link pages scraped': BarDescriptor(value=leaf_values[0],
                                        type=Bar, max_value=_outer),
    'Articles collected': BarDescriptor(value=leaf_values[1],
                                        type=Bar, max_value=_inner)
}
to:
test_d = {
    'Link pages scraped': BarDescriptor(value=leaf_values[0],
                                        type=Bar, kwargs=dict(max_value=_outer, width="50%")),
    'Articles collected': BarDescriptor(value=leaf_values[1],
                                        type=Bar, kwargs=dict(max_value=_inner, width="10%"))
}
Notice that I call BarDescriptor with kwargs as a normal dict and not with **; that's how it is used in this example: https://github.com/hfaran/progressive/blob/master/progressive/examples.py
It seems to work (the parameters in kwargs are used to call the Bar class).
You probably want to change the 10% and 50% to something that isn't hardcoded.
The % means a percentage of the terminal width; you can also use "20c", which means the width will be 20 characters.
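As an illustration of that last point (my own example, not from the answer): the widths could be derived from the two max values so the bar lengths stay proportional, assuming the same _outer and _inner variables as the question:

# split 80% of the terminal width in proportion to the two totals
total = _outer + _inner
test_d = {
    'Link pages scraped': BarDescriptor(
        value=leaf_values[0], type=Bar,
        kwargs=dict(max_value=_outer, width="{}%".format(int(80 * _outer / total)))),
    'Articles collected': BarDescriptor(
        value=leaf_values[1], type=Bar,
        kwargs=dict(max_value=_inner, width="{}%".format(int(80 * _inner / total))))
}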

AlwaysError when running a testbench on a synchronizer

I encountered this error when running a testbench together with a synchronizer built from two existing D flip-flops.
File "/home/runner/design.py", line 28, in Sync
#always_seq(clk.posedge, reset=reset)
File "/usr/share/myhdl-0.8/lib/python/myhdl/_always_seq.py", line 76, in _always_seq_decorator
raise AlwaysSeqError(_error.ArgType)
myhdl.AlwaysError: decorated object should be a classic (non-generator) function
My testbench is outlined as follows
from myhdl import *
from random import randrange

HALF_PERIOD = delay(10)   ### This makes a 20-ns clock signal
ACTIVE_HIGH = 1
G_DELAY = delay(15)

def Main():
    ### Signal declaration
    clk, d, dout = [Signal(intbv(0)) for i in range(3)]
    reset = ResetSignal(1, active=ACTIVE_HIGH, async=True)

    ### Module instantiation
    S1 = Sync(dout, d, clk, reset)

    ### Clk generator
    @always(HALF_PERIOD)
    def ClkGen():
        clk.next = not clk

    ### TB def
    @instance
    def Driver():
        yield(HALF_PERIOD)
        reset.next = 0
        for i in range(4):
            yield(G_DELAY)
            d.next = not d
        raise StopSimulation

    return ClkGen, Driver, S1

m1 = traceSignals(Main)
sim = Simulation(m1)
sim.run()
And my synchronizer is coded as follows.
from myhdl import *
from DFF import *

def Sync(dout, din, clk, reset):
    """ The module consists of two FFs with one internal signal
    External signals
    dout : output
    din  : input
    clk  : input
    Internal signal:
    F2F : output-to-input signal that connects the two FFs together
    """
    ### Connectivity
    F2F = Signal(intbv(0))
    F1 = DFF(F2F, din, clk, reset)
    F2 = DFF(dout, F2F, clk, reset)

    ### Function
    @always_seq(clk.posedge, reset=reset)
    def SyncLogic():
        if reset:
            F2F.next = 0
            dout.next = 0
        else:
            F2F.next = din
            yield(WIRE_DELAY)
            dout.next = F2F

    return SyncLogic
and the FF prototype is coded as follows.
from myhdl import *

def DFF(dout, din, clk, reset):

    @always_seq(clk.posedge, reset=reset)
    def Flogic():
        if reset:
            dout.next = 0
        else:
            dout.next = din

    return Flogic
The testbench did work with a similar module I coded earlier (with slight modifications), but it didn't work when combining the two modules together. Please clarify. Thank you.
To model a wire delay, use the "delay" argument in the Signal.
Change

@always_seq(clk.posedge, reset=reset)
def SyncLogic():
    if reset:
        F2F.next = 0
        dout.next = 0
    else:
        F2F.next = din
        yield(WIRE_DELAY)
        dout.next = F2F

return SyncLogic
to:
dout = Signal(<type>, delay=WIRE_DELAY)

# ...

@always_seq(clk.posedge, reset=reset)
def synclogic():
    dout.next = din
With the "always_seq" don't define the reset (it is automatically added). If you want to explicitly define the reset use "#always(clock.posedge, reset.negedge)".
