Exception: Failed to process waveform - python

Error:
Traceback (most recent call last):
File "c:\Programming\New_assistant\speech_to_text.py", line 18, in <module>
if rec.AcceptWaveform(data):
File "C:\Users\david\AppData\Local\Programs\Python\Python310\lib\site-packages\vosk\__init__.py", line 84, in AcceptWaveform
raise Exception("Failed to process waveform")
Exception: Failed to process waveform
PS C:\Programming\New_assistant>
I get this error when I call AcceptWaveform, regardless of the WAV file or the rest of the code. The error only appears with vosk-model-small-ru-0.22; vosk-model-ru-0.22 gives no errors, but its processing time is too long.
Code:
from vosk import Model, KaldiRecognizer
import json
import wave

model = Model(r"File\vosk-model-small-ru-0.22")
wf = wave.open(r"File\record1.wav", "rb")
rec = KaldiRecognizer(model, 8000)

result = ''
last_n = False

while True:
    data = wf.readframes(8000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        res = json.loads(rec.Result())
        if res['text'] != '':
            result += f" {res['text']}"
            last_n = False
        elif not last_n:
            result += '\n'
            last_n = True

res = json.loads(rec.FinalResult())
result += f" {res['text']}"
print(result)

By trial and error I found a solution and formed an assumption about why the error occurs, so if I'm wrong or you have a more complete solution, add it and I'll mark it.
If you copied the vosk sample code, it was most likely written for vosk-model-ru-0.22, which works with a sample rate of 8000, but vosk-model-small-ru-0.22 works with 44100, so just change 8000 to 44100 (depending on your recording).
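A slightly more robust variant is to take the sample rate from the WAV file itself instead of hard-coding it, so the recognizer always matches the recording. A minimal sketch, reusing the paths from the question:

from vosk import Model, KaldiRecognizer
import wave

wf = wave.open(r"File\record1.wav", "rb")
model = Model(r"File\vosk-model-small-ru-0.22")

# Use the recording's own sample rate so the model and the audio always agree
rec = KaldiRecognizer(model, wf.getframerate())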

Related

'_csv.reader' object is not callable

This program is supposed to emit sound based on a CSV file.
There is a frequency range of 37-32677 in the dataset. At first I didn't add this range in and got this same error message. I tried adding the range in and I am still getting the same error.
import winsound
import csv

winsound.Beep(261,100)

def preload(filename):
    file = open(filename)
    data = csv.reader(file)
    return data

def getNote(sensorVal):
    return int(sensorVal * 75)

def setup():
    cleanedData = {}
    notes = []
    data = preload("data1.csv")
    for row in data(range(36,32677)):
        print(row)
        if row[1] != "trial number":
            sensorVal = float(row[4])
            channel = int(row[7])
            if channel not in cleanedData:
                cleanedData[channel] = []
            cleanedData[channel].append({"sensorVal":sensorVal})
            notes.append(getNote(sensorVal))
    return cleanedData,notes

def play(notes,time):
    for note in notes:
        winsound.Beep(note,time)

data, notes = setup()
play(notes, 200)
Error message:
Traceback (most recent call last):
File "C:/Users/clair/PycharmProjects/winSound/main.py", line 32, in <module>
data, notes = setup()
File "C:/Users/clair/PycharmProjects/winSound/main.py", line 16, in setup
for row in data(range(36,32677)):
TypeError: '_csv.reader' object is not callable
Process finished with exit code 1
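For reference, a csv.reader object is iterated, not called, so the range argument on the data(...) line is what triggers this TypeError. A minimal sketch of looping over the rows directly, keeping the question's data1.csv:

import csv

def preload(filename):
    file = open(filename)
    return csv.reader(file)

data = preload("data1.csv")
for row in data:    # iterate the reader object; do not call it
    print(row)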

Truncated file header while using multiprocessing

When I run the following code:
def book_processing(pair, pool_length):
    p = Pool(len(pool_length)*3)
    temp_parameters = partial(book_call_mprocess, pair)
    p.map_async(temp_parameters, pool_length).get(999999)
    p.close()
    p.join()
    return exchange_books
I get the following error:
Traceback (most recent call last):
File "test_code.py", line 214, in <module>
current_books = book_call.book_processing(cp, book_list)
File "/home/user/Desktop/book_call.py", line 155, in book_processing
p.map_async(temp_parameters, pool_length).get(999999)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
raise self._value
zipfile.BadZipfile: Truncated file header
I feel as though there is some resource that was used and not closed during the last loop, but I am not sure how to close it (I am still learning the multiprocessing library). This error only occurs when my code repeats this section relatively quickly (within the same minute). It does not happen often, but it is clear when it does.
Edit (adding the book_call code):
def book_call_mprocess(currency_pair, ex_list):
    polo_error = 0
    live_error = 0
    kraken_error = 0
    gdax_error = 0
    ex_list = set([ex_list])
    ex_Polo = 'Polo'
    ex_Live = 'Live'
    ex_GDAX = 'GDAX'
    ex_Kraken = 'Kraken'
    cp_polo = 'BTC_ETH'
    cp_kraken = 'XETHXXBT'
    cp_live = 'ETH/BTC'
    cp_GDAX = 'ETH-BTC'

    # Instances
    polo_instance = poloapi.poloniex(polo_key, polo_secret)
    fookraken = krakenapi.API(kraken_key, kraken_secret)
    publicClient = GDAX.PublicClient()

    flag = False
    while not flag:
        flag = False
        err = False

        # Polo Book
        try:
            if ex_Polo in ex_list:
                polo_books = polo_instance.returnOrderBook(cp_polo)
                exchange_books['Polo'] = polo_books
        except:
            err = True
            polo_error = 1

        # Livecoin
        try:
            if ex_Live in ex_list:
                method = "/exchange/order_book"
                live_books = OrderedDict([('currencyPair', cp_live)])
                encoded_data = urllib.urlencode(live_books)
                sign = hmac.new(live_secret, msg=encoded_data, digestmod=hashlib.sha256).hexdigest().upper()
                headers = {"Api-key": live_key, "Sign": sign}
                conn = httplib.HTTPSConnection(server)
                conn.request("GET", method + '?' + encoded_data, '', headers)
                response = conn.getresponse()
                live_books = json.load(response)
                conn.close()
                exchange_books['Live'] = live_books
        except:
            err = True
            live_error = 1

        # Kraken
        try:
            if ex_Kraken in ex_list:
                kraken_books = fookraken.query_public('Depth', {'pair': cp_kraken})
                exchange_books['Kraken'] = kraken_books
        except:
            err = True
            kraken_error = 1

        # GDAX books
        try:
            if ex_GDAX in ex_list:
                gdax_books = publicClient.getProductOrderBook(level=2, product=cp_GDAX)
                exchange_books['GDAX'] = gdax_books
        except:
            err = True
            gdax_error = 1

        flag = True
        if err:
            flag = False
            err = False
            error_list = ['Polo', polo_error, 'Live', live_error, 'Kraken', kraken_error, 'GDAX', gdax_error]
            print_to_excel('excel/error_handler.xlsx', 'Book Call Errors', error_list)
            print "Holding..."
            time.sleep(30)

    return exchange_books


def print_to_excel(workbook, worksheet, data_list):
    ts = str(datetime.datetime.now()).split('.')[0]
    data_list = [ts] + data_list
    wb = load_workbook(workbook)
    if worksheet == 'active':
        ws = wb.active
    else:
        ws = wb[worksheet]
    ws.append(data_list)
    wb.save(workbook)
The problem lies in the function print_to_excel,
and more specifically here:
wb = load_workbook(workbook)
If two processes run this function at the same time, you'll hit the following race condition:
Process 1 wants to open error_handler.xlsx; since it doesn't exist, it creates an empty file.
Process 2 wants to open error_handler.xlsx; it does exist, so it tries to read it, but it is still empty. Since the xlsx format is just a zip file consisting of a bunch of XML files, the process expects a valid ZIP header, doesn't find one, and raises zipfile.BadZipfile: Truncated file header.
What looks strange, though, is your error message: in the call stack I would have expected to see print_to_excel and load_workbook.
Anyway, since you confirmed that the problem really is in the XLSX handling, you can either
generate a new filename via tempfile for every process, or
use locking to ensure that only one process runs print_to_excel at a time (see the sketch below).
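A minimal sketch of the locking approach, assuming the workers are started with multiprocessing.Pool as in the question (the lock must be created at module level before the pool so forked child processes inherit it; the wrapper name is only illustrative):

from multiprocessing import Lock

xlsx_lock = Lock()  # created before the Pool so the forked workers share it

def print_to_excel_locked(workbook, worksheet, data_list):
    # Only one process may load/append/save the workbook at a time
    with xlsx_lock:
        print_to_excel(workbook, worksheet, data_list)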

How to catch and manage exceptions for GPS module?

I have a Raspberry Pi with a GPS module. To use the module, I am running code such as this:
##Prints the latitude and longitude every second.
import time
import microstacknode.hardware.gps.l80gps

if __name__ == '__main__':
    gps = microstacknode.hardware.gps.l80gps.L80GPS()
    while True:
        data = gps.get_gpgga()
        List = [list(data.values())[x] for x in [7, 9, 12]]
        string = str(List)
        string = string[1:-1]
        text_file = open("/home/pi/fyp/gps.txt", "a")
        text_file.write(string + "\n")
        time.sleep(1)
However, every now and then it gives this error because it cannot find my location:
Traceback (most recent call last):
File "gps.py", line 8, in <module>
data = gps.get_gpgga()
File "/usr/lib/python3/dist-packages/microstacknode/hardware/gps/l80gps.py", line 119, in get_gpgga
pkt = self.get_nmea_pkt('GPGGA')
File "/usr/lib/python3/dist-packages/microstacknode/hardware/gps/l80gps.py", line 293, in get_nmea_pkt
"Timed out before valid '{}'.".format(pattern))
microstacknode.hardware.gps.l80gps.NMEAPacketNotFoundError: Timed out before valid 'GPGGA'.
It's alright to have that error. The trouble I have is that the program stops running if it occurs. Is there a way to catch that error and get the program to loop back and try again even if it encounters this error?
UPDATE
If I try Stefan_Reinhardt's method, I get the following error instead:
Traceback (most recent call last):
File "gps.py", line 9, in <module>
data = gps.get_gpgga()
File "/usr/lib/python3/dist-packages/microstacknode/hardware/gps/l80gps.py", line 119, in get_gpgga
pkt = self.get_nmea_pkt('GPGGA')
File "/usr/lib/python3/dist-packages/microstacknode/hardware/gps/l80gps.py", line 293, in get_nmea_pkt
"Timed out before valid '{}'.".format(pattern))
microstacknode.hardware.gps.l80gps.NMEAPacketNotFoundError: Timed out before valid 'GPGGA'.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "gps.py", line 10, in <module>
except NMEAPacketNotFoundError:
NameError: name 'NMEAPacketNotFoundError' is not defined
I agree with Oisin's answer,
but I'd suggest putting the try-except clause only around the line where the exception can happen, and skipping the rest of the while loop with a continue statement, so it would look like:
##Prints the latitude and longitude every second.
import time
import microstacknode.hardware.gps.l80gps

if __name__ == '__main__':
    gps = microstacknode.hardware.gps.l80gps.L80GPS()
    while True:
        try:
            data = gps.get_gpgga()
        except NMEAPacketNotFoundError:
            continue
        List = [list(data.values())[x] for x in [7, 9, 12]]
        string = str(List)
        string = string[1:-1]
        text_file = open("/home/pi/fyp/gps.txt", "a")
        text_file.write(string + "\n")
        time.sleep(1)
This should work but it could get stuck in an infinite recursion loop.
##Prints the latitude and longitude every second.
import time
import microstacknode.hardware.gps.l80gps

def getPos():
    try:
        while True:
            gps = microstacknode.hardware.gps.l80gps.L80GPS()
            data = gps.get_gpgga()
            List = [list(data.values())[x] for x in [7, 9, 12]]
            string = str(List)
            string = string[1:-1]
            text_file = open("/home/pi/fyp/gps.txt", "a")
            text_file.write(string + "\n")
            time.sleep(1)
    except microstacknode.hardware.gps.l80gps.NMEAPacketNotFoundError:
        getPos()

if __name__ == '__main__':
    getPos()
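Regarding the NameError in the update: the exception class has to be imported (or referenced through its full module path) before it can be caught. A minimal sketch of the import-based variant:

from microstacknode.hardware.gps.l80gps import L80GPS, NMEAPacketNotFoundError

gps = L80GPS()
try:
    data = gps.get_gpgga()
except NMEAPacketNotFoundError:
    # No valid GPGGA sentence arrived in time (e.g. no fix yet); try again later
    data = None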

AWS Kinesis Consumer Python 3.4 Boto

I am trying to build a Kinesis consumer script using Python 3.4; below is an example of my code. I want the records to be saved to a local file that I can later push to S3:
from boto import kinesis
import time
import json

# AWS Connection Credentials
aws_access_key = 'your_key'
aws_access_secret = 'your_secret key'

# Selected Kinesis Stream
stream = 'TwitterTesting'

# Aws Authentication
auth = {"aws_access_key_id": aws_access_key, "aws_secret_access_key": aws_access_secret}
conn = kinesis.connect_to_region('us-east-1', **auth)

# Targeted file to be pushed to S3 bucket
fileName = "KinesisDataTest2.txt"
file = open("C:\\Users\\csanders\\PycharmProjects\\untitled\\KinesisDataTest.txt", "a")

# Describe stream and get shards
tries = 0
while tries < 10:
    tries += 1
    time.sleep(1)
    response = conn.describe_stream(stream)
    if response['StreamDescription']['StreamStatus'] == 'ACTIVE':
        break
else:
    raise TimeoutError('Stream is still not active, aborting...')

# Get Shard Iterator and get records from stream
shard_ids = []
stream_name = None
if response and 'StreamDescription' in response:
    stream_name = response['StreamDescription']['StreamName']
    for shard_id in response['StreamDescription']['Shards']:
        shard_id = shard_id['ShardId']
        shard_iterator = conn.get_shard_iterator(stream, shard_id, 'TRIM_HORIZON')
        shard_ids.append({'shard_id': shard_id, 'shard_iterator': shard_iterator['ShardIterator']})

tries = 0
result = []
while tries < 100:
    tries += 1
    response = conn.get_records(shard_iterator, 100)
    shard_iterator = response['NextShardIterator']
    if len(response['Records']) > 0:
        for res in response['Records']:
            result.append(res['Data'])
    print(result, shard_iterator)
For some reason when I run this script I get the following error each time:
Traceback (most recent call last):
File "C:/Users/csanders/PycharmProjects/untitled/Get_records_Kinesis.py", line 57, in <module>
response = json.load(conn.get_records(shard_ids, 100))
File "C:\Python34\lib\site-packages\boto-2.38.0-py3.4.egg\boto\kinesis\layer1.py", line 327, in get_records
body=json.dumps(params))
File "C:\Python34\lib\site-packages\boto-2.38.0- py3.4.egg\boto\kinesis\layer1.py", line 874, in make_request
body=json_body)
boto.exception.JSONResponseError: JSONResponseError: 400 Bad Request
{'Message': 'Start of list found where not expected', '__type': 'SerializationException'}
My end goal is to eventually kick this data into an S3 bucket; I just need to get these records to return and print first. The data going into the stream is JSON-dumped Twitter data sent with the put_record function. I can post that code too if needed.
Update: I changed that one line from response = json.load(conn.get_records(shard_ids, 100)) to response = conn.get_records(shard_iterator, 100).
response = json.load(conn.get_records(shard_ids, 100))
get_records expects a single shard iterator, not an array of shards, so when it tries to get records it fails miserably (you see the 400 from Kinesis saying that the request is bad).
http://boto.readthedocs.org/en/latest/ref/kinesis.html?highlight=get_records#boto.kinesis.layer1.KinesisConnection.get_records
If you replace the following, it will work (the while bound controls how many records you collect; you can make the loop infinite by removing tries += 1 so that tries stays at 0):
shard_iterator = conn.get_shard_iterator(stream, shard_id, 'TRIM_HORIZON')
shard_ids.append({'shard_id': shard_id, 'shard_iterator': shard_iterator['ShardIterator']})
with the following:
shard_iterator = conn.get_shard_iterator(stream, shard_id, "LATEST")["ShardIterator"]
Also, to write to a file, change ("\n" is for a new line):
print(result, shard_iterator)
to:
file.write(str(result) + "\n")
Hope it helps.
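Putting those changes together, a minimal sketch of the corrected fetch loop, reusing the question's conn, stream, shard_id and file objects (the loop bound is only illustrative):

shard_iterator = conn.get_shard_iterator(stream, shard_id, "LATEST")["ShardIterator"]

tries = 0
while tries < 100:                  # raise or drop the bound to collect more records
    tries += 1
    response = conn.get_records(shard_iterator, 100)   # a single iterator, not a list of shards
    shard_iterator = response['NextShardIterator']
    for res in response['Records']:
        file.write(str(res['Data']) + "\n")             # "\n" writes one record per line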

Error always on line 102 of my code

So I am creating a module, and I am importing it into a Python shell and running some stuff to make sure all the features work.
For some reason every time I run the code, it gives the following error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/ryansaxe/Desktop/Code/python/modules/pymaps.py", line 102, in url_maker
#anything can be here
AttributeError: type object 'datetime.datetime' has no attribute 'datetime'
The #anything can be here comment stands for whatever happens to be on line 102 of my code. Originally, line 102 was:
if isinstance(startindex,datetime.datetime):
and I got the error above. I put a quick print statement on line 102 to check and it gave the same error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/ryansaxe/Desktop/Code/python/modules/pymaps.py", line 102, in url_maker
print 'Hello'
AttributeError: type object 'datetime.datetime' has no attribute 'datetime'
Is this some sort of bug? Why is it telling me there is an error with datetime on the line print 'Hello'?
Because it may be helpful, I will give you the function I am having trouble with, since I have no clue how this is possible. I am keeping the print 'Hello' line so you can see where line 102 is:
def url_maker(latitudes,longitudes,times=None,color='red',label=' ',zoom=12,center=None,start=None,end=None,by=None,size='600x300'):
    urls = []
    import datetime
    if isinstance(times[0],str) or isinstance(times[0],datetime.datetime):
        from dateutil import parser
        if isinstance(times[0],str):
            times = [parser.parse(x) for x in times]
        if isinstance(start,str):
            startindex = parser.parse(start)
        else:
            startindex = start
        if isinstance(end,str):
            endindex = parse.parse(end)
        else:
            endindex = end
        print 'Hello'
        if isinstance(startindex,datetime.datetime):
            startpos = between_times(times,startindex,by='start')
        elif isinstance(startindex,int):
            if isinstance(endindex,datetime.datetime):
                startpos = between_times(times,endindex,by='end') - start
            else:
                startpos = start
        else:
            pass
        if isinstance(endindex,datetime.datetime):
            endpos = between_times(times,endindex,by='end')
        elif isinstance(endindex,int):
            if isinstance(startindex,datetime.datetime):
                endpos = between_times(times,startindex,by='start') + end
            else:
                endpos = end
        else:
            pass
    else:
        times = range(1,len(latitudes) + 1)
        if isinstance(start,int):
            startpos = start
        else:
            startpos = None
        if isinstance(end,int):
            endpos = end
        else:
            endpos = None
    if isinstance(by,str):
        lat,lon,t = latitudes[startpos:endpos],latitudes[startpos:endpos],times[startpos:endpos]
        print lat
        t,lats,lons = time_sample(t,by,lat,lon)
    elif isinstance(by,int):
        lats,lons,t = latitudes[startpos:endpos:by],latitudes[startpos:endpos:by],times[startpos:endpos:by]
    else:
        lats,lons,t = latitudes[startpos:endpos],latitudes[startpos:endpos],times[startpos:endpos]
    print t
    print len(t)
    if center == None:
        latit = [str(i) for i in lats]
        longi = [str(i) for i in lons]
        center = '&center=' + common_finder(latit,longi)
    else:
        center = '&center=' + '+'.join(center.split())
    zoom = '&zoom=' + str(zoom)
    for i in range(len(lats)):
        #label = str(i)
        x,y = str(lats[i]),str(lons[i])
        marker = '&markers=color:' + color + '%7Clabel:' + label + '%7C' + x + ',' + y
        url = 'http://maps.googleapis.com/maps/api/staticmap?maptype=roadmap&size=' + size + zoom + center + marker + '&sensor=true'
        urls.append(url)
        #print i
    return urls,t
You are running with a stale bytecode cache or are re-running the code in an existing interpreter without restarting it.
The traceback code has only bytecode to work with, which contains file name and line number information. When an exception occurs, the source file is loaded to retrieve the original line of code, but if the source file has changed, the wrong line is shown.
Restart the interpreter and/or remove all *.pyc files; the .pyc files will be recreated when the interpreter imports the code again.
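If you prefer to clear the bytecode cache programmatically rather than by hand, a minimal sketch (assuming you run it from the project's top-level directory):

import os

# Remove every compiled bytecode file under the project directory;
# the interpreter regenerates them on the next import
for dirpath, dirnames, filenames in os.walk("."):
    for name in filenames:
        if name.endswith(".pyc"):
            os.remove(os.path.join(dirpath, name))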
As for your specific exception: you probably imported the datetime class from the datetime module somewhere:
from datetime import datetime
The datetime class does not have a datetime attribute, only the module does.
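A minimal illustration of the difference between binding the class and binding the module to the name datetime (values are only for demonstration):

from datetime import datetime    # binds the name "datetime" to the class

print(datetime.now())            # works: "datetime" is the class here
# datetime.datetime              # AttributeError: type object 'datetime.datetime' has no attribute 'datetime'

import datetime                  # binds the name "datetime" to the module instead

print(datetime.datetime.now())   # works: module -> class -> now()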
