How to change the data stream sent to Google speech recognition (Python) - python

I have code which streams audio data from the laptop microphone to Google Speech recognition, but I want to stream audio from another source. From that source I can get a buffer of raw data, and this buffer is what I want to stream to Google. Can somebody help me or give some useful advice?
I tried to search and solve this by myself, but I couldn't find a way.
Here is the code:
from __future__ import division
import contextlib
import functools
import re
import signal
import sys
import google.auth
import google.auth.transport.grpc
import google.auth.transport.requests
from google.cloud.proto.speech.v1beta1 import cloud_speech_pb2
from google.rpc import code_pb2
import grpc
import pyaudio
from six.moves import queue
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
DEADLINE_SECS = 60 * 3 + 5
SPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
def make_channel(host, port):
"""Creates a secure channel with auth credentials from the environment."""
# Grab application default credentials from the environment
credentials, _ = google.auth.default(scopes=[SPEECH_SCOPE])
# Create a secure channel using the credentials.
http_request = google.auth.transport.requests.Request()
target = '{}:{}'.format(host, port)
return google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, target)
def _audio_data_generator(buff):
stop = False
while not stop:
# Use a blocking get() to ensure there's at least one chunk of data.
data = [buff.get()]
# Now consume whatever other data's still buffered.
while True:
try:
data.append(buff.get(block=False))
except queue.Empty:
break
# `None` in the buffer signals that the audio stream is closed. Yield
# the final bit of the buffer and exit the loop.
if None in data:
stop = True
data.remove(None)
yield b''.join(data)
def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
buff.put(in_data)
return None, pyaudio.paContinue
# [START audio_stream]
@contextlib.contextmanager
def record_audio(rate, chunk):
"""Opens a recording stream in a context manager."""
# Create a thread-safe buffer of audio data
buff = queue.Queue()
audio_interface = pyaudio.PyAudio()
audio_stream = audio_interface.open(
format=pyaudio.paInt16,
# The API currently only supports 1-channel (mono) audio
channels=1, rate=rate,
input=True, frames_per_buffer=chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow
# while the calling thread makes network requests, etc.
stream_callback=functools.partial(_fill_buffer, buff),
)
yield _audio_data_generator(buff)
audio_stream.stop_stream()
audio_stream.close()
# Signal the _audio_data_generator to finish
buff.put(None)
audio_interface.terminate()
# [END audio_stream]
def request_stream(data_stream, rate, interim_results=True):
"""Yields `StreamingRecognizeRequest`s constructed from a recording audio
stream.
Args:
data_stream: A generator that yields raw audio data to send.
rate: The sampling rate in hertz.
interim_results: Whether to return intermediate results, before the
transcription is finalized.
"""
# The initial request must contain metadata about the stream, so the
# server knows how to interpret it.
recognition_config = cloud_speech_pb2.RecognitionConfig(
# There are a bunch of config options you can specify.
encoding='LINEAR16', # raw 16-bit signed LE samples
sample_rate=rate, # the rate in hertz
language_code='sk-SK', #sk-SK a BCP-47 language tag
)
streaming_config = cloud_speech_pb2.StreamingRecognitionConfig(
interim_results=interim_results,
config=recognition_config,
)
yield cloud_speech_pb2.StreamingRecognizeRequest(
streaming_config=streaming_config)
for data in data_stream:
# Subsequent requests can all just have the content
yield cloud_speech_pb2.StreamingRecognizeRequest(audio_content=data)
def listen_print_loop(recognize_stream):
"""Iterates through server responses and prints them.
The recognize_stream passed is a generator that will block until a response
is provided by the server. When the transcription response comes, print it.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
num_chars_printed = 0
for resp in recognize_stream:
if resp.error.code != code_pb2.OK:
raise RuntimeError('Server error: ' + resp.error.message)
if not resp.results:
continue
# Display the top transcription
result = resp.results[0]
transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * max(0, num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript)
else:
print(transcript + overwrite_chars)
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if re.search(r'\b(exit|quit)\b', transcript, re.I):
print('Exiting..')
break
num_chars_printed = 0
def main():
service = cloud_speech_pb2.SpeechStub(
make_channel('speech.googleapis.com', 443))
# For streaming audio from the microphone, there are three threads.
# First, a thread that collects audio data as it comes in
with record_audio(RATE, CHUNK) as buffered_audio_data:
# Second, a thread that sends requests with that data
requests = request_stream(buffered_audio_data, RATE)
# Third, a thread that listens for transcription responses
recognize_stream = service.StreamingRecognize(
requests, DEADLINE_SECS)
# Exit things cleanly on interrupt
signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())
# Now, put the transcription responses to use.
try:
listen_print_loop(recognize_stream)
recognize_stream.cancel()
except grpc.RpcError as e:
code = e.code()
# CANCELLED is caused by the interrupt handler, which is expected.
if code is not code.CANCELLED:
raise
if __name__ == '__main__':
main()

I implemented this in Java; maybe it will help.
Source: https://github.com/achernetsov/java-docs-samples/blob/stream-from-file-example/speech/cloud-client/src/main/java/com/example/speech/RecognizeStreamFromFile.java
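In Python, a rough sketch of the same idea, reusing request_stream(), listen_print_loop(), make_channel(), RATE and DEADLINE_SECS from the code above: replace the pyaudio-based record_audio() with a generator that is fed from your own source. read_raw_chunk below is a hypothetical stand-in for whatever call gives you a buffer of raw 16-bit mono LINEAR16 samples at RATE hertz; adapt it to your real source.
import contextlib

@contextlib.contextmanager
def raw_buffer_audio(read_raw_chunk):
    """Yields a generator over raw LINEAR16 chunks pulled from an external source."""
    def generator():
        while True:
            chunk = read_raw_chunk()   # assumed to block until data is available
            if not chunk:              # empty bytes / None signals the end of the stream
                break
            yield chunk
    yield generator()

def main():
    service = cloud_speech_pb2.SpeechStub(
        make_channel('speech.googleapis.com', 443))
    # my_source.read_raw_chunk is hypothetical; plug in your real data source here.
    with raw_buffer_audio(my_source.read_raw_chunk) as buffered_audio_data:
        requests = request_stream(buffered_audio_data, RATE)
        recognize_stream = service.StreamingRecognize(requests, DEADLINE_SECS)
        listen_print_loop(recognize_stream)
The sample rate passed to request_stream() must match the actual rate of your raw buffer, otherwise recognition quality will suffer.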

Related

GRPCIO shows error while executing the pushtotalk

I am connecting the Raspberry Pi to the Google AI Assistant and it gives me the following error.
The command I run:
googlesamples-assistant-pushtotalk
File "/home/pi/env/bin/googlesamples-assistant-pushtotalk", line 6, in <module>
from googlesamples.assistant.grpc.pushtotalk import main
File "/home/pi/env/lib/python3.9/site-packages/googlesamples/assistant/grpc/pushtotalk.py", line 30, in <module>
from tenacity import retry, stop_after_attempt, retry_if_exception
File "/home/pi/env/lib/python3.9/site-packages/tenacity/__init__.py", line 292
from tenacity.async import AsyncRetrying
^
SyntaxError: invalid syntax
The file:
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that implements gRPC client for Google Assistant API."""
#pip install grpc
import grpc
import json
import logging
import os.path
import click
#import grpc
import google.auth.transport.grpc
import google.auth.transport.requests
import google.oauth2.credentials
from google.assistant.embedded.v1alpha1 import embedded_assistant_pb2
from google.rpc import code_pb2
from tenacity import retry, stop_after_attempt, retry_if_exception
try:
from . import (
assistant_helpers,
audio_helpers
)
except SystemError:
import assistant_helpers
import audio_helpers
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
class SampleAssistant(object):
"""Sample Assistant that supports follow-on conversations.
Args:
conversation_stream(ConversationStream): audio stream
for recording query and playing back assistant answer.
channel: authorized gRPC channel for connection to the
Google Assistant API.
deadline_sec: gRPC deadline in seconds for Google Assistant API call.
"""
def __init__(self, conversation_stream, channel, deadline_sec):
self.conversation_stream = conversation_stream
# Opaque blob provided in ConverseResponse that,
# when provided in a follow-up ConverseRequest,
# gives the Assistant a context marker within the current state
# of the multi-Converse()-RPC "conversation".
# This value, along with MicrophoneMode, supports a more natural
# "conversation" with the Assistant.
self.conversation_state = None
# Create Google Assistant API gRPC client.
self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(channel)
self.deadline = deadline_sec
def __enter__(self):
return self
def __exit__(self, etype, e, traceback):
if e:
return False
self.conversation_stream.close()
def is_grpc_error_unavailable(e):
is_grpc_error = isinstance(e, grpc.RpcError)
if is_grpc_error and (e.code() == grpc.StatusCode.UNAVAILABLE):
logging.error('grpc unavailable error: %s', e)
return True
return False
@retry(reraise=True, stop=stop_after_attempt(3),
       retry=retry_if_exception(is_grpc_error_unavailable))
def converse(self):
"""Send a voice request to the Assistant and playback the response.
Returns: True if conversation should continue.
"""
continue_conversation = False
self.conversation_stream.start_recording()
logging.info('Recording audio request.')
def iter_converse_requests():
for c in self.gen_converse_requests():
assistant_helpers.log_converse_request_without_audio(c)
yield c
self.conversation_stream.start_playback()
# This generator yields ConverseResponse proto messages
# received from the gRPC Google Assistant API.
for resp in self.assistant.Converse(iter_converse_requests(),
self.deadline):
assistant_helpers.log_converse_response_without_audio(resp)
if resp.error.code != code_pb2.OK:
logging.error('server error: %s', resp.error.message)
break
if resp.event_type == END_OF_UTTERANCE:
logging.info('End of audio request detected')
self.conversation_stream.stop_recording()
if resp.result.spoken_request_text:
logging.info('Transcript of user request: "%s".',
resp.result.spoken_request_text)
logging.info('Playing assistant response.')
if len(resp.audio_out.audio_data) > 0:
self.conversation_stream.write(resp.audio_out.audio_data)
if resp.result.spoken_response_text:
logging.info(
'Transcript of TTS response '
'(only populated from IFTTT): "%s".',
resp.result.spoken_response_text)
if resp.result.conversation_state:
self.conversation_state = resp.result.conversation_state
if resp.result.volume_percentage != 0:
self.conversation_stream.volume_percentage = (
resp.result.volume_percentage
)
if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
continue_conversation = True
logging.info('Expecting follow-on query from user.')
elif resp.result.microphone_mode == CLOSE_MICROPHONE:
continue_conversation = False
logging.info('Finished playing assistant response.')
self.conversation_stream.stop_playback()
return continue_conversation
def gen_converse_requests(self):
"""Yields: ConverseRequest messages to send to the API."""
converse_state = None
if self.conversation_state:
logging.debug('Sending converse_state: %s',
self.conversation_state)
converse_state = embedded_assistant_pb2.ConverseState(
conversation_state=self.conversation_state,
)
config = embedded_assistant_pb2.ConverseConfig(
audio_in_config=embedded_assistant_pb2.AudioInConfig(
encoding='LINEAR16',
sample_rate_hertz=self.conversation_stream.sample_rate,
),
audio_out_config=embedded_assistant_pb2.AudioOutConfig(
encoding='LINEAR16',
sample_rate_hertz=self.conversation_stream.sample_rate,
volume_percentage=self.conversation_stream.volume_percentage,
),
converse_state=converse_state
)
# The first ConverseRequest must contain the ConverseConfig
# and no audio data.
yield embedded_assistant_pb2.ConverseRequest(config=config)
for data in self.conversation_stream:
# Subsequent requests need audio data, but not config.
yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
@click.command()
@click.option('--api-endpoint', default=ASSISTANT_API_ENDPOINT,
              metavar='<api endpoint>', show_default=True,
              help='Address of Google Assistant API service.')
@click.option('--credentials',
              metavar='<credentials>', show_default=True,
              default=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                   'credentials.json'),
              help='Path to read OAuth2 credentials.')
@click.option('--verbose', '-v', is_flag=True, default=False,
              help='Verbose logging.')
@click.option('--input-audio-file', '-i',
              metavar='<input file>',
              help='Path to input audio file. '
                   'If missing, uses audio capture')
@click.option('--output-audio-file', '-o',
              metavar='<output file>',
              help='Path to output audio file. '
                   'If missing, uses audio playback')
@click.option('--audio-sample-rate',
              default=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
              metavar='<audio sample rate>', show_default=True,
              help='Audio sample rate in hertz.')
@click.option('--audio-sample-width',
              default=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
              metavar='<audio sample width>', show_default=True,
              help='Audio sample width in bytes.')
@click.option('--audio-iter-size',
              default=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
              metavar='<audio iter size>', show_default=True,
              help='Size of each read during audio stream iteration in bytes.')
@click.option('--audio-block-size',
              default=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
              metavar='<audio block size>', show_default=True,
              help=('Block size in bytes for each audio device '
                    'read and write operation.'))
@click.option('--audio-flush-size',
              default=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
              metavar='<audio flush size>', show_default=True,
              help=('Size of silence data in bytes written '
                    'during flush operation'))
@click.option('--grpc-deadline', default=DEFAULT_GRPC_DEADLINE,
              metavar='<grpc deadline>', show_default=True,
              help='gRPC deadline in seconds')
@click.option('--once', default=False, is_flag=True,
              help='Force termination after a single conversation.')
def main(api_endpoint, credentials, verbose,
input_audio_file, output_audio_file,
audio_sample_rate, audio_sample_width,
audio_iter_size, audio_block_size, audio_flush_size,
grpc_deadline, once, *args, **kwargs):
"""Samples for the Google Assistant API.
Examples:
Run the sample with microphone input and speaker output:
$ python -m googlesamples.assistant
Run the sample with file input and speaker output:
$ python -m googlesamples.assistant -i <input file>
Run the sample with file input and output:
$ python -m googlesamples.assistant -i <input file> -o <output file>
"""
# Setup logging.
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
# Load OAuth 2.0 credentials.
try:
with open(credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None,
**json.load(f))
http_request = google.auth.transport.requests.Request()
credentials.refresh(http_request)
except Exception as e:
logging.error('Error loading credentials: %s', e)
logging.error('Run google-oauthlib-tool to initialize '
'new OAuth 2.0 credentials.')
return
# Create an authorized gRPC channel.
grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, api_endpoint)
logging.info('Connecting to %s', api_endpoint)
# Configure audio source and sink.
audio_device = None
if input_audio_file:
audio_source = audio_helpers.WaveSource(
open(input_audio_file, 'rb'),
sample_rate=audio_sample_rate,
sample_width=audio_sample_width
)
else:
audio_source = audio_device = (
audio_device or audio_helpers.SoundDeviceStream(
sample_rate=audio_sample_rate,
sample_width=audio_sample_width,
block_size=audio_block_size,
flush_size=audio_flush_size
)
)
if output_audio_file:
audio_sink = audio_helpers.WaveSink(
open(output_audio_file, 'wb'),
sample_rate=audio_sample_rate,
sample_width=audio_sample_width
)
else:
audio_sink = audio_device = (
audio_device or audio_helpers.SoundDeviceStream(
sample_rate=audio_sample_rate,
sample_width=audio_sample_width,
block_size=audio_block_size,
flush_size=audio_flush_size
)
)
# Create conversation stream with the given audio source and sink.
conversation_stream = audio_helpers.ConversationStream(
source=audio_source,
sink=audio_sink,
iter_size=audio_iter_size,
sample_width=audio_sample_width,
)
with SampleAssistant(conversation_stream,
grpc_channel, grpc_deadline) as assistant:
# If file arguments are supplied:
# exit after the first turn of the conversation.
if input_audio_file or output_audio_file:
assistant.converse()
return
# If no file arguments supplied:
# keep recording voice requests using the microphone
# and playing back assistant response using the speaker.
# When the once flag is set, don't wait for a trigger. Otherwise, wait.
wait_for_user_trigger = not once
while True:
if wait_for_user_trigger:
click.pause(info='Press Enter to send a new request...')
continue_conversation = assistant.converse()
# wait for user trigger if there is no follow-up turn in
# the conversation.
wait_for_user_trigger = not continue_conversation
# If we only want one conversation, break.
if once and (not continue_conversation):
break
if __name__ == '__main__':
    main()
Thanks for your kind support.
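For what it is worth, the SyntaxError itself comes from an old tenacity release: async became a reserved keyword in Python 3.7, so tenacity versions that still ship a tenacity/async.py module cannot be imported at all on Python 3.9. A likely fix, assuming the virtualenv path shown in the traceback, is to upgrade tenacity inside that environment (the old Assistant sample pins other outdated dependencies, so further version conflicts may still appear):
/home/pi/env/bin/pip install --upgrade tenacity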

Azure servicebus python - how to send a bunch of messages

To avoid errors when I have to send a group of messages that is larger than the max size, I wrote a class for sending messages in batches.
Well, first of all it would be wonderful if somebody could show me an example that explains how to avoid this problem.
Trying to solve the problem by myself, I found it extremely hard to understand the size of a message (ServiceBusMessage).
The method sb_msg.message.get_message_encoded_size() is the nearest thing to what I need.
Do you know how to calculate the message size?
def send_as_json(self, msg, id_field=None, size_in_bytes=262144):
    if isinstance(msg, list):
        payload = self.topic_sender.create_message_batch(size_in_bytes)
        for m in msg:
            try:
                # add a message to the batch
                sb_msg = ServiceBusMessage(json.dumps(m), message_id=m.get(id_field, uuid.uuid4()), content_type='application/json')
                total_size = payload.size_in_bytes + sb_msg.message.get_message_encoded_size()
                if total_size > size_in_bytes:
                    _log.info(f'sending partial batch of {payload.size_in_bytes} bytes')
                    self.send_service_bus_message(payload)
                    payload = self.topic_sender.create_message_batch(size_in_bytes)
                payload.add_message(sb_msg)
            except ValueError as e:
                # ServiceBusMessageBatch object reaches max_size.
                # New ServiceBusMessageBatch object can be created here to send more data.
                raise Exception('', e)
        self.send_service_bus_message(payload)
    else:
        sb_msg = ServiceBusMessage(json.dumps(msg), message_id=msg.get(id_field, uuid.uuid4()), content_type='application/json')
        self.send_service_bus_message(sb_msg)
Azure ServiceBus - Python:
The code below gives an insight into how to send a batch of messages:
def send_batch_message(sender):
    # create a batch of messages
    batch_message = sender.create_message_batch()
    for _ in range(10):
        try:
            # add a message to the batch
            batch_message.add_message(ServiceBusMessage("Message inside a ServiceBusMessageBatch"))
        except ValueError:
            # ServiceBusMessageBatch object reaches max_size.
            # New ServiceBusMessageBatch object can be created here to send more data.
            break
    # send the batch of messages to the queue
    sender.send_messages(batch_message)
    print("Sent a batch of 10 messages")
For more detailed information you can visit the Microsoft docs below; that link has clear information on sending messages as a single message, a list, or a batch: Link
How to find the size of a message:
With the help of the function below we can find the size of the message in bytes:
import sys
sys.getsizeof(variable_name)  # this gives us the bytes occupied by the variable
To check the max_size_in_bytes of a batch, we can simply print the batch object:
batch_message = sender.create_message_batch()  # Step 1
batch_message.add_message(ServiceBusMessage("Message we want to add"))  # Step 2: add messages to the batch in a loop
print(batch_message)  # Step 3: printing the batch gives the output below
Output:
ServiceBusMessageBatch(max_size_in_bytes=1048576, message_count=10)
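A rough sketch that combines the two snippets above: rather than computing sizes by hand, let ServiceBusMessageBatch.add_message() decide when a batch is full by catching the ValueError it raises, then flush and start a new batch. send_json_in_batches and the sender argument are illustrative names, not an official API.
import json
import uuid
from azure.servicebus import ServiceBusMessage

def send_json_in_batches(sender, items, id_field=None):
    """Send a list of dicts as JSON messages, flushing a batch whenever it fills up."""
    batch = sender.create_message_batch()
    count = 0  # number of messages in the current batch
    for item in items:
        msg = ServiceBusMessage(
            json.dumps(item),
            message_id=str(item.get(id_field) or uuid.uuid4()),
            content_type='application/json',
        )
        try:
            batch.add_message(msg)
        except ValueError:
            # The current batch is full: send it, start a new one, and retry this message.
            sender.send_messages(batch)
            batch = sender.create_message_batch()
            batch.add_message(msg)  # a single message larger than max_size still raises here
            count = 0
        count += 1
    if count:
        sender.send_messages(batch)  # flush the final partial batch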

Kinesis Video stream async get frames

I would like to capture a video stream from AWS Kinesis using asyncio. My goal is to extract frames from the stream and pass them to a processing queue.
Below is a working example of what I have so far. It receives chunks of data of varying length (anywhere from, say, 76 bytes up to 8192) which do not seem to be complete frames.
Is there a cheap way to split the stream into chunks, with asyncio support and preferably without threads?
I want one application to handle at least 10-20 streams, and to have one application per CPU running on the server.
I was thinking about ffmpeg and opencv, but they seem too heavyweight and do not seem to work well with asyncio.
import asyncio
import aiobotocore

VIDEO_STREAM_NAME = 'bc-test1'

async def get_data2(loop):
    chunk_size = 1024 * 1024 * 500
    session = aiobotocore.get_session(loop=loop)
    async with session.create_client('kinesisvideo', region_name='us-west-2', ) as client:
        resp = await client.get_data_endpoint(
            StreamName=VIDEO_STREAM_NAME,
            APIName='GET_MEDIA',
        )
        data_url = resp['DataEndpoint']
    async with session.create_client('kinesis-video-media', endpoint_url=data_url) as client:
        resp = await client.get_media(
            StreamName=VIDEO_STREAM_NAME,
            StartSelector={"StartSelectorType": "NOW", },
        )
        print(resp)
        while True:
            data = await resp['Payload'].read(1024 * 8)
            if data:
                print("frame len: %s" % len(data))
            else:
                print("No data")
                break

def main():
    loop = asyncio.get_event_loop()
    loop.run_until_complete(get_data2(loop))

if __name__ == '__main__':
    main()
I would also consider advice not to use asyncio, with another solution that satisfies the requirements above.
Does it have to be Python?
Kinesis Video Streams has examples in Java to consume the stream, parse the data and decode the video: https://github.com/aws/amazon-kinesis-video-streams-parser-library
Kinesis Video Streams also has KIT: https://aws.amazon.com/blogs/machine-learning/analyze-live-video-at-scale-in-real-time-using-amazon-kinesis-video-streams-and-amazon-sagemaker/
It can be used to process video streams at scale.
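If it does have to be Python and asyncio, a rough, assumption-heavy sketch of a cheap re-chunking step: GetMedia returns a Matroska (MKV) byte stream in which each Kinesis fragment starts a new Cluster element, so scanning for the Cluster ID groups the bytes per fragment. This only yields raw fragment bytes; turning them into decoded image frames still needs a real MKV/H.264 parser or decoder such as the Java library above.
# EBML ID of a Matroska Cluster element.
CLUSTER_ID = b'\x1f\x43\xb6\x75'

async def iter_fragments(payload, read_size=8 * 1024):
    """Naively re-chunk the GetMedia byte stream on MKV Cluster boundaries."""
    buf = b''
    while True:
        data = await payload.read(read_size)
        if not data:
            break
        buf += data
        # Emit every complete cluster currently sitting in the buffer.
        while True:
            start = buf.find(CLUSTER_ID)
            nxt = buf.find(CLUSTER_ID, start + 4) if start != -1 else -1
            if start == -1 or nxt == -1:
                break
            yield buf[start:nxt]
            buf = buf[nxt:]
    if buf:
        yield buf
It would replace the fixed-size read loop above, e.g. async for fragment in iter_fragments(resp['Payload']): ...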

Avoid error 420s with streaming API tweepy

I made a Python script which uses the tweepy streaming module to stream mentions of a Twitter account and carry out some functions based on the status text.
I want it to stream until a mention is made, then stop streaming, carry out some functions based on the status text, and start streaming again.
This is my code:
class StdOutListener(tweepy.StreamListener):
    def on_data(self, data):
        tweet = json.loads(data.strip())
        global d
        d = tweet
        return False  # stops streaming after a tweet is fed to it

    def on_error(self, status_code):
        print(status_code)
        time.sleep(120)
        return True  # To continue listening

    def on_timeout(self):
        time.sleep(120)
        return True  # To continue listening

while True:
    d = {}
    listener = StdOutListener()
    stream = tweepy.Stream(twitter_auth(tokens), listener)
    stream.filter(track=['#xxx'])
    stream.disconnect()
    doSomething(d)
But it only works for one loop and then shows 420 (rate limit exceeded) errors, even though I only take in a single tweet per stream (if I'm not wrong).
Can anyone please explain where I'm going wrong? And also, when should we use async mode in the tweepy stream listener?
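One pattern that usually avoids the 420s (a rough sketch against the tweepy 3.x StreamListener API used above; doSomething and twitter_auth are the question's own helpers): keep a single streaming connection open and do the processing inside on_data, instead of disconnecting and calling filter() again in a loop. Opening a new streaming connection for every tweet is exactly what the rate limiter punishes with HTTP 420.
import json
import time
import tweepy

class MentionListener(tweepy.StreamListener):
    def on_data(self, data):
        tweet = json.loads(data.strip())
        doSomething(tweet)   # process here instead of tearing the connection down
        return True          # keep the same connection open

    def on_error(self, status_code):
        if status_code == 420:
            # Back off and let tweepy reconnect instead of hammering the endpoint.
            time.sleep(900)
        return True

stream = tweepy.Stream(twitter_auth(tokens), MentionListener())
stream.filter(track=['#xxx'])  # blocks; pass is_async=True (tweepy 3.x) to run it in a background thread
As for async mode: is_async=True just runs the same stream in a background thread so your main thread can keep doing other work; it does not change the rate-limit behaviour.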

[python]I want to merge two code, but occur webpy error type 'exceptions.keyerror'

I want to merge two pieces of code.
A.py is web.py code.
B.py is the Google Cloud Speech (STT) example code.
But when I merge the two, web.py raises the error type 'exceptions.KeyError'.
I inserted the A.py code into B.py at the first line of main().
How do I merge this code?
This is A.py:
This is A.py
import web

urls = ("/.*", "hello")
app = web.application(urls, globals())

class hello:
    def GET(self):
        return 'Hello, world!'

if __name__ == "__main__":
    app.run()
This is B.py (the Google Cloud Speech (STT) example code):
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample that streams audio to the Google Cloud Speech API via GRPC."""
from __future__ import division
import contextlib
import functools
import re
import signal
import sys
import web
import google.auth
import google.auth.transport.grpc
import google.auth.transport.requests
from google.cloud.proto.speech.v1beta1 import cloud_speech_pb2
from google.rpc import code_pb2
import grpc
import pyaudio
from six.moves import queue
# Audio recording parameters
RATE = 48000
CHUNK = int(RATE / 10) # 100ms
# The Speech API has a streaming limit of 60 seconds of audio*, so keep the
# connection alive for that long, plus some more to give the API time to figure
# out the transcription.
# * https://g.co/cloud/speech/limits#content
DEADLINE_SECS = 60 * 3 + 5
SPEECH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
def make_channel(host, port):
"""Creates a secure channel with auth credentials from the environment."""
# Grab application default credentials from the environment
credentials, _ = google.auth.default(scopes=[SPEECH_SCOPE])
# Create a secure channel using the credentials.
http_request = google.auth.transport.requests.Request()
target = '{}:{}'.format(host, port)
return google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, target)
def _audio_data_generator(buff):
"""A generator that yields all available data in the given buffer.
Args:
buff - a Queue object, where each element is a chunk of data.
Yields:
A chunk of data that is the aggregate of all chunks of data in `buff`.
The function will block until at least one data chunk is available.
"""
stop = False
while not stop:
# Use a blocking get() to ensure there's at least one chunk of data.
data = [buff.get()]
# Now consume whatever other data's still buffered.
while True:
try:
data.append(buff.get(block=False))
except queue.Empty:
break
# `None` in the buffer signals that the audio stream is closed. Yield
# the final bit of the buffer and exit the loop.
if None in data:
stop = True
data.remove(None)
yield b''.join(data)
def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
buff.put(in_data)
return None, pyaudio.paContinue
# [START audio_stream]
@contextlib.contextmanager
def record_audio(rate, chunk):
"""Opens a recording stream in a context manager."""
# Create a thread-safe buffer of audio data
buff = queue.Queue()
audio_interface = pyaudio.PyAudio()
audio_stream = audio_interface.open(
format=pyaudio.paInt16,
# The API currently only supports 1-channel (mono) audio
channels=1, rate=rate,
input=True, frames_per_buffer=chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't overflow
# while the calling thread makes network requests, etc.
stream_callback=functools.partial(_fill_buffer, buff),
)
yield _audio_data_generator(buff)
audio_stream.stop_stream()
audio_stream.close()
# Signal the _audio_data_generator to finish
buff.put(None)
audio_interface.terminate()
# [END audio_stream]
def request_stream(data_stream, rate, interim_results=True):
"""Yields `StreamingRecognizeRequest`s constructed from a recording audio
stream.
Args:
data_stream: A generator that yields raw audio data to send.
rate: The sampling rate in hertz.
interim_results: Whether to return intermediate results, before the
transcription is finalized.
"""
# The initial request must contain metadata about the stream, so the
# server knows how to interpret it.
recognition_config = cloud_speech_pb2.RecognitionConfig(
# There are a bunch of config options you can specify. See
encoding='LINEAR16', # raw 16-bit signed LE samples
sample_rate=rate, # the rate in hertz
# See http://g.co/cloud/speech/docs/languages
# for a list of supported languages.
language_code='ko-KR', # a BCP-47 language tag
)
streaming_config = cloud_speech_pb2.StreamingRecognitionConfig(
interim_results=interim_results,
config=recognition_config,
)
yield cloud_speech_pb2.StreamingRecognizeRequest(
streaming_config=streaming_config)
for data in data_stream:
# Subsequent requests can all just have the content
yield cloud_speech_pb2.StreamingRecognizeRequest(audio_content=data)
def listen_print_loop(recognize_stream):
"""Iterates through server responses and prints them.
The recognize_stream passed is a generator that will block until a response
is provided by the server. When the transcription response comes, print it.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
num_chars_printed = 0
for resp in recognize_stream:
if resp.error.code != code_pb2.OK:
raise RuntimeError('Server error: ' + resp.error.message)
if not resp.results:
continue
# Display the top transcription
result = resp.results[0]
transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * max(0, num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript)
else:
print(transcript + overwrite_chars)
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if re.search(r'\b(exit|quit)\b', transcript, re.I):
print('Exiting..')
break
num_chars_printed = 0
def main():
urls = ("/.*", "hello")
app = web.application(urls, globals())
class hello:
def GET(self):
return 'Hello, world!'
app.run()
service = cloud_speech_pb2.SpeechStub(
make_channel('speech.googleapis.com', 443))
# For streaming audio from the microphone, there are three threads.
# First, a thread that collects audio data as it comes in
with record_audio(RATE, CHUNK) as buffered_audio_data:
# Second, a thread that sends requests with that data
requests = request_stream(buffered_audio_data, RATE)
# Third, a thread that listens for transcription responses
recognize_stream = service.StreamingRecognize(
requests, DEADLINE_SECS)
# Exit things cleanly on interrupt
signal.signal(signal.SIGINT, lambda *_: recognize_stream.cancel())
# Now, put the transcription responses to use.
try:
listen_print_loop(recognize_stream)
recognize_stream.cancel()
except grpc.RpcError as e:
code = e.code()
# CANCELLED is caused by the interrupt handler, which is expected.
if code is not code.CANCELLED:
raise
if __name__ == '__main__':
main()
The error refers to the fact that web.py will be looking for a class hello accessible from the global scope. You've defined your class hello within main(), so web.py will never find it.
That being said, there are other issues. Your call to app.run() within main() starts the web.py web server and never returns, so nothing after it will ever get executed.
Combining two code examples requires understanding both snippets. Read the docs, and keep trying.
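A minimal sketch of one way to restructure B.py along the lines of this answer, assuming you really want the web.py endpoint and the streaming recognition in one process: define hello at module level so web.py's globals() lookup can find it, and run the blocking app.run() in a background thread so the speech loop can run alongside it.
import threading
import web

urls = ("/.*", "hello")

class hello:  # module level, so web.py finds it in globals()
    def GET(self):
        return 'Hello, world!'

def start_web_app():
    app = web.application(urls, globals())
    app.run()  # blocks, so it gets its own thread

def main():
    # Run the web.py server in the background.
    threading.Thread(target=start_web_app, daemon=True).start()
    # ...then run the speech recognition exactly as in the original main().
    service = cloud_speech_pb2.SpeechStub(
        make_channel('speech.googleapis.com', 443))
    with record_audio(RATE, CHUNK) as buffered_audio_data:
        requests = request_stream(buffered_audio_data, RATE)
        recognize_stream = service.StreamingRecognize(requests, DEADLINE_SECS)
        listen_print_loop(recognize_stream)

if __name__ == '__main__':
    main()
Note that web.py's app.run() reads the listening port from the command line (defaulting to 8080), so it is simplest to keep B.py free of other positional arguments.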
