I'm using the google-cloud-speech API for my project. I'm using pipenv for my virtual environment, and I installed the google-cloud-speech API with
pipenv install google-cloud-speech
and
pipenv update google-cloud-speech
I followed this documentation: https://cloud.google.com/speech-to-text/docs/reference/libraries
This is my code:
google.py:
#!/usr/bin/env python
# coding: utf-8
import argparse
import io
import sys
import codecs
import datetime
import locale
import os
from google.cloud import speech_v1 as speech
from google.cloud.speech import enums
from google.cloud.speech import types
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.join("alt_speech_dev_01-fa5fec6806d9.json")
def get_model_by_language_id(language_id):
    model = ''
    if language_id == 1:
        model = 'ja-JP'
    elif language_id == 2:
        model = 'en-US'
    elif language_id == 3:
        model = "zh-CN"
    else:
        raise ValueError('Not Match Lang')
    return model
def transcribe_gcs_without_speech_contexts(audio_file_path, model):
    client = speech.SpeechClient()
    with io.open(audio_file_path, 'rb') as audio_file:
        content = audio_file.read()
    audio = types.RecognitionAudio(content=content)
    config = {
        "encoding": enums.RecognitionConfig.AudioEncoding.FLAC,
        "sample_rate_hertz": 16000,
        "language_code": model
    }
    operation = client.long_running_recognize(config, audio)
    print('Waiting for operation to complete...')
    operationResult = operation.result()
    ret = ''
    for result in operationResult.results:
        for alternative in result.alternatives:
            ret = alternative.transcript
    return ret
def transcribe_gcs(audio_file_path, model, keywords=None):
    client = speech.SpeechClient()
    with io.open(audio_file_path, 'rb') as audio_file:
        content = audio_file.read()
    audio = types.RecognitionAudio(content=content)
    config = {
        "encoding": enums.RecognitionConfig.AudioEncoding.FLAC,
        "sample_rate_hertz": 16000,
        "language_code": model,
        "speech_contexts": [{"phrases": keywords}]
    }
    operation = client.long_running_recognize(config, audio)
    print('Waiting for operation to complete...')
    operationResult = operation.result()
    ret = ''
    for result in operationResult.results:
        for alternative in result.alternatives:
            ret = alternative.transcript
    return ret
transcribe_gcs_without_speech_contexts('alt_en.wav', get_model_by_language_id(2))
When I try to run the Python file with
python google.py
it returns the error ImportError: cannot import name 'SpeechClient' with the following traceback:
Traceback (most recent call last):
File "google.py", line 11, in <module>
from google.cloud import speech_v1 as speech
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/google/cloud/speech_v1/__init__.py", line 17, in <module>
from google.cloud.speech_v1.gapic import speech_client
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/google/cloud/speech_v1/gapic/speech_client.py", line 18, in <module>
import pkg_resources
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3241, in <module>
@_call_aside
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3225, in _call_aside
f(*args, **kwargs)
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3269, in _initialize_master_working_set
for dist in working_set
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3269, in <genexpr>
for dist in working_set
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2776, in activate
declare_namespace(pkg)
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2275, in declare_namespace
_handle_ns(packageName, path_item)
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2208, in _handle_ns
loader.load_module(packageName)
File "/home/hoanglinh/Documents/practice_speech/google.py", line 12, in <module>
from google.cloud.speech import enums
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/google/cloud/speech.py", line 19, in <module>
from google.cloud.speech_v1 import SpeechClient
ImportError: cannot import name 'SpeechClient'
Am I doing something wrong? When I search for the error online, there is only one question about it, and it has no answer.
UPDATE:
I changed
from google.cloud import speech_v1 as speech
to
from google.cloud import speech
and now I get another error, with the following traceback:
Traceback (most recent call last):
File "google.py", line 11, in <module>
from google.cloud import speech
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/google/cloud/speech.py", line 19, in <module>
from google.cloud.speech_v1 import SpeechClient
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/google/cloud/speech_v1/__init__.py", line 17, in <module>
from google.cloud.speech_v1.gapic import speech_client
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/google/cloud/speech_v1/gapic/speech_client.py", line 18, in <module>
import pkg_resources
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3241, in <module>
@_call_aside
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3225, in _call_aside
f(*args, **kwargs)
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3269, in _initialize_master_working_set
for dist in working_set
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 3269, in <genexpr>
for dist in working_set
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2776, in activate
declare_namespace(pkg)
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2275, in declare_namespace
_handle_ns(packageName, path_item)
File "/home/hoanglinh/Documents/practice_speech/.venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2208, in _handle_ns
loader.load_module(packageName)
File "/home/hoanglinh/Documents/practice_speech/google.py", line 12, in <module>
from google.cloud.speech import enums
ImportError: cannot import name 'enums'
Has anyone tried this library before? It seems like there are so many errors just from following its docs.
The following error message is seen
from google.cloud.speech import enums
ImportError: cannot import name 'enums'
if a fresh installation of the Google Speech API was performed. Please see this page.
Along the same lines, using the nanos attribute results in the following message if you have updated the API:
AttributeError: 'datetime.timedelta' object has no attribute 'nanos'
Please see this page. Use 'microseconds' instead of 'nanos'.
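For example, a rough sketch of the adjustment for word time offsets (assuming enable_word_time_offsets was set in the config; the loop mirrors the question's code, and in newer releases start_time/end_time come back as datetime.timedelta objects):
for result in operationResult.results:
    for alternative in result.alternatives:
        for word_info in alternative.words:
            # old style: word_info.start_time.seconds + word_info.start_time.nanos * 1e-9
            start_seconds = word_info.start_time.total_seconds()  # or use .microseconds
            print(word_info.word, start_seconds)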
First solution: check your python3.6/site-packages/google/cloud for speech_v1. If it isn't there, you need to install it first.
Second solution: check your python3.6/site-packages/google/cloud for an existing speech file; if it exists, the cause of the import error is shadowing, since your alias is 'speech'. A quick way to verify is below.
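A minimal sketch of that check, run from inside the same pipenv environment (it only inspects where the top-level google package resolves):
import google
# a namespace package has no __file__, so fall back to __path__
print(getattr(google, '__file__', None) or list(google.__path__))
# If this points at a file inside your project (for example a local google.py)
# rather than site-packages, a local module is shadowing the installed package.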
Hope this helps
Try these lines of code if you're using speech_v1:
from google.cloud import speech_v1 as speech
from google.cloud.speech_v1 import enums
from google.cloud.speech_v1 import types
and these if you're using speech:
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
You can check this link.
Google has moved the AudioEncodings under google.cloud.speech_v1.types. You can use them by importing types and then running the code below:
from google.cloud.speech_v1 import types
types.RecognitionConfig.AudioEncoding.LINEAR16
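For instance, a small sketch of plugging that enum into the dict-style config from the question (assuming the 1.x client, which accepts a plain dict with snake_case field names):
from google.cloud.speech_v1 import types

config = {
    "encoding": types.RecognitionConfig.AudioEncoding.FLAC,
    "sample_rate_hertz": 16000,
    "language_code": "en-US",
}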
From the Google Cloud documentation:
Enums and Types
WARNING: Breaking change
The submodules enums and types have been removed.
Before:
from google.cloud import videointelligence
features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]
video_context = videointelligence.types.VideoContext()
After:
from google.cloud import videointelligence
features = [videointelligence.Feature.SPEECH_TRANSCRIPTION]
video_context = videointelligence.VideoContext()
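Applied to the speech library from the question, the same breaking change looks roughly like this (a sketch assuming google-cloud-speech 2.x or later, where enums and types are gone):
from google.cloud import speech

client = speech.SpeechClient()
config = speech.RecognitionConfig(
    encoding=speech.RecognitionConfig.AudioEncoding.FLAC,
    sample_rate_hertz=16000,
    language_code="en-US",
)
audio = speech.RecognitionAudio(content=content)  # content read from the audio file as before
operation = client.long_running_recognize(config=config, audio=audio)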
Related
I'm trying to use WordNet within PyScript, but I can't seem to properly load WordNet.
At first I tried:
<py-env>
- nltk
</py-env>
<py-script>
import nltk
from nltk.corpus import wordnet as wn
</py-script>
This gave me a LookupError(resource_not_found), along with the message:
Please use the NLTK Downloader to obtain the resource: >>> import nltk >>> nltk.download('wordnet')
I then tried:
<py-script>
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
</py-script>
which gave me this message in the console:
writing to py-3f0adca1-a38a-4161-c36f-7e6548260aa5 [nltk_data] Error loading wordnet: <urlopen error unknown url type:
[nltk_data] https> true
I looked at the responses here: Pyodide filesystem for NLTK resources : missing files
and tried to replicate their code:
from js import fetch
from pathlib import Path
import asyncio, os, sys, io, zipfile
response = await fetch('https://github.com/nltk/wordnet/archive/refs/heads/master.zip')
js_buffer = await response.arrayBuffer()
py_buffer = js_buffer.to_py() # this is a memoryview
stream = py_buffer.tobytes() # now we have a bytes object
d = Path("/nltk/wordnet")
d.mkdir(parents=True, exist_ok=True)
Path('/nltk/wordnet/master.zip').write_bytes(stream)
zipfile.ZipFile('/nltk/wordnet/master.zip').extractall(
path='/nltk/wordnet/'
)
This is the error message that I got:
APPENDING: True ==> py-2880055f-8922-cb23-34e4-db404fb1d7a4 --> PythonError: Traceback (most recent call last):
File "/lib/python3.10/asyncio/futures.py", line 201, in result
raise self._exception
File "/lib/python3.10/asyncio/tasks.py", line 232, in __step
result = coro.send(None)
File "/lib/python3.10/site-packages/_pyodide/_base.py", line 500, in eval_code_async
await CodeRunner(
File "/lib/python3.10/site-packages/_pyodide/_base.py", line 353, in run_async
await coroutine
File "<exec>", line 21, in
File "/lib/python3.10/zipfile.py", line 1258, in init
self._RealGetContents()
File "/lib/python3.10/zipfile.py", line 1325, in _RealGetContents
raise BadZipFile("File is not a zip file")
zipfile.BadZipFile: File is not a zip file
What am I doing wrong? Thanks!
UPDATE:
I tried installing the wn library from PyPI using
await micropip.install('https://files.pythonhosted.org/packages/ce/f1/53b07100f5c3d41fd33fc78ebb9e99d736b0460ced8acff94840311ffc60/wn-0.9.1-py3-none-any.whl')
But I get the error:
JsException(PythonError: Traceback (most recent call last):
File "/lib/python3.10/asyncio/futures.py", line 201, in result
raise self._exception
File "/lib/python3.10/asyncio/tasks.py", line 232, in __step
result = coro.send(None)
File "/lib/python3.10/site-packages/_pyodide/_base.py", line 500, in eval_code_async
await CodeRunner(
File "/lib/python3.10/site-packages/_pyodide/_base.py", line 353, in run_async
await coroutine
File "", line 14, in
File "/lib/python3.10/site-packages/wn/__init__.py", line 47, in
from wn._add import add, remove
File "/lib/python3.10/site-packages/wn/_add.py", line 21, in
from wn.project import iterpackages
File "/lib/python3.10/site-packages/wn/project.py", line 12, in
import lzma
File "/lib/python3.10/lzma.py", line 27, in
from _lzma import *
ModuleNotFoundError: No module named '_lzma' )
I have the following error while running the Python package cvu (connectome visualization utility):
Traceback (most recent call last):
File "/usr/local/bin/cvu", line 4, in <module>
__import__('pkg_resources').run_script('cvu==0.5.2', 'cvu')
File "/usr/local/lib/python2.7/dist-packages/pkg_resources/__init__.py", line 657, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/usr/local/lib/python2.7/dist-packages/pkg_resources/__init__.py", line 1437, in run_script
exec(code, namespace, namespace)
File "/usr/local/lib/python2.7/dist-packages/cvu-0.5.2-py2.7.egg/EGG-INFO/scripts/cvu", line 33, in <module>
from cvu.main import main
File "/usr/local/lib/python2.7/dist-packages/cvu-0.5.2-py2.7.egg/cvu/__init__.py", line 20, in <module>
from .main import load_adj, load_parc, usage, main
File "/usr/local/lib/python2.7/dist-packages/cvu-0.5.2-py2.7.egg/cvu/main.py", line 20, in <module>
from gui import CvuGUI,ErrorHandler
File "/usr/local/lib/python2.7/dist-packages/cvu-0.5.2-py2.7.egg/cvu/gui.py", line 24, in <module>
from dataset import Dataset
File "/usr/local/lib/python2.7/dist-packages/cvu-0.5.2-py2.7.egg/cvu/dataset.py", line 26, in <module>
from dataview import (DataView,DVMayavi,DVMatrix,DVCircle)
File "/usr/local/lib/python2.7/dist-packages/cvu-0.5.2-py2.7.egg/cvu/dataview.py", line 28, in <module>
from chaco.api import (Plot,ArrayPlotData,ColorMapper,PlotGraphicsContext)
File "/usr/lib/python2.7/dist-packages/chaco/api.py", line 37, in <module>
from abstract_plot_renderer import AbstractPlotRenderer
File "/usr/lib/python2.7/dist-packages/chaco/abstract_plot_renderer.py", line 7, in <module>
from plot_component import PlotComponent
File "/usr/lib/python2.7/dist-packages/chaco/plot_component.py", line 4, in <module>
from enable.api import Component
File "/usr/lib/python2.7/dist-packages/enable/api.py", line 21, in <module>
from markers import MarkerTrait, marker_trait, MarkerNameDict, marker_names, \
File "/usr/lib/python2.7/dist-packages/enable/markers.py", line 18, in <module>
from compiled_path import CompiledPath
File "/usr/lib/python2.7/dist-packages/enable/compiled_path.py", line 17, in <module>
from toolkit import toolkit_object
File "/usr/lib/python2.7/dist-packages/enable/toolkit.py", line 47, in <module>
_init_toolkit()
File "/usr/lib/python2.7/dist-packages/enable/toolkit.py", line 40, in _init_toolkit
format_exception_only(t, v))
ImportError: Unable to import the image backend for the qt4 toolkit (reason: ['ImportError: No module named QtOpenGL\n']).
I have installed this:
apt-get install python-qt4-gl
but I still have the same error.
I don't know what is wrong.
Thanks for any guide.
Edit:
This is the script that runs the package:
run.py
#!/usr/bin/env python
import os
import sys
#pyside can cause problems with old versions of enable
#however, forcing traits to use pyqt causes problems in anaconda
#We will let the user set this optionally, it is good for debugging to not set.
#os.environ['QT_API']='pyqt'
#os.environ['QT_API']='pyside'
#One or the other should usually be set so that matplotlib and pyface can
#communicate and use the right Qt backend
#pyside is a better default choice
scriptdir=os.getcwd()
python_cmd = 'pythonw' if sys.platform=='darwin' else 'python'
#arguments = '%s %s'%(scriptdir, " ".join(sys.argv[1:]))
#exec_cmd = "%s main.py %s" % (python_cmd, arguments)
arguments = '%s main.py %s %s' % (python_cmd, scriptdir, " ".join(sys.argv[1:]))
try:
    #see if cvu extracted locally
    os.chdir(os.path.join(os.path.dirname(os.path.dirname(
        os.path.realpath(__file__))),'cvu'))
    os.system(arguments)
except OSError:
    #see if cvu installed to system python
    from cvu.main import main
    sys.argv = arguments.split()
    main()
filename='metamorphosis_clean.txt'
file=open(filename,'rt')
text=file.read()
file.close()
from nltk import sent_tokenize
sentences=sent_tokenize(text)
print(sentences[0])
Error:
Traceback (most recent call last):
File "split_into_sentenes.py", line 1, in <module>
import nltk
File "/usr/local/lib/python2.7/dist-packages/nltk/__init__.py", line 114, in <module>
from nltk.collocations import *
File "/usr/local/lib/python2.7/dist-packages/nltk/collocations.py", line 37, in <module>
from nltk.probability import FreqDist
File "/usr/local/lib/python2.7/dist-packages/nltk/probability.py", line 47, in <module>
from collections import defaultdict, Counter
File "/usr/local/lib/python2.7/dist-packages/nltk/collections.py", line 13, in <module>
import pydoc
File "/usr/lib/python2.7/pydoc.py", line 56, in <module>
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
File "/usr/lib/python2.7/inspect.py", line 39, in <module>
import tokenize
File "/usr/lib/python2.7/tokenize.py", line 39, in <module>
COMMENT = N_TOKENS
NameError: name 'N_TOKENS' is not defined
In all likelihood you have a file named token.py in the current directory, i.e. the directory from which you are running your split_into_sentenes.py script.
If present locally, token.py will be imported before the one in the standard library, and this would result in the error that you see.
Check whether it exists and if necessary rename it to something that doesn't clash with the standard library.
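A quick way to confirm this (a sketch; run it from the same directory you run the script from):
import token
print(token.__file__)
# If this prints a path inside your project instead of the standard library,
# the local token.py is the file shadowing the real module.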
Hey, I need help with this Python error:
File "bot.py", line 3, in <module>
import telepot
File "/home/lee/venv/telegram/local/lib/python2.7/site-packages/telepot/__init__.py", line 220, in <module>
from . import helper
File "/home/lee/venv/telegram/local/lib/python2.7/site-packages/telepot/helper.py", line 7, in <module>
import inspect
File "/usr/lib/python2.7/inspect.py", line 39, in <module>
import tokenize
File "/usr/lib/python2.7/tokenize.py", line 39, in <module>
COMMENT = N_TOKENS
bot.py
import telepot
bot = telepot.Bot('<bot_token>')
print bot.getMe()
from pprint import pprint
response = bot.getUpdates()
pprint(response)
This error comes up whenever I import any Telegram Python module. All help will be appreciated.
I am trying to use Boto3 in order to manage some EC2 instances from my GAE app, but importing Boto3 results in the following error:
Traceback (most recent call last):
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/runtime/wsgi.py", line 240, in Handle
handler = _config_handle.add_wsgi_middleware(self._LoadHandler())
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/runtime/wsgi.py", line 299, in _LoadHandler
handler, path, err = LoadObject(self._handler)
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/runtime/wsgi.py", line 85, in LoadObject
obj = __import__(path[0])
File "/Users/squad/Desktop/Squad/Squad/squad.py", line 6, in <module>
from boto3 import Session
File "/Users/squad/Desktop/Squad/Squad/lib/boto3/__init__.py", line 16, in <module>
from boto3.session import Session
File "/Users/squad/Desktop/Squad/Squad/lib/boto3/session.py", line 17, in <module>
import botocore.session
File "/Users/squad/Desktop/Squad/Squad/lib/botocore/session.py", line 32, in <module>
from botocore.loaders import create_loader
File "/Users/squad/Desktop/Squad/Squad/lib/botocore/loaders.py", line 188, in <module>
class Loader(object):
File "/Users/squad/Desktop/Squad/Squad/lib/botocore/loaders.py", line 201, in Loader
CUSTOMER_DATA_PATH = os.path.join(os.path.expanduser('~'),
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/posixpath.py", line 261, in expanduser
import pwd
File "/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine/google/appengine/tools/devappserver2/python/sandbox.py", line 963, in load_module
raise ImportError('No module named %s' % fullname)
ImportError: No module named pwd
I am able to use Boto just fine, so this is in no way a pressing need, but I would prefer to use Boto3 if possible.
I am using Python 2.7, if that is of any help.
Thanks for the help.
You will have to fake the pwd module.
Create a file named fake_pwd.py with the necessary shims:
class struct_passwd_dummy(object):
    def __init__(self, uid):
        self.pw_name = "user"
        self.pw_passwd = "x"
        self.pw_uid = uid
        self.pw_gid = uid
        self.pw_gecos = "user"
        self.pw_dir = "/home/user"
        self.pw_shell = "/bin/sh"

def getpwuid(uid):
    return struct_passwd_dummy(uid)
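A quick sanity check of the stub (a sketch; the uid value is arbitrary):
import fake_pwd
print(fake_pwd.getpwuid(1000).pw_dir)  # -> /home/user, which is all expanduser('~') needs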
Then, in appengine_config.py, try this hack:
try:
    import boto3
except ImportError:
    import sys
    import fake_pwd
    sys.modules["pwd"] = fake_pwd
    import boto3
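The idea behind the hack: the dev_appserver sandbox blocks the real pwd module (hence the ImportError raised from posixpath.expanduser in the traceback), so registering the stub under sys.modules["pwd"] before retrying the import lets expanduser('~') succeed and botocore finish loading. Whether production App Engine tolerates the same trick is worth verifying separately.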